repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
jima80525/pyres | pyres/filemanager.py | 1 | 4287 | """
manages the files on the mp3 player
"""
import os
import re
import logging
import shutil
import pyres.utils as utils
def _double_digit_name(name):
""" Makes all numbers two digit numbers by adding a leading 0 where
necessary. Three digit or longer numbers are unaffected. """
# do a little clean up to start with
name = name.rstrip().replace('\\', '/')
name = name.rstrip('/') # make sure we don't have trailing / chars
    # now pull off the trailing '3' on .mp3 filenames so we don't convert that
mp3suffix = ''
if name.endswith('mp3'):
name = name[:-1]
mp3suffix = '3'
    # the regex produces an empty string at the end, skip that or zfill will
# expand it to 00. Note we cannot just remove the last element from the
# split as it does not always produce an empty element. Joy
elements = re.split(r'(\d+)', name)
if elements[-1] == '':
elements.pop()
result = ""
# this next section is a bit goofy. We need to tell whether a given
# element is a number (\d+) or not. Only if it's a number do we want to do
# the zfill on it. Else a name like '1b1a1z.1mp3' ends up adding a zero to
# the b a and z elements as well as the 1s. (in other words that string
# ends up with '010b010a010z.01mp3' instead of '01b01a01z.01mp3')
# It might be possible to be clever about the regex grouping on the split,
# but that idea is escaping me presently.
for element in elements:
try:
int(element)
except ValueError:
result += element
else:
result += element.zfill(2)
result += mp3suffix
return re.sub(' +', ' ', result) # remove double spaces
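# Illustrative sketch (not part of the original module): expected behaviour of
# _double_digit_name on a typical chapter path, assuming forward-slash input:
#   _double_digit_name("Book/2/Chapter 3.mp3")  ->  "Book/02/Chapter 03.mp3"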
class FileManager(object):
""" Class to manage filesystem on mp3 player """
def __init__(self, base_dir):
# set default value for mp3 player
# base_dir = base_dir or "TestFiles"
base_dir = base_dir or "/media/jima/3C33-7AC4/"
self.base_dir = base_dir
utils.mkdir_p(self.base_dir)
def does_filesystem_exist(self):
""" Tests for existence - this is unused in real code, but it's handy
for unit tests. It was originally added to keep lint happy. """
return os.path.exists(self.base_dir)
def copy_audiobook(self, source_dir, dest_dir=None):
""" Main routine to convert and copy files to mp3 player """
if not dest_dir:
dest_dir = source_dir
print("Copying audiobook from %s" % source_dir)
else:
print("Coping audiobook from %s to %s" % (source_dir, dest_dir))
for root, dirs, files in os.walk(source_dir):
dirs.sort()
for dir_name in dirs:
full_dir = os.path.join(root, _double_digit_name(dir_name))
utils.mkdir_p(os.path.join(self.base_dir, full_dir))
for filename in sorted(files):
file_name = os.path.join(root, filename)
newfile = _double_digit_name(os.path.join(self.base_dir,
dest_dir, file_name))
logging.debug("copying %s to %s", file_name, newfile)
print("copying to %s" % (newfile))
shutil.copyfile(file_name, newfile)
def copy_episodes_to_player(self, episodes):
""" Copies the episodes to the mp3 player """
# make sure the podcast directory exists
podcast_dir = os.path.join(self.base_dir, "podcasts_" +
utils.current_date_time_as_string())
utils.mkdir_p(podcast_dir)
total = len(episodes)
counter = 0
for episode in sorted(episodes, key=lambda x: x.date):
episode.file_name = episode.file_name.replace('\\', '/')
(_, tail) = os.path.split(episode.file_name)
newfile = os.path.join(podcast_dir, tail)
logging.debug("copying %s to %s", episode.file_name, newfile)
shutil.copyfile(episode.file_name, newfile)
counter += 1
logging.debug("copied %s to %s", episode.file_name, newfile)
print("%2d/%d: copied %s to %s" % (counter, total,
episode.file_name, newfile))
| mit | -3,073,739,497,052,079,000 | 40.621359 | 79 | 0.586191 | false | 3.767135 | false | false | false |
bmars/sisko | sisko/app.py | 1 | 4908 | # Copyright (C) 2014 Brian Marshall
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from operator import attrgetter
from gi.repository import Gio
import urwid
from sisko.widgets import Dialog, OverlayStack, PathBar, FileItem
# Vim-like cursor movement.
urwid.command_map['k'] = 'cursor up'
urwid.command_map['j'] = 'cursor down'
urwid.command_map['h'] = 'cursor left'
urwid.command_map['l'] = 'cursor right'
class Application:
"""
Main application class.
"""
_PALETTE = [('dialog', 'black', 'light gray'),
('focused', 'white', 'dark blue'),
('folder', 'bold', ''),
('folder focused', 'white, bold', 'dark blue'),
('footer', 'light gray', 'dark gray'),
('footer key', 'white, bold', 'black'),
('path-bar', 'white', 'black'),
('path-bar current', 'white, bold', 'dark gray')]
_FOOTER = [('footer key', " Alt+H "), " ", _("Hidden Files"), " ",
('footer key', " Q "), " ", _("Quit")]
def __init__(self):
self._path_bar = PathBar()
self._files = urwid.SimpleFocusListWalker([])
self._toplevel = OverlayStack(urwid.Frame(
urwid.ListBox(self._files),
header=self._path_bar,
footer=urwid.AttrMap(urwid.Text(self._FOOTER), 'footer')))
self._show_hidden = False
def run(self, folder: Gio.File):
"""
Run the application, opening the given folder.
"""
self._open(folder)
main = urwid.MainLoop(self._toplevel, self._PALETTE,
unhandled_input=self._unhandled_input)
main.screen.set_terminal_properties(bright_is_bold=False)
main.run()
def _open(self, folder: Gio.File):
"""
Open a folder.
"""
children = folder.enumerate_children(
','.join([Gio.FILE_ATTRIBUTE_STANDARD_IS_HIDDEN,
Gio.FILE_ATTRIBUTE_STANDARD_IS_BACKUP,
Gio.FILE_ATTRIBUTE_STANDARD_NAME,
FileItem.FILE_ATTRIBUTES]),
Gio.FileQueryInfoFlags.NONE, None)
self._path_bar.location = folder
del self._files[:]
for info in children:
if self._show_hidden or not (info.get_is_hidden() or
info.get_is_backup()):
self._files.append(FileItem(folder.get_child(info.get_name()),
info))
list.sort(self._files, key=attrgetter('name_key'))
def _open_focused(self):
"""
Open the focused folder.
"""
focus = self._files.get_focus()[0]
if (focus is not None and
focus.info.get_file_type() == Gio.FileType.DIRECTORY):
self._open(focus.file)
def _trash_focused(self):
"""
Move the focused file to the Trash.
"""
focus = self._files.get_focus()[0]
if focus is None:
return
dialog = Dialog(
_("Are you sure you want to move \"{}\" to the Trash?").format(
focus.info.get_display_name()),
[(_("Cancel"), 'cancel'), (_("Move to Trash"), 'trash')])
def on_response(response_id):
if response_id == 'trash':
focus.file.trash(None)
del self._files[self._files.index(focus)]
self._toplevel.show_dialog(dialog, on_response)
def _unhandled_input(self, key):
"""
Handle application key commands.
"""
if urwid.command_map[key] == 'cursor left':
# Open previous folder in the path.
if self._path_bar.previous is not None:
self._open(self._path_bar.previous)
elif urwid.command_map[key] == 'cursor right':
# Open next folder in the path.
if self._path_bar.next is not None:
self._open(self._path_bar.next)
elif urwid.command_map[key] == 'activate':
self._open_focused()
elif key == 'delete':
self._trash_focused()
elif key == 'meta h':
self._show_hidden = not self._show_hidden
self._open(self._path_bar.location)
elif key in ('Q', 'q'):
raise urwid.ExitMainLoop
| gpl-3.0 | 5,710,978,842,891,577,000 | 36.753846 | 78 | 0.556031 | false | 3.987002 | false | false | false |
loehnertj/bsbgateway | bsbgateway/util/jos_parser.py | 1 | 15340 |
##############################################################################
#
# Copyright (C) Johannes Loehnert, 2013-2015
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
__all__ = [
"Token", "AstNode", "ParserContext", "StackTrace",
"seq", "multiple", "optional", "anyof",
"generate_lexer", "re", "generate_parser",
]
class Token(object):
ntype=0
content=None
srcoffset = 0
def __init__(o, ntype, content=None, srcoffset=0):
o.ntype = ntype
o.content = content
o.srcoffset = srcoffset
def __unicode__(o):
if not o.content:
return o.ntype
content = o.content
if not isinstance(content, unicode):
content = unicode(content)
if len(content)> 40:
content = content[:37] + u"..."
return unicode(o.ntype) + u"<" + content.replace("\n", "\n ") + u">"
def __str__(o):
if not o.content:
return o.ntype
content = o.content
if not isinstance(content, str):
content = str(content) # may throw encode error!!!
if len(content)> 40:
content = content[:37] + "..."
return o.ntype + "<" + content.replace("\n", "\n ") + ">"
__repr__ = __str__
def __call__(o):
return o.content
class AstNode:
"""represents a node of the abstract syntax tree
sequence is a list of the children. Its items
can be Tokens and AstNodes, mixing is allowed.
Take care: a single Token object is a valid tree!!
The tree structure will match the given grammar.
"""
ntype = ""
_children = None
def __init__(o, ntype, children):
o.ntype = ntype
o._children = children
def __str__(o):
s = o.ntype
for c in o._children:
s = s + "\n" + str(c).replace("\n", "\n ")
return s
def __unicode__(o):
s = unicode(o.ntype)
for c in o._children:
s = s + u"\n" + unicode(c).replace("\n", "\n ")
return s
def __getattr__(o, ntype):
"""gets the child node(s) having the given ntype.
Returns list of children that matches."""
result = []
for c in o._children:
if c.ntype == ntype:
result.append(c)
return result
def __iter__(o):
"""iterates over the children of this node."""
return o._children.__iter__()
def __call__(o):
"""return token content of this subtree.
The subtree must contain 0 or 1 token, multiple tokens cause an Exception.
Returns token.content (None if no token is there)."""
result = [c() for c in o._children]
result = [x for x in result if x is not None]
if len(result)>1:
raise ValueError("More than one token in subtree '%s'"%o.ntype)
if len(result)==0: return None
return result[0]
def __getitem__(o, key):
if isinstance(key, basestring):
l = o.__getattr__(key)
if len(l) > 1: raise ValueError("more than one %s child"%key)
if len(l)==0: return None
return l[0]
else:
return o._children[key]
content = property(__call__)
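# Illustrative sketch (not part of the original module) of how an AstNode tree is
# typically navigated, assuming a grammar that defines a lowercase rule "expr":
#   node.expr      -> list of children whose ntype is "expr"
#   node["expr"]   -> the single "expr" child (None if absent, error if several)
#   node.content   -> content of the single Token contained in this subtree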
class ParserContext:
def __init__(o, tokens, ruleset):
o.tokens = tokens
o.ruleset = ruleset.copy()
o.stack_trace = None
o.stack = []
def push(o, symbol):
'''processor should push HIS OWN name before calling subprocessors, and .pop() afterwards.'''
o.stack.append(symbol)
def pop(o):
o.stack.pop()
def mktrace(o, symbol, errdescription="", reached_position=-1):
"""create a stack trace and remember it if a bigger position was reached."""
trace = StackTrace(o.stack+[symbol], errdescription, reached_position)
# remember the trace if there is none remembered, if it reached longer than the last one,
# or if it extends the last remembered one.
if o.stack_trace is None \
or o.stack_trace.reached_position < trace.reached_position:
o.stack_trace = trace
return trace
class StackTrace:
stack = []
reached_position =-1
errdescription = ""
def __init__(o, stack, errdescription="", reached_position=-1):
o.stack = stack[:]
o.errdescription = errdescription
o.reached_position = reached_position
def __str__(o):
return " ".join(o.stack) + " : '" + o.errdescription + "' (@token %d"%o.reached_position + ")"
def _convert(args):
"""reads the given list and replaces all strings with the corresponding _expect processor.
"""
processors = list()
for processor in args:
# replace strings by the '_expect' processor.
if isinstance(processor, basestring):
processor = _expect(processor)
processors.append(processor)
return processors
# Processors: ==========================================================
# each of those functions returns a processor for the token stream.
#def process(pcontext, position):
# trys to apply itself onto the tokens, if needed branches to another rule.
# it starts at position (index into tokens).
# Returns (partlist, new_position):
# partlist := LIST of AstNodes and Tokens
# StackTrace if not applicable.
# new_position: where further parsing must continue
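# Illustrative sketch (not part of the original module): the smallest processor
# that satisfies the contract above consumes nothing and always succeeds:
#   def epsilon(pcontext, position):
#       return [], position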
def _expect(text):
"""Expect processor: if text is lowercase, expect something matching that rule.
if text is not lowercase, expect a token with that ntype.
You do not need to use it directly. All strings given as argument to another processor are directly matched.
"""
if text != text.lower():
# expect that particular TOKEN
def process(pcontext, position):
tokens = pcontext.tokens
if len(tokens) > position:
token = tokens[position]
else:
# after end of stream there comes an infinite amount of EOF tokens.
token = Token("EOF", None)
if token.ntype == text:
return [token], position+1
else:
return pcontext.mktrace("expect", errdescription="expected %s token"%text, reached_position=position), position
else:
# try whether the RULE applies
def process(pcontext, position):
pcontext.push("<%s>"%text)
result, new_position = _try_rule(pcontext, position, text)
pcontext.pop()
if isinstance(result, StackTrace):
return result, position
else:
return [result], new_position
return process
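# Note (added for clarity, not part of the original module): inside seq/multiple/
# optional/anyof a plain string such as "NUM" expects a token of that ntype,
# while a lowercase string such as "expr" recurses into the grammar rule of that
# name; both forms are wrapped by _expect via _convert().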
def seq(*args):
"""sequence processor: match the full sequence given as arguments."""
processors = _convert(args)
def process(pcontext, position):
result = []
start_position = position
for processor in processors:
subresult, position = processor(pcontext, position)
if isinstance(subresult, StackTrace):
# parsing failed further down.
# exception here: pass Stacktrace directly!
return subresult, start_position
else:
# append returned list to my result
result += subresult
#success
return result, position
return process
def multiple(*args):
"""multiple processor: match the sequence given as arguments n times (n>=0).
"""
subseq = seq(*args)
def process(pcontext, position):
result = []
while True:
pcontext.push("multiple")
subresult, new_position = subseq(pcontext, position)
pcontext.pop()
if isinstance(subresult, StackTrace):
# ignore trace and return what you got so far
break;
# detect and break endless loop
if len(subresult) == 0:
subresult = pcontext.mktrace("multiple", errdescription="endless loop detected", reached_position = position)
break;
result += subresult
position = new_position
return result, position
return process
def optional(*args):
"""optional processor: match the full sequence given as argument, or empty list"""
subseq = seq(*args)
def process(pcontext, position):
pcontext.push("optional")
subresult, new_position = subseq(pcontext, position)
pcontext.pop()
# only thing we have to do is convert StackTrace (no match) into a valid match.
if isinstance(subresult, StackTrace):
return [], position
else:
return subresult, new_position
return process
def anyof(*args):
"""anyof processor: try the given processors in turn, return the first match.
for alternative sequences, wrap them in seq(...).
"""
processors = _convert(args)
if len(processors)==0:
        raise ValueError("at least one alternative must be given to anyof")
def process(pcontext, position):
for processor in processors:
pcontext.push("anyof")
result, new_position = processor(pcontext, position)
pcontext.pop()
if not isinstance(result, StackTrace):
return result, new_position
# nothing matched
return pcontext.mktrace("anyof", "no alternative matched", position), position
return process
# END of processor generators! ============================
def _try_rule(pcontext, position, rulename):
""" takes a list of Tokens, the ruleset, and the name of the subtree rule.
Returns the AST (tree of AstNodes and/or tokens), or StackTrace if parsing failed.
"""
processor = pcontext.ruleset[rulename]
result, new_position = processor(pcontext, position)
if isinstance(result, StackTrace):
return result, position
else:
return AstNode(rulename, result), new_position
def generate_lexer(symbols, re_flags):
"""generates a lexer function for the given symbol set.
The symbol set is a list: ["SYMBOL1", "regex1", "SYMBOL2", "regex2", (...)].
Internally, re.Scanner is used. Look up the re module docs for regexp syntax.
Applied to a source string, the lexer function returns a list of Tokens, ie.
Token objects.
Use the empty string "" as symbol for symbols to be ignored (e.g. whitespace).
No Tokens are generated for those.
Mark the content of the token by a capture group in the regexp. If there is
a named group "content", it is set as Token content. If not, the first
capture group is set as Token content. If there are no capture groups,
content will be None.
Known Bug: the first regex will always have a capture group, by default the
whole match. If you want a token without content, put () at the end to
make the first capture group an empty string.
"""
# factory that returns a specific token-generator.
def factory(ntype, has_value):
def mktoken(regex, match):
if has_value:
# From the construction of the regex, the group having the
# index of the named group +1 is our value.
content = match.group(regex.groupindex[ntype] + 1)
else:
content = None
t = Token(ntype, content, match.start())
return t
return mktoken
regexs = []
symnames = []
funcs = {}
for sym, regex in zip(symbols[::2], symbols[1::2]):
if sym == "":
regexs.append("r(%s)"%(sym))
else:
symnames.append(sym)
regexs.append(r"(?P<%s>%s)"%(sym, regex))
# check if the regex defines groups i.e. delivers a value
p = re.compile(regex)
funcs[sym] = factory(sym, (p.groups>0))
regex = re.compile("|".join(regexs), re_flags)
def lexer(text):
tokens = []
lastpos = 0
for match in regex.finditer(text):
# find matched symbol
groups = match.groupdict()
for sym in symnames:
if groups[sym]:
tokens.append(funcs[sym](regex, match))
break;
lastpos = match.end()
return tokens, text[lastpos:]
return lexer
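# Minimal usage sketch (not part of the original module); symbol table and input
# are invented for illustration:
#   lexer = generate_lexer(["NUM", r"(\d+)", "", r"\s+"], 0)
#   tokens, rest = lexer("12 34")   # -> [NUM<12>, NUM<34>], rest == ""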
def generate_parser(ruleset, entrypoint=""):
"""generates a parser for the given grammar (ruleset).
The ruleset must be a dictionary with:
string keys (rulenames), which MUST be lowercase
processor or string values.
values:
processors are callbacks built by nesting the functions seq, multiple, optional, anyof.
string values match either another rule (if lowercase) or one token (if not lowercase).
In the latter case, the string value is compared against the Token.ntype.
by default, the rule "" (empty string as key) is used as entrypoint. You can give another
entrypoint for testing parts of the grammar.
"""
rules = ruleset.copy()
# convert string values into _expect
for key in rules.keys():
if isinstance(rules[key], basestring):
rules[key] = _expect(rules[key])
def parse(tokens):
""" takes a list of Tokens.
Returns (tree, pcontext) -
tree: the AST (tree of AstNodes and/or tokens), or None if parsing failed.
NOTE that a single Token is also a valid tree.
pcontext: final state of parsing contest (for error location)
.stack_trace: a StackTrace object if parsing failed
.stack_trace.stack: list of called operators
.stack_trace.reached_position: where the parser failed to continue
use it to validate if everything was read, or for error messages.
"""
pcontext = ParserContext(tokens, rules)
result, end_position = _try_rule(pcontext, 0, "")
if isinstance(result, StackTrace):
result = None
print pcontext.stack_trace
else:
pcontext.stack_trace = None
return result, pcontext
return parse
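# Minimal end-to-end sketch (not part of the original module); grammar, symbols
# and input are invented for illustration:
#   lexer  = generate_lexer(["NUM", r"(\d+)", "PLUS", r"(\+)", "", r"\s+"], 0)
#   parser = generate_parser({"": seq("sum"),
#                             "sum": seq("NUM", multiple("PLUS", "NUM"))})
#   tokens, _ = lexer("1 + 2 + 3")
#   tree, pcontext = parser(tokens)   # AstNode on success, None on failure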
| gpl-3.0 | -6,327,910,968,418,215,000 | 36.93401 | 127 | 0.574185 | false | 4.427128 | false | false | false |
beiko-lab/gengis | bin/Lib/site-packages/scipy/sparse/csgraph/_validation.py | 1 | 2475 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.sparse import csr_matrix, isspmatrix, isspmatrix_csc, isspmatrix_csr
from ._tools import csgraph_to_dense, csgraph_from_dense,\
csgraph_masked_from_dense, csgraph_from_masked
DTYPE = np.float64
def validate_graph(csgraph, directed, dtype=DTYPE,
csr_output=True, dense_output=True,
copy_if_dense=False, copy_if_sparse=False,
null_value_in=0, null_value_out=np.inf,
infinity_null=True, nan_null=True):
"""Routine for validation and conversion of csgraph inputs"""
if not (csr_output or dense_output):
raise ValueError("Internal: dense or csr output must be true")
# if undirected and csc storage, then transposing in-place
# is quicker than later converting to csr.
if (not directed) and isspmatrix_csc(csgraph):
csgraph = csgraph.T
if isspmatrix(csgraph):
if csr_output:
csgraph = csr_matrix(csgraph, dtype=DTYPE, copy=copy_if_sparse)
else:
csgraph = csgraph_to_dense(csgraph, null_value=null_value_out)
elif np.ma.is_masked(csgraph):
if dense_output:
mask = csgraph.mask
csgraph = np.array(csgraph.data, dtype=DTYPE, copy=copy_if_dense)
csgraph[mask] = null_value_out
else:
csgraph = csgraph_from_masked(csgraph)
else:
if dense_output:
csgraph = csgraph_masked_from_dense(csgraph,
copy=copy_if_dense,
null_value=null_value_in,
nan_null=nan_null,
infinity_null=infinity_null)
mask = csgraph.mask
csgraph = np.asarray(csgraph.data, dtype=DTYPE)
csgraph[mask] = null_value_out
else:
csgraph = csgraph_from_dense(csgraph, null_value=null_value_in,
infinity_null=infinity_null,
nan_null=nan_null)
if csgraph.ndim != 2:
raise ValueError("compressed-sparse graph must be two dimensional")
if csgraph.shape[0] != csgraph.shape[1]:
raise ValueError("compressed-sparse graph must be shape (N, N)")
return csgraph
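# Minimal usage sketch (illustrative only, not part of scipy): with the default
# flags a dense input is returned as a dense float array whose absent edges are
# np.inf, while a sparse input is returned as a csr_matrix copy:
#   g = np.array([[0., 2.], [0., 0.]])
#   validate_graph(g, directed=True)              # dense, non-edges become inf
#   validate_graph(csr_matrix(g), directed=True)  # csr_matrix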
| gpl-3.0 | 2,765,522,862,928,460,300 | 40.672414 | 79 | 0.560404 | false | 4.004854 | false | false | false |
MobSF/Mobile-Security-Framework-MobSF | mobsf/StaticAnalyzer/views/ios/db_interaction.py | 1 | 7363 | """Module holding the functions for the db."""
import logging
from django.conf import settings
from mobsf.MobSF.utils import python_dict, python_list
from mobsf.StaticAnalyzer.models import StaticAnalyzerIOS
from mobsf.StaticAnalyzer.models import RecentScansDB
logger = logging.getLogger(__name__)
def get_context_from_db_entry(db_entry):
"""Return the context for IPA/ZIP from DB."""
try:
logger.info('Analysis is already Done. Fetching data from the DB...')
context = {
'version': settings.MOBSF_VER,
'title': 'Static Analysis',
'file_name': db_entry[0].FILE_NAME,
'app_name': db_entry[0].APP_NAME,
'app_type': db_entry[0].APP_TYPE,
'size': db_entry[0].SIZE,
'md5': db_entry[0].MD5,
'sha1': db_entry[0].SHA1,
'sha256': db_entry[0].SHA256,
'build': db_entry[0].BUILD,
'app_version': db_entry[0].APP_VERSION,
'sdk_name': db_entry[0].SDK_NAME,
'platform': db_entry[0].PLATFORM,
'min_os_version': db_entry[0].MIN_OS_VERSION,
'bundle_id': db_entry[0].BUNDLE_ID,
'bundle_url_types': python_list(db_entry[0].BUNDLE_URL_TYPES),
'bundle_supported_platforms':
python_list(db_entry[0].BUNDLE_SUPPORTED_PLATFORMS),
'icon_found': db_entry[0].ICON_FOUND,
'info_plist': db_entry[0].INFO_PLIST,
'binary_info': python_dict(db_entry[0].BINARY_INFO),
'permissions': python_list(db_entry[0].PERMISSIONS),
'ats_analysis': python_list(db_entry[0].ATS_ANALYSIS),
'binary_analysis': python_list(db_entry[0].BINARY_ANALYSIS),
'macho_analysis': python_dict(db_entry[0].MACHO_ANALYSIS),
'ios_api': python_dict(db_entry[0].IOS_API),
'code_analysis': python_dict(db_entry[0].CODE_ANALYSIS),
'file_analysis': python_list(db_entry[0].FILE_ANALYSIS),
'libraries': python_list(db_entry[0].LIBRARIES),
'files': python_list(db_entry[0].FILES),
'urls': python_list(db_entry[0].URLS),
'domains': python_dict(db_entry[0].DOMAINS),
'emails': python_list(db_entry[0].EMAILS),
'strings': python_list(db_entry[0].STRINGS),
'firebase_urls': python_list(db_entry[0].FIREBASE_URLS),
'appstore_details': python_dict(db_entry[0].APPSTORE_DETAILS),
}
return context
except Exception:
logger.exception('Fetching from DB')
def get_context_from_analysis(app_dict,
info_dict,
code_dict,
bin_dict,
all_files):
"""Get the context for IPA/ZIP from analysis results."""
try:
context = {
'version': settings.MOBSF_VER,
'title': 'Static Analysis',
'file_name': app_dict['file_name'],
'app_name': info_dict['bin_name'],
'app_type': bin_dict['bin_type'],
'size': app_dict['size'],
'md5': app_dict['md5_hash'],
'sha1': app_dict['sha1'],
'sha256': app_dict['sha256'],
'build': info_dict['build'],
'app_version': info_dict['bundle_version_name'],
'sdk_name': info_dict['sdk'],
'platform': info_dict['pltfm'],
'min_os_version': info_dict['min'],
'bundle_id': info_dict['id'],
'bundle_url_types': info_dict['bundle_url_types'],
'bundle_supported_platforms':
info_dict['bundle_supported_platforms'],
'icon_found': app_dict['icon_found'],
'info_plist': info_dict['plist_xml'],
'binary_info': bin_dict['bin_info'],
'permissions': info_dict['permissions'],
'ats_analysis': info_dict['inseccon'],
'binary_analysis': bin_dict['bin_code_analysis'],
'macho_analysis': bin_dict['checksec'],
'ios_api': code_dict['api'],
'code_analysis': code_dict['code_anal'],
'file_analysis': all_files['special_files'],
'libraries': bin_dict['libraries'],
'files': all_files['files_short'],
'urls': code_dict['urlnfile'],
'domains': code_dict['domains'],
'emails': code_dict['emailnfile'],
'strings': bin_dict['strings'],
'firebase_urls': code_dict['firebase'],
'appstore_details': app_dict['appstore'],
}
return context
except Exception:
logger.exception('Rendering to Template')
def save_or_update(update_type,
app_dict,
info_dict,
code_dict,
bin_dict,
all_files):
"""Save/Update an IPA/ZIP DB entry."""
try:
values = {
'FILE_NAME': app_dict['file_name'],
'APP_NAME': info_dict['bin_name'],
'APP_TYPE': bin_dict['bin_type'],
'SIZE': app_dict['size'],
'MD5': app_dict['md5_hash'],
'SHA1': app_dict['sha1'],
'SHA256': app_dict['sha256'],
'BUILD': info_dict['build'],
'APP_VERSION': info_dict['bundle_version_name'],
'SDK_NAME': info_dict['sdk'],
'PLATFORM': info_dict['pltfm'],
'MIN_OS_VERSION': info_dict['min'],
'BUNDLE_ID': info_dict['id'],
'BUNDLE_URL_TYPES': info_dict['bundle_url_types'],
'BUNDLE_SUPPORTED_PLATFORMS':
info_dict['bundle_supported_platforms'],
'ICON_FOUND': app_dict['icon_found'],
'INFO_PLIST': info_dict['plist_xml'],
'BINARY_INFO': bin_dict['bin_info'],
'PERMISSIONS': info_dict['permissions'],
'ATS_ANALYSIS': info_dict['inseccon'],
'BINARY_ANALYSIS': bin_dict['bin_code_analysis'],
'MACHO_ANALYSIS': bin_dict['checksec'],
'IOS_API': code_dict['api'],
'CODE_ANALYSIS': code_dict['code_anal'],
'FILE_ANALYSIS': all_files['special_files'],
'LIBRARIES': bin_dict['libraries'],
'FILES': all_files['files_short'],
'URLS': code_dict['urlnfile'],
'DOMAINS': code_dict['domains'],
'EMAILS': code_dict['emailnfile'],
'STRINGS': bin_dict['strings'],
'FIREBASE_URLS': code_dict['firebase'],
'APPSTORE_DETAILS': app_dict['appstore'],
}
if update_type == 'save':
db_entry = StaticAnalyzerIOS.objects.filter(
MD5=app_dict['md5_hash'])
if not db_entry.exists():
StaticAnalyzerIOS.objects.create(**values)
else:
StaticAnalyzerIOS.objects.filter(
MD5=app_dict['md5_hash']).update(**values)
except Exception:
logger.exception('Updating DB')
try:
values = {
'APP_NAME': info_dict['bin_name'],
'PACKAGE_NAME': info_dict['id'],
'VERSION_NAME': info_dict['bundle_version_name'],
}
RecentScansDB.objects.filter(
MD5=app_dict['md5_hash']).update(**values)
except Exception:
logger.exception('Updating RecentScansDB')
| gpl-3.0 | -1,673,612,552,239,371,300 | 41.316092 | 77 | 0.528317 | false | 3.674152 | false | false | false |
Shatki/PyIMU | test/magnetosphere.py | 1 | 1580 | from mpl_toolkits.mplot3d import axes3d
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from socket import *
import time
# Declare all global variables
HOST = '192.168.0.76'
PORT = 21566
BUFSIZ = 512
ADDR = (HOST, PORT)
bad_packet = 0
good_packet = 0
# fig, ax = plt.subplots()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Socket
tcpCliSock = socket(AF_INET, SOCK_STREAM)
tcpCliSock.connect(ADDR)
# Non-blocking plotting (interactive mode)
plt.ion()
tstart = time.time()
# real-time plotting loop
X, Y, Z = [], [], []
while True:
try:
        # read data from the network
        data = tcpCliSock.recv(BUFSIZ)
if data:
print(len(X), data)
data = data.decode().split(',')
if len(data) == 9:
# print('Data received', data)
# tcpCliSock.send(b'Ok')
good_packet += 1
else:
bad_packet += 1
        # append the parsed coordinates for plotting
        X.append(float(data[0]))
        Y.append(float(data[1]))
        Z.append(float(data[2]))
frame = ax.scatter(X, Y, Z, c='b', marker='o')
# Remove old line collection before drawing
#if oldcol is not None:
# ax.collections.remove(oldcol)
plt.pause(0.001 / len(X))
except KeyboardInterrupt:
tcpCliSock.close()
print('FPS: %f' % (len(X) / (time.time() - tstart)))
break
| gpl-3.0 | -736,870,472,682,010,900 | 21.328358 | 60 | 0.574866 | false | 2.888031 | false | false | false |
embray/astropy_helpers | setup.py | 1 | 2069 | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import ah_bootstrap
import pkg_resources
from setuptools import setup
from astropy_helpers.setup_helpers import register_commands, get_package_info
from astropy_helpers.version_helpers import generate_version_py
NAME = 'astropy_helpers'
VERSION = '1.1.dev'
RELEASE = 'dev' not in VERSION
DOWNLOAD_BASE_URL = 'http://pypi.python.org/packages/source/a/astropy-helpers'
generate_version_py(NAME, VERSION, RELEASE, False, uses_git=not RELEASE)
# Use the updated version including the git rev count
from astropy_helpers.version import version as VERSION
cmdclass = register_commands(NAME, VERSION, RELEASE)
# This package actually doesn't use the Astropy test command
del cmdclass['test']
setup(
name=pkg_resources.safe_name(NAME), # astropy_helpers -> astropy-helpers
version=VERSION,
description='Utilities for building and installing Astropy, Astropy '
'affiliated packages, and their respective documentation.',
author='The Astropy Developers',
author_email='[email protected]',
license='BSD',
url='http://astropy.org',
long_description=open('README.rst').read(),
download_url='{0}/astropy-helpers-{1}.tar.gz'.format(DOWNLOAD_BASE_URL,
VERSION),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Framework :: Setuptools Plugin',
'Framework :: Sphinx :: Extension',
'Framework :: Sphinx :: Theme',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Archiving :: Packaging'
],
cmdclass=cmdclass,
zip_safe=False,
**get_package_info(exclude=['astropy_helpers.tests'])
)
| bsd-3-clause | 1,800,023,496,086,649,600 | 38.037736 | 78 | 0.672789 | false | 4.129741 | false | false | false |
lainegates/DDA | loadDataTools.py | 1 | 41566 | # coding=gbk
#***************************************************************************
#* *
#* Copyright (c) 2009, 2010 *
#* Xiaolong Cheng <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCADGui
from PyQt4 import QtCore , QtGui
import Base
from Base import showErrorMessageBox
import DDADatabase
def checkFileExists(path):
import os
if not os.path.isfile(path):
showErrorMessageBox("FileError" , "File \"%s\" doesn't exist"%path)
return False
return True
class FileReader():
'''
read files , this class will omit the blank lines
'''
def __init__(self):
self.__fileName = None
self.__file = None
def setFile(self, fileName):
self.__fileName = fileName
try:
self.__file = open(self.__fileName , 'rb')
except:
showErrorMessageBox('file open error' , fileName + ' open failed')
return False
return True
def getNextLine(self):
line = self.__file.readline()
while len(line)!=0:
line = line.strip()
if len(line)==0: # blank line with '\n'
line = self.__file.readline()
else:
break # this line is not blank
if len(line)==0: # file already ends
import Base
            Base.showErrorMessageBox('file error' , 'invalid data')
raise
return line
def closeFile(self):
self.__file.close()
class Block:
def __init__(self):
self.blockIndex = 0 # the index of this block
self.startNo = 0
self.endNo = 0
self.vertices = []
self.parameters = []
self.stressX = 0
self.stressY = 0
self.stressXY = 0
self.materialNo = 0 # used in dc result
# count how many hole points are on this block
self.holePointsCount = 0
def getPoints(self):
return [(t[1],t[2],0) for t in self.vertices]
def visible(self):
if self.holePointsCount>0:
return False
elif self.holePointsCount==0:
return True
else :
raise Exception('unvalid value %f'% self.holePointsCount)
class DDALine:
def __init__(self , p1 , p2 , materialNo):
self.startPoint = p1
self.endPoint = p2
self.materialNo = materialNo
self.visible = True
class BoltElement(DDALine):
def __init__(self , p1 , p2 , e , t , f):
DDALine.__init__(self, p1, p2, 0)
self.e = e
self.t = t
self.f = f
class DDAPolyLine:
def __init__(self , pts , materialNo):
self.pts = pts
self.materialNo = materialNo
self.visible = True
class DDAPoint:
def __init__(self , x=0 , y=0):
self.x = x
self.y = y
self.Xspeed = 0
self.Yspeed = 0
self.blockNo = 0
self.visible = True
class FixedPoint(DDAPoint):
pass
class LoadingPoint(DDAPoint):
pass
class MeasuredPoint(DDAPoint):
def __init__(self):
DDAPoint.__init__(self)
self.u = 0
self.v = 0
self.r = 0
self.stressX = 0
self.stressY = 0
self.stressXY = 0
class HolePoint(DDAPoint):
pass
class Graph:
def __init__(self):
self.blocks = []
self.fixedPoints = []
self.measuredPoints = []
self.loadingPoints = []
self.holePoints = []
self.boltElements = []
def reset(self):
self.blocks = []
self.fixedPoints = []
self.measuredPoints = []
self.loadingPoints = []
self.boltElements = []
class BaseParseData():
'''
parse data loaded , data may be DL data , DC data etc.
'''
def parse(self , filename):
'''
        abstract function , overridden by subclasses
'''
pass
def parseFloatNum(self , numStr , itemName='None'):
try:
num = float(numStr)
except:
try:
num = int(numStr)
except:
showErrorMessageBox( 'InputError' , itemName + ' should be a float number')
return None
return num
def parseIntNum(self , numStr , itemName='None'):
try:
num = int(numStr)
except:
showErrorMessageBox( 'InputError' , itemName + ' should be a integer')
return None
return num
class ParseAndLoadDLData(BaseParseData):
'''
parse DL data
'''
def __init__(self):
self.reset()
self.__fileReader = FileReader()
def GetResources(self):
return {
'Pixmap' : 'LoadDLInput',
'MenuText': 'LoadDCInputData',
'ToolTip': "Load DC Input Data"}
def Activated(self):
from Base import __currentProjectPath__
if self.parse(__currentProjectPath__ + '/data.dl'):
self.save2Database()
import Base
Base.changeStep4Stage('ShapesAvailable')
def reset(self):
self.checkStatus = False
self.__miniLength = 0
self.__jointSetNum = 0
self.__boundaryNodeNum = 0
self.__tunnelNum = 0
self.__addtionalLineNum = 0
self.__materialLineNum = 0
self.__boltElementNum = 0
self.__fixedPointNum = 0
self.__loadingPointNum = 0
self.__measuredPointNum = 0
self.__holePointNum = 0
self.__jointSets = []
self.__slope = []
self.__boundaryNodes = []
self.__tunnels = []
self.__additionalLines = []
self.__materialLines = []
self.__boltElements = []
self.__fixedPoints = []
self.__loadingPoints = []
self.__measuredPoints = []
self.__holePoints = []
def parse(self , filename ):
'''
parse DL data
:param filename: the data file name
'''
self.reset()
if not self.__fileReader.setFile(filename):
return False
if not self.__parsePandect():
return False
if not self.__parseJointSets():
return False
if not self.__parseBoundaryNodes():
return False
if not self.__parseTunnels():
return False
if not self.__parseLines():
return False
if not self.__parsePoints():
return False
self.__fileReader.closeFile()
return True
def __parseJointSets(self):
'''
parse joint sets
'''
# joint dip , dip direction
for i in range(self.__jointSetNum):
self.__jointSets.append(range(6))
tmpNums = self.__jointSets[-1]
str = self.__fileReader.getNextLine()
nums = str.strip().split()
tmpNums[0] = self.parseFloatNum(nums[0], 'joint dip')
tmpNums[1] = self.parseFloatNum(nums[1], 'dip direction')
if tmpNums[0] == None or tmpNums[1] == None :
return False
print 'joint %d : ( %f , %f)'%( i , tmpNums[0],tmpNums[1])
# slope dip , dip direction
tmpNumbers = [0 , 1]
str = self.__fileReader.getNextLine()
nums = str.strip().split()
tmpNumbers[0] = self.parseFloatNum(nums[0], 'slope dip')
tmpNumbers[1] = self.parseFloatNum(nums[1], 'dip direction')
if tmpNumbers[0] == None or tmpNumbers[1] == None :
return False
print 'slope : ( %f , %f)'%(tmpNumbers[0],tmpNumbers[1])
self.__slope.append((tmpNumbers[0],tmpNumbers[1]))
for i in range(self.__jointSetNum):
tmpNums = self.__jointSets[i]
str = self.__fileReader.getNextLine()
nums = str.strip().split()
tmpNums[2] = self.parseFloatNum(nums[0], 'spacing')
tmpNums[3] = self.parseFloatNum(nums[1], 'length')
tmpNums[4] = self.parseFloatNum(nums[2], 'bridge')
tmpNums[5] = self.parseFloatNum(nums[3], 'random')
if tmpNums[2] == None or tmpNums[3] == None or tmpNums[4] == None or tmpNums[5] == None :
return False
print 'joint %d parameter : ( %f , %f , %f , %f)'%(i , tmpNums[2],tmpNums[3],tmpNums[4],tmpNums[5])
return True
def __parseBoundaryNodes(self ):
'''
parse boundary nodes
'''
for i in range(self.__boundaryNodeNum):
str = self.__fileReader.getNextLine()
nums = str.strip().split()
tmpNums = [0 , 1 , 0]
tmpNums[0] = self.parseFloatNum(nums[0], 'coordinate number')
tmpNums[1] = self.parseFloatNum(nums[1], 'coordinate number')
if tmpNums[0] == None or tmpNums[1] == None :
return False
print 'boundary line %d : (%f , %f)'%(i , tmpNums[0] , tmpNums[1])
self.__boundaryNodes.append(tmpNums)
return True
def __parseTunnels(self ):
'''
parse tunnels
'''
for i in range(self.__tunnelNum):
# tunnel shape number
str = self.__fileReader.getNextLine()
shapeNo = self.parseIntNum(str, 'tunnel shape number')
if shapeNo == None :
return False
# tunnel a b c r
tmpNums = range(4)
str = self.__fileReader.getNextLine()
names = ['a' , 'b' , 'c' , 'r']
nums = str.strip().split()
for j in range(4):
tmpNums[j] = self.parseFloatNum(nums[j], 'tunnel ' +names[j])
if tmpNums[j] == None :
return False
# tunnel center
center = [0 , 1]
str = self.__fileReader.getNextLine()
nums = str.strip().split()
for j in range(2):
center[j] = self.parseFloatNum(nums[j], 'tunnel center number')
if center[j] == None :
return False
print 'tunnel %d : (%f , %f , %f , %f , %f , %f , %f)'%(i , shapeNo , tmpNums[0] , tmpNums[1] , tmpNums[2] , tmpNums[3] , center[0] , center[1])
self.__tunnels.append((shapeNo , tmpNums[0] , tmpNums[1] , tmpNums[2] , tmpNums[3] , center[0] , center[1]))
return True
def __parseLines(self ):
'''
        parse material lines , additional lines
'''
tmpNums = range(4)
# additional line
for i in range(self.__addtionalLineNum):
str = self.__fileReader.getNextLine()
nums = str.strip().split()
for j in range(4):
tmpNums[j] = self.parseFloatNum(nums[j], 'additional line coordinate number')
if tmpNums[j] == None :
return False
materialNo = self.parseFloatNum(nums[4], 'additional line material number')
if materialNo == None :
return False
print 'additional line %d :(%f , %f , %f , %f , %f)'%(i , tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , materialNo)
self.__additionalLines.append((tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , materialNo))
# material line
for i in range(self.__materialLineNum):
str = self.__fileReader.getNextLine()
nums = str.strip().split()
for j in range(4):
tmpNums[j] = self.parseFloatNum(nums[j], 'material line coordinate number')
if tmpNums[j] == None :
return False
materialNo = self.parseFloatNum(nums[4], 'block material number')
if materialNo == None :
return False
print 'block material %d :(%f , %f , %f , %f , %f)'%(i , tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , materialNo)
self.__materialLines.append((tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , materialNo))
# bolt elements
for i in range(self.__boltElementNum):
str = self.__fileReader.getNextLine()
nums = str.strip().split()
for j in range(4):
tmpNums[j] = self.parseFloatNum(nums[j], 'bolt element coordinate number')
if tmpNums[j] == None :
return False
e0 = self.parseFloatNum(nums[4], 'bolt element e0')
t0 = self.parseFloatNum(nums[5], 'bolt element t0')
f0 = self.parseFloatNum(nums[6], 'bolt element f0')
            if e0 == None or t0 == None or f0 == None :
return False
            print 'bolt element %d :(%f , %f , %f , %f , %f , %f , %f)'%(i , tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , e0 , t0 , f0)
self.__boltElements.append((tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , e0 , t0 , f0))
return True
def __parsePoints(self):
'''
parse points , fixed points , loading points , measured points , hole points
:param file: input dl file
'''
tmpNums = range(4)
# fixed points
for i in range(self.__fixedPointNum):
str = self.__fileReader.getNextLine()
nums = str.strip().split()
for j in range(4):
tmpNums[j] = self.parseFloatNum(nums[j], 'fixed point coordinate number')
if tmpNums[j] == None :
return False
print 'fixed line %d : (%f , %f , %f , %f)'%(i , tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3])
self.__fixedPoints.append((tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3]))
# measured points
itemNames = ['loading point' , 'measured point' , 'hole point']
realNums = [self.__loadingPointNum , self.__measuredPointNum , self.__holePointNum]
for k in range(len(itemNames)):
for i in range(realNums[k]):
str = self.__fileReader.getNextLine()
nums = str.strip().split()
for j in range(2):
tmpNums[j] = self.parseFloatNum(nums[j], itemNames[k] +' coordinate number')
if tmpNums[j] == None :
return False
print '%s %d : (%f , %f)'%(itemNames[k] , i , tmpNums[0] , tmpNums[1])
if k==0 : self.__loadingPoints.append((tmpNums[0] , tmpNums[1]))
elif k==1 : self.__measuredPoints.append((tmpNums[0] , tmpNums[1]))
elif k==2 : self.__holePoints.append((tmpNums[0] , tmpNums[1]))
return True
def __parsePandect(self):
'''
parse Numbers , for example , number of joint set
'''
self.__miniLength = self.parseFloatNum(self.__fileReader.getNextLine(), 'minimun edge length')
if self.__miniLength == None :
return False
self.__jointSetNum = self.parseIntNum(self.__fileReader.getNextLine(), 'joint set number')
if self.__jointSetNum == None:
return False
self.__boundaryNodeNum = self.parseIntNum(self.__fileReader.getNextLine(), 'boundary line number')
if self.__boundaryNodeNum == None:
return False
self.__tunnelNum = self.parseIntNum(self.__fileReader.getNextLine(), 'tunnel number')
if self.__tunnelNum == None:
return False
self.__addtionalLineNum = self.parseIntNum(self.__fileReader.getNextLine(), 'additional line number')
if self.__addtionalLineNum == None:
return False
self.__materialLineNum = self.parseIntNum(self.__fileReader.getNextLine(), 'material line number')
if self.__materialLineNum == None:
return False
self.__boltElementNum = self.parseIntNum(self.__fileReader.getNextLine(), 'bolt element number')
if self.__boltElementNum == None:
return False
self.__fixedPointNum = self.parseIntNum(self.__fileReader.getNextLine(), 'fixed point number')
if self.__fixedPointNum == None:
return False
self.__loadingPointNum = self.parseIntNum(self.__fileReader.getNextLine(), 'loading point number')
if self.__loadingPointNum == None:
return False
self.__measuredPointNum = self.parseIntNum(self.__fileReader.getNextLine(), 'measured point number')
if self.__measuredPointNum == None:
return False
self.__holePointNum = self.parseIntNum(self.__fileReader.getNextLine(), 'hole point number')
if self.__holePointNum == None:
return False
return True
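    # Illustrative sketch (values invented, not from a real project): the leading
    # lines of a data.dl file are consumed by __parsePandect in exactly this order:
    #   0.5   minimum edge length
    #   2     joint set number
    #   4     boundary line number
    #   1     tunnel number
    #   0     additional line number
    #   0     material line number
    #   0     bolt element number
    #   3     fixed point number
    #   1     loading point number
    #   1     measured point number
    #   0     hole point number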
def save2Database(self):
'''
save data to DDADatabase.dl_database
'''
from DDAShapes import DDAJointSets , DDATunnels
DDADatabase.dl_database = DDADatabase.DLDatabase()
database = DDADatabase.dl_database
database.jointSets = self.__jointSets
DDAJointSets.dataTable.refreshData(database.jointSets)
database.slope = self.__slope
DDAJointSets.slopeDataTable.refreshData(database.slope)
database.tunnels = self.__tunnels
DDATunnels.dataTable.refreshData(database.tunnels)
# boundaryNodes
pts = [tuple(p) for p in self.__boundaryNodes]
pts.append(pts[0])
database.boundaryNodes = [DDAPolyLine( pts, 1)]
# additional lines
database.additionalLines = \
[DDALine((p[0],p[1],0) , (p[2],p[3],0) , p[4]) for p in self.__additionalLines]
# material line
database.materialLines = \
[DDALine((p[0],p[1],0) , (p[2],p[3],0) , p[4]) for p in self.__materialLines]
# bolt element
database.boltElements = \
[BoltElement((p[0],p[1],0) , (p[2],p[3],0) , p[4] , p[5] , p[6]) for p in self.__boltElements]
# points
database.fixedPoints = [DDAPoint(t[0],t[1]) for t in self.__fixedPoints]
database.loadingPoints = [DDAPoint(t[0],t[1]) for t in self.__loadingPoints]
database.measuredPoints = [DDAPoint(t[0],t[1]) for t in self.__measuredPoints]
database.holePoints = [DDAPoint(t[0],t[1]) for t in self.__holePoints]
self.reset()
import Base
Base.refreshAllShapes()
class ParseDFInputParameters(BaseParseData):
def __init__(self):
self.__file = None
self.reset()
def reset(self):
from DDADatabase import df_inputDatabase
self.paras = df_inputDatabase.paras
self.paras.reset()
def __parseParaSchema(self):
'''
parse parameters from DF parameters file
:param infile:
'''
for i in range(7):
line = self.__file.getNextLine()
t =self.parseFloatNum(line)
if t==None: return False
if i==0: self.paras.ifDynamic = float(t)
elif i==1: self.paras.stepsNum = int(t)
elif i==2: self.paras.blockMatsNum = int(t)
elif i==3: self.paras.jointMatsNum = int(t)
elif i==4: self.paras.ratio = t
elif i==5: self.paras.OneSteptimeLimit = int(t)
else: self.paras.springStiffness = int(t)
print 'DF Para : IfDynamic: %d steps: %d blockMats: %d JointMats: %d Ratio: %f timeInterval: %d stiffness: %d'\
%(self.paras.ifDynamic, self.paras.stepsNum , self.paras.blockMatsNum , self.paras.jointMatsNum \
, self.paras.ratio, self.paras.OneSteptimeLimit, self.paras.springStiffness)
print 'Df parameters schema done'
return True
def __parsePointsParameters(self):
'''
parse parameters for fixed points and loading points
:param infile:
'''
# parse fixed points and loading points' type 0 : fixed points , 2: loading points
# fixed points
from DDADatabase import df_inputDatabase
if len(df_inputDatabase.fixedPoints)>0:
line = self.__file.getNextLine()
nums = line.split()
for i in nums:
if self.parseIntNum(i)==None :
return False
print nums
# loading points
if len(df_inputDatabase.loadingPoints)>0:
line = self.__file.getNextLine()
nums = line.split()
for i in nums:
if self.parseIntNum(i)==None :
return False
print nums
# parse loading points parameters (starttime , stressX , stressY , endtime , stressX , stressY)
for i in range(len(df_inputDatabase.loadingPoints)):
digits = [1]*6
line1 = self.__file.getNextLine()
nums1 = line1.split()
line2 = self.__file.getNextLine()
nums2 = line2.split()
for j in range(3):
digits[j] = self.parseIntNum(nums1[j])
digits[j+3] = self.parseIntNum(nums2[j])
if None in digits:
return False
self.paras.loadingPointMats.append(digits)
print nums1 , nums2
print 'fixed points and loading points done.'
return True
def __parseBlocksAndJointsPara(self):
'''
parse parameters for blocks and joints'
:param infile:
'''
for i in range(self.paras.blockMatsNum):
digits = [1]*14
line1 = self.__file.getNextLine()
nums1 = line1.split()
for j in range(5):
digits[j] = self.parseFloatNum(nums1[j])
line2 = self.__file.getNextLine()
nums2 = line2.split()
line3 = self.__file.getNextLine()
nums3 = line3.split()
line4 = self.__file.getNextLine()
nums4 = line4.split()
for j in range(3):
digits[j+5] = self.parseFloatNum(nums2[j])
digits[j+8] = self.parseFloatNum(nums3[j])
digits[j+11] = self.parseFloatNum(nums4[j])
if None in digits:
return False
self.paras.blockMats.append(digits)
print digits
for i in range(self.paras.jointMatsNum):
digits = [1]*3
line = self.__file.getNextLine()
nums = line.split()
for j in range(3):
digits[j] = self.parseFloatNum(nums[j])
if None in digits:
return False
self.paras.jointMats.append(digits)
print digits
print 'DF blocks and block vertices\' parameters done.'
return True
def __parseRestPara(self ):
'''
parse SOR and axes
:param infile:
'''
# parse SOR
line = self.__file.getNextLine()
self.paras.SOR = self.parseFloatNum(line)
if self.paras.SOR==None: return False
print 'SOR : ' , self.paras.SOR
line = self.__file.getNextLine()
nums = line.split()
for i in range(3):
if self.parseFloatNum(nums[i])==None:
return False
print nums
print 'DF parameters all done.'
return True
def parse(self , path = None):
self.reset()
        if not path: path = Base.__currentProjectPath__ + '/parameters.df'
if not checkFileExists(path):
return False
import Base
self.__file = FileReader()
self.__file.setFile(path)
if not self.__parseParaSchema() or not self.__parsePointsParameters() \
or not self.__parseBlocksAndJointsPara() or not self.__parseRestPara():
return False
return True
class ParseDFInputGraphData(BaseParseData):
def __init__(self):
self.__fileReader = None
def GetResources(self):
return {
'Pixmap' : 'LoadDFInput',
'MenuText': 'LoadDFInputData',
'ToolTip': "Load DF Input Data"}
def Activated(self):
self.parse()
import Base
Base.changeStep4Stage('ShapesAvailable')
def finish(self):
pass
def parse(self , path=None):
self.refreshBlocksData()
import Base
if not path : path = Base.__currentProjectPath__+'/data.df'
if not checkFileExists(path):
return False
file = open(path , "rb")
if not self.__parseDataSchema(file) or not self.__parseBlocks(file) or \
not self.__parseBlockVertices(file) or not self.__parseBoltElements(file) \
or not self.__parsePoints(file):
Base.showErrorMessageBox("DataError", 'Data input unvalid')
return False
return True
def refreshBlocksData(self):
import Base
self.graph = Base.getDatabaser4CurrentStage()
self.graph.reset()
self.blocksNum = 0
self.blockVerticesNum = 0
self.fixedPointsNum = 0
self.loadingPointsNum = 0
self.measuredPointsNum = 0
self.boltElementsNum = 0
def __parseDataSchema(self , infile):
line = infile.readline()
nums = line.split()
self.blocksNum = self.parseIntNum(nums[0])
self.boltElementsNum = self.parseIntNum(nums[1])
self.blockVerticesNum = self.parseIntNum(nums[2])
line = infile.readline()
nums = line.split()
self.fixedPointsNum = self.parseIntNum(nums[0])
self.loadingPointsNum = self.parseIntNum(nums[1])
self.measuredPointsNum = self.parseIntNum(nums[2])
if None in [self.blocksNum , self.boltElementsNum , self.blockVerticesNum \
, self.fixedPointsNum , self.loadingPointsNum , self.measuredPointsNum]:
return False
print 'DF data : blocks : %d bolts : %d vertices : %d fixed Pnts :%d LoadingPnts :%d MeasuredPnts: %d' \
%(self.blocksNum , self.boltElementsNum , self.blockVerticesNum \
, self.fixedPointsNum , self.loadingPointsNum , self.measuredPointsNum)
return True
def __parseBlocks(self , infile):
'''
parsing blocks and try to get the maximum material No
:param infile:
'''
from DDADatabase import df_inputDatabase
df_inputDatabase.blockMatCollections = set()
blockMatCollection = df_inputDatabase.blockMatCollections
for i in range(0 , self.blocksNum):
line = infile.readline()
nums = line.split()
# get blocks' vertices' material No
t0 = self.parseIntNum(nums[0])
t1 = self.parseIntNum(nums[1])
t2 = self.parseIntNum(nums[2])
if t0==None or t1==None or t2==None:
return False
tmpB = Block()
tmpB.materialNo = t0
tmpB.startNo = t1
tmpB.endNo = t2
blockMatCollection.add(t0)
self.graph.blocks.append(tmpB )
# print line ,
print 'DF blocks Info done.'
return True
def __parseBlockVertices(self,infile):
'''
parsing blocks' vertices and try to get the maximum material No
:param infile:
'''
from DDADatabase import df_inputDatabase
df_inputDatabase.jointMatCollections =set()
jointMatCollection = df_inputDatabase.jointMatCollections
ptsBounds = range(4)
for i in range(self.blocksNum):
tmpB = self.graph.blocks[i]
for j in range(int(tmpB.endNo) - int(tmpB.startNo) +1): # read blocks vertices
line = infile.readline()
# print line
nums = line.split()
# get joint material No
t0 = int(self.parseFloatNum(nums[0]))
t1 = self.parseFloatNum(nums[1])
t2 = self.parseFloatNum(nums[2])
if t0==None or t1==None or t2==None:
return False
tmpB.vertices.append( (t0,t1,t2) )
jointMatCollection.add(t0)
# get vertices' value boundary
if i==0:
ptsBounds[0]=ptsBounds[1] = t1
                    ptsBounds[2] = ptsBounds[3] = t2
else:
if t1<ptsBounds[0]: ptsBounds[0]=t1
elif t1>ptsBounds[1]: ptsBounds[1]=t1
elif t2<ptsBounds[2]: ptsBounds[2]=t2
elif t2>ptsBounds[3]: ptsBounds[3]=t2
for i in range(4): # block parameters
line = infile.readline()
# print line
nums = line.split()
t0 = self.parseFloatNum(nums[0])
t1 = self.parseFloatNum(nums[1])
t2 = self.parseFloatNum(nums[2])
if t0==None or t1==None or t2==None:
return False
tmpB.parameters.extend([t0,t1,t2])
import Base
margin = ptsBounds[1]-ptsBounds[0]
if margin > (ptsBounds[3]-ptsBounds[2]):
margin = ptsBounds[3]-ptsBounds[2]
Base.__radius4Points__ = margin/60
print 'DF blocks vertices data done.'
return True
def __parseBoltElements(self , infile):
for i in range(self.boltElementsNum):
for j in range(3):
line = infile.readline()
print ' %d bolt elements parsed done'%self.boltElementsNum
return True
def parse1Point(self , line , point):
#print line ,
nums = line.split()
point.x = self.parseFloatNum(nums[0])
point.y = self.parseFloatNum(nums[1])
point.blockNo = int(self.parseFloatNum(nums[2]))
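    # Illustrative sketch (values invented): a point record line such as
    #   "12.5 3.75 7"
    # is parsed by parse1Point into x=12.5, y=3.75, blockNo=7.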
def __parsePoints(self , infile):
'''
parsing fixed , loading , and measured points
:param infile:
'''
for i in range(self.fixedPointsNum):
pnt = FixedPoint()
line = infile.readline()
self.parse1Point(line , pnt)
self.graph.fixedPoints.append(pnt)
print ' fixed points : %d done'%self.fixedPointsNum
for i in range(self.loadingPointsNum):
pnt = LoadingPoint()
line = infile.readline()
self.parse1Point(line , pnt)
self.graph.loadingPoints.append(pnt)
print ' loading points : %d done'%self.loadingPointsNum
for i in range(self.measuredPointsNum):
pnt = MeasuredPoint()
line = infile.readline()
self.parse1Point(line , pnt)
self.graph.measuredPoints.append(pnt)
print ' measured points : %d done'%self.measuredPointsNum
print 'DF points done.'
return True
class ParseAndLoadDCInputData(BaseParseData):
def __init__(self):
self.reset()
self.__fileReader = FileReader()
self.database = None
def GetResources(self):
return {
'Pixmap' : 'LoadDCInput',
'MenuText': 'LoadDCInputData',
'ToolTip': "Load DC Input Data"}
def Activated(self):
self.parse()
import Base
Base.changeStep4Stage('SpecialStep')
import Base
database = Base.getDatabaser4CurrentStage()
database.clearRedoUndoList()
def finish(self):
pass
def reset(self):
self.jointLinesNum = 0
self.materialLinesNum = 0
self.additionalLinesNum = 0
self.boltElementsNum = 0
self.fixedPointsNum = 0
self.loadingPointsNum = 0
self.measuredPointsNum = 0
self.holePointsNum = 0
def __ParsePandect(self):
# from DDADatabase import dc_inputDatabase
self.__fileReader.getNextLine() # minimum edge length e0
nums = self.__fileReader.getNextLine().split()
self.jointLinesNum = self.parseIntNum(nums[0])
        # temporary code, I will try to revise this if I fully understand the data.dc
self.database.boundaryLinesNum = self.parseIntNum(nums[1])
nums = self.__fileReader.getNextLine()
self.materialLinesNum = self.parseIntNum(nums)
nums = self.__fileReader.getNextLine()
self.boltElementsNum = self.parseIntNum(nums)
nums = self.__fileReader.getNextLine()
self.fixedPointsNum = self.parseIntNum(nums)
nums = self.__fileReader.getNextLine()
self.loadingPointsNum = self.parseIntNum(nums)
nums = self.__fileReader.getNextLine()
self.measuredPointsNum = self.parseIntNum(nums)
nums = self.__fileReader.getNextLine()
self.holePointsNum = self.parseIntNum(nums)
def __parseLines(self):
# from DDADatabase import dc_inputDatabase
# joint lines
self.database.jointLines = []
for i in range(self.jointLinesNum):
nums = self.__fileReader.getNextLine().split()
jointMaterial = int(self.parseFloatNum(nums[4]))
p1 = ( self.parseFloatNum(nums[0]) , self.parseFloatNum(nums[1]) , 0 )
p2 = ( self.parseFloatNum(nums[2]) , self.parseFloatNum(nums[3]) , 0 )
self.database.jointLines.append(DDALine(p1 , p2 , jointMaterial))
# material lines
self.database.materialLines = []
for i in range(self.materialLinesNum):
self.__fileReader.getNextLine()
# bolt elements
tmpNums = range(4)
self.database.boltElements = []
for i in range(self.boltElementsNum):
nums = self.__fileReader.getNextLine().split()
for j in range(4):
tmpNums[j] = self.parseFloatNum(nums[j], 'bolt element coordinate number')
if tmpNums[j] == None :
return False
e0 = self.parseFloatNum(nums[4], 'bolt element e0')
t0 = self.parseFloatNum(nums[5], 'bolt element t0')
f0 = self.parseFloatNum(nums[6], 'bolt element f0')
if e0==None or t0==None or f0==None :
return False
            print 'bolt element %d :(%f , %f , %f , %f , %f , %f , %f)'%(i , tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , e0 , t0 , f0)
self.database.boltElements.append(BoltElement(tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , e0 , t0 , f0))
def __parsePoints(self):
# from DDADatabase import dc_inputDatabase
import Base
# fixed points
windowInfo = [0 , 0 , 0 , 0]
nums = self.__fileReader.getNextLine().split()
p = (self.parseFloatNum(nums[0]) , self.parseFloatNum(nums[1]) , 0)
self.database.fixedPoints.append( FixedPoint(p[0] , p[1]))
windowInfo[0] = windowInfo[1] = p[0]
windowInfo[2] = windowInfo[3] = p[1]
for i in range(self.fixedPointsNum-1):
nums = self.__fileReader.getNextLine().split()
p = (self.parseFloatNum(nums[0]) , self.parseFloatNum(nums[1]) , 0)
if p[0]<windowInfo[0]:windowInfo[0] = p[0]
if p[0]>windowInfo[1]:windowInfo[1] = p[0]
if p[1]<windowInfo[2]:windowInfo[2] = p[1]
if p[1]>windowInfo[3]:windowInfo[3] = p[1]
self.database.fixedPoints.append( FixedPoint(p[0] , p[1]))
Base.__radius4Points__ = (windowInfo[1] - windowInfo[0]) * 0.01
Base.__windowInfo__ = windowInfo
# loading points
for i in range(self.loadingPointsNum):
nums = self.__fileReader.getNextLine().split()
self.database.loadingPoints.append( \
LoadingPoint(self.parseFloatNum(nums[0]) , self.parseFloatNum(nums[1])))
# measured points
for i in range(self.measuredPointsNum):
nums = self.__fileReader.getNextLine().split()
self.database.measuredPoints.append( \
MeasuredPoint(self.parseFloatNum(nums[0]) , self.parseFloatNum(nums[1])))
# hole points
for i in range(self.holePointsNum):
nums = self.__fileReader.getNextLine().split()
self.database.holePoints.append( \
HolePoint(self.parseFloatNum(nums[0]) , self.parseFloatNum(nums[1])))
def parse(self):
import Base
filename = Base.__currentProjectPath__ + '/data.dc'
print 'try to read DC data from file : ' , filename
# filename = Base.__currentProjectPath__ + '/tmpData.dc'
self.__fileReader.setFile(filename)
import DDADatabase
self.database = DDADatabase.DCInputDatabase()
self.reset()
self.__ParsePandect()
self.__parseLines()
self.__parsePoints()
self.__fileReader.closeFile()
DDADatabase.dc_inputDatabase = self.database
self.database = None
class DDALoadData():
def __init__(self):
self.current_path = Base.__currentProjectPath__
def changeStage( self ):
if Base.__currentStage__ == 'DL': # DL stage
print 'switch to DL stage'
self.parseData = ParseAndLoadDLData()
elif Base.__currentStage__ == 'DC': # DC stage
pass
def GetResources(self):
return {
'MenuText': 'Load',
'ToolTip': "Load DL data."}
def __storeFileName(self , filename):
'''
store the name of file which is being loaded
'''
file = open(self.current_path+'\\Ff.c' , 'wb')
file.write(filename.strip().split('/')[-1])
file.close()
def __confirmLoadFile(self):
'''
        if a new data file is loaded, old shapes will be cleared, so we first have to make sure the user wants to do this.
'''
box = QtGui.QMessageBox()
        box.setText('New data will be imported, and old shapes will be wiped.')
box.setInformativeText('Do you want to do this?')
box.setStandardButtons(QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel)
box.setDefaultButton(QtGui.QMessageBox.Ok)
ret = box.exec_()
if ret == QtGui.QMessageBox.Ok:
return True
return False
def Activated(self):
self.changeStage()
filename = str( QtGui.QFileDialog.getOpenFileName(None , 'please select input file' , self.current_path) )
if not self.parseData.parse(filename):
self.parseData.reset()
print 'input data status : invalid'
return False
print 'input data status : ok'
if self.__confirmLoadFile():
self.__storeFileName(filename)
self.parseData.save2Database()
FreeCADGui.DDADisplayCmd.preview()
def finish(self):
pass
FreeCADGui.addCommand('DDA_LoadDLInputData', ParseAndLoadDLData())
FreeCADGui.addCommand('DDA_Load', DDALoadData())
FreeCADGui.addCommand('DDA_LoadDCInputData', ParseAndLoadDCInputData())
FreeCADGui.addCommand('DDA_LoadDFInputGraphData', ParseDFInputGraphData()) | lgpl-2.1 | -3,771,634,351,096,124,000 | 36.414041 | 156 | 0.529423 | false | 3.983707 | false | false | false |
TheDSCPL/SSRE_2017-2018_group8 | Projeto/Python/cryptopy/crypto/cipher/rijndael.py | 1 | 14718 | # -*- coding: utf-8 -*-
""" crypto.cipher.rijndael
Rijndael encryption algorithm
This byte oriented implementation is intended to closely
match FIPS specification for readability. It is not implemented
for performance.
Copyright © (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
2002-06-01
"""
from crypto.cipher.base import BlockCipher, padWithPadLen, noPadding
class Rijndael(BlockCipher):
""" Rijndael encryption algorithm """
def __init__(self, key = None, padding = padWithPadLen(), keySize=16, blockSize=16 ):
self.name = 'RIJNDAEL'
self.keySize = keySize
self.strength = keySize*8
self.blockSize = blockSize # blockSize is in bytes
self.padding = padding # change default to noPadding() to get normal ECB behavior
        assert( keySize%4==0 and NrTable[4].has_key(keySize/4)),'key size must be 16,20,24,28 or 32 bytes'
        assert( blockSize%4==0 and NrTable.has_key(blockSize/4)), 'block size must be 16,20,24,28 or 32 bytes'
self.Nb = self.blockSize/4 # Nb is number of columns of 32 bit words
self.Nk = keySize/4 # Nk is the key length in 32-bit words
self.Nr = NrTable[self.Nb][self.Nk] # The number of rounds (Nr) is a function of
# the block (Nb) and key (Nk) sizes.
if key != None:
self.setKey(key)
def setKey(self, key):
""" Set a key and generate the expanded key """
assert( len(key) == (self.Nk*4) ), 'Key length must be same as keySize parameter'
self.__expandedKey = keyExpansion(self, key)
self.reset() # BlockCipher.reset()
def encryptBlock(self, plainTextBlock):
""" Encrypt a block, plainTextBlock must be a array of bytes [Nb by 4] """
self.state = self._toBlock(plainTextBlock)
AddRoundKey(self, self.__expandedKey[0:self.Nb])
        for round in range(1,self.Nr):               # for round = 1 step 1 to Nr-1
SubBytes(self)
ShiftRows(self)
MixColumns(self)
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
SubBytes(self)
ShiftRows(self)
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
return self._toBString(self.state)
def decryptBlock(self, encryptedBlock):
""" decrypt a block (array of bytes) """
self.state = self._toBlock(encryptedBlock)
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
for round in range(self.Nr-1,0,-1):
InvShiftRows(self)
InvSubBytes(self)
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
InvMixColumns(self)
InvShiftRows(self)
InvSubBytes(self)
AddRoundKey(self, self.__expandedKey[0:self.Nb])
return self._toBString(self.state)
def _toBlock(self, bs):
""" Convert binary string to array of bytes, state[col][row]"""
        assert ( len(bs) == 4*self.Nb ), 'Rijndael blocks must be of size blockSize'
return [[ord(bs[4*i]),ord(bs[4*i+1]),ord(bs[4*i+2]),ord(bs[4*i+3])] for i in range(self.Nb)]
def _toBString(self, block):
""" Convert block (array of bytes) to binary string """
l = []
for col in block:
for rowElement in col:
l.append(chr(rowElement))
return ''.join(l)
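# Illustrative usage (not part of the original module; assumes Python 2 byte
# strings and the default 16-byte key/block sizes):
#     r = Rijndael('0123456789abcdef')            # 16-byte key
#     ct = r.encryptBlock('sixteen byte blk')     # one raw 16-byte block, no padding
#     assert r.decryptBlock(ct) == 'sixteen byte blk'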
#-------------------------------------
""" Number of rounds Nr = NrTable[Nb][Nk]
Nb Nk=4 Nk=5 Nk=6 Nk=7 Nk=8
------------------------------------- """
NrTable = {4: {4:10, 5:11, 6:12, 7:13, 8:14},
5: {4:11, 5:11, 6:12, 7:13, 8:14},
6: {4:12, 5:12, 6:12, 7:13, 8:14},
7: {4:13, 5:13, 6:13, 7:13, 8:14},
8: {4:14, 5:14, 6:14, 7:14, 8:14}}
#-------------------------------------
def keyExpansion(algInstance, keyString):
""" Expand a string of size keySize into a larger array """
Nk, Nb, Nr = algInstance.Nk, algInstance.Nb, algInstance.Nr # for readability
key = [ord(byte) for byte in keyString] # convert string to list
w = [[key[4*i],key[4*i+1],key[4*i+2],key[4*i+3]] for i in range(Nk)]
for i in range(Nk,Nb*(Nr+1)):
temp = w[i-1] # a four byte column
if (i%Nk) == 0 :
temp = temp[1:]+[temp[0]] # RotWord(temp)
temp = [ Sbox[byte] for byte in temp ]
temp[0] ^= Rcon[i/Nk]
elif Nk > 6 and i%Nk == 4 :
temp = [ Sbox[byte] for byte in temp ] # SubWord(temp)
w.append( [ w[i-Nk][byte]^temp[byte] for byte in range(4) ] )
return w
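# keyExpansion() returns Nb*(Nr+1) four-byte words; encryptBlock()/decryptBlock()
# above consume Nb of those words per AddRoundKey step.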
Rcon = (0,0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,0x1b,0x36, # note extra '0' !!!
0x6c,0xd8,0xab,0x4d,0x9a,0x2f,0x5e,0xbc,0x63,0xc6,
0x97,0x35,0x6a,0xd4,0xb3,0x7d,0xfa,0xef,0xc5,0x91)
#-------------------------------------
def AddRoundKey(algInstance, keyBlock):
""" XOR the algorithm state with a block of key material """
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] ^= keyBlock[column][row]
#-------------------------------------
def SubBytes(algInstance):
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] = Sbox[algInstance.state[column][row]]
def InvSubBytes(algInstance):
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] = InvSbox[algInstance.state[column][row]]
Sbox = (0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,
0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76,
0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,
0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0,
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,
0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15,
0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,
0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75,
0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,
0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84,
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,
0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf,
0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,
0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8,
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,
0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2,
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,
0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73,
0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,
0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb,
0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,
0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79,
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,
0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08,
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,
0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a,
0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,
0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e,
0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,
0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf,
0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,
0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16)
InvSbox = (0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38,
0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb,
0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87,
0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb,
0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d,
0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e,
0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2,
0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25,
0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16,
0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92,
0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda,
0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84,
0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a,
0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06,
0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02,
0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b,
0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea,
0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73,
0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85,
0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e,
0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89,
0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b,
0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20,
0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4,
0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31,
0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f,
0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d,
0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef,
0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0,
0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61,
0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26,
0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d)
#-------------------------------------
""" For each block size (Nb), the ShiftRow operation shifts row i
by the amount Ci. Note that row 0 is not shifted.
Nb C1 C2 C3
------------------- """
shiftOffset = { 4 : ( 0, 1, 2, 3),
5 : ( 0, 1, 2, 3),
6 : ( 0, 1, 2, 3),
7 : ( 0, 1, 2, 4),
8 : ( 0, 1, 3, 4) }
def ShiftRows(algInstance):
tmp = [0]*algInstance.Nb # list of size Nb
    for r in range(1,4):    # row 0 remains unchanged and can be skipped
for c in range(algInstance.Nb):
tmp[c] = algInstance.state[(c+shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
for c in range(algInstance.Nb):
algInstance.state[c][r] = tmp[c]
def InvShiftRows(algInstance):
tmp = [0]*algInstance.Nb # list of size Nb
    for r in range(1,4):    # row 0 remains unchanged and can be skipped
for c in range(algInstance.Nb):
tmp[c] = algInstance.state[(c+algInstance.Nb-shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
for c in range(algInstance.Nb):
algInstance.state[c][r] = tmp[c]
#-------------------------------------
def MixColumns(a):
Sprime = [0,0,0,0]
for j in range(a.Nb): # for each column
Sprime[0] = mul(2,a.state[j][0])^mul(3,a.state[j][1])^mul(1,a.state[j][2])^mul(1,a.state[j][3])
Sprime[1] = mul(1,a.state[j][0])^mul(2,a.state[j][1])^mul(3,a.state[j][2])^mul(1,a.state[j][3])
Sprime[2] = mul(1,a.state[j][0])^mul(1,a.state[j][1])^mul(2,a.state[j][2])^mul(3,a.state[j][3])
Sprime[3] = mul(3,a.state[j][0])^mul(1,a.state[j][1])^mul(1,a.state[j][2])^mul(2,a.state[j][3])
for i in range(4):
a.state[j][i] = Sprime[i]
def InvMixColumns(a):
""" Mix the four bytes of every column in a linear way
This is the opposite operation of Mixcolumn """
Sprime = [0,0,0,0]
for j in range(a.Nb): # for each column
Sprime[0] = mul(0x0E,a.state[j][0])^mul(0x0B,a.state[j][1])^mul(0x0D,a.state[j][2])^mul(0x09,a.state[j][3])
Sprime[1] = mul(0x09,a.state[j][0])^mul(0x0E,a.state[j][1])^mul(0x0B,a.state[j][2])^mul(0x0D,a.state[j][3])
Sprime[2] = mul(0x0D,a.state[j][0])^mul(0x09,a.state[j][1])^mul(0x0E,a.state[j][2])^mul(0x0B,a.state[j][3])
Sprime[3] = mul(0x0B,a.state[j][0])^mul(0x0D,a.state[j][1])^mul(0x09,a.state[j][2])^mul(0x0E,a.state[j][3])
for i in range(4):
a.state[j][i] = Sprime[i]
#-------------------------------------
def mul(a, b):
""" Multiply two elements of GF(2^m)
needed for MixColumn and InvMixColumn """
if (a !=0 and b!=0):
return Alogtable[(Logtable[a] + Logtable[b])%255]
else:
return 0
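# Logtable/Alogtable below are discrete log / antilog tables over GF(2^8) with
# generator 0x03, so mul() computes a*b as Alogtable[(log(a) + log(b)) % 255].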
Logtable = ( 0, 0, 25, 1, 50, 2, 26, 198, 75, 199, 27, 104, 51, 238, 223, 3,
100, 4, 224, 14, 52, 141, 129, 239, 76, 113, 8, 200, 248, 105, 28, 193,
125, 194, 29, 181, 249, 185, 39, 106, 77, 228, 166, 114, 154, 201, 9, 120,
101, 47, 138, 5, 33, 15, 225, 36, 18, 240, 130, 69, 53, 147, 218, 142,
150, 143, 219, 189, 54, 208, 206, 148, 19, 92, 210, 241, 64, 70, 131, 56,
102, 221, 253, 48, 191, 6, 139, 98, 179, 37, 226, 152, 34, 136, 145, 16,
126, 110, 72, 195, 163, 182, 30, 66, 58, 107, 40, 84, 250, 133, 61, 186,
43, 121, 10, 21, 155, 159, 94, 202, 78, 212, 172, 229, 243, 115, 167, 87,
175, 88, 168, 80, 244, 234, 214, 116, 79, 174, 233, 213, 231, 230, 173, 232,
44, 215, 117, 122, 235, 22, 11, 245, 89, 203, 95, 176, 156, 169, 81, 160,
127, 12, 246, 111, 23, 196, 73, 236, 216, 67, 31, 45, 164, 118, 123, 183,
204, 187, 62, 90, 251, 96, 177, 134, 59, 82, 161, 108, 170, 85, 41, 157,
151, 178, 135, 144, 97, 190, 220, 252, 188, 149, 207, 205, 55, 63, 91, 209,
83, 57, 132, 60, 65, 162, 109, 71, 20, 42, 158, 93, 86, 242, 211, 171,
68, 17, 146, 217, 35, 32, 46, 137, 180, 124, 184, 38, 119, 153, 227, 165,
103, 74, 237, 222, 197, 49, 254, 24, 13, 99, 140, 128, 192, 247, 112, 7)
Alogtable= ( 1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53,
95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170,
229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49,
83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205,
76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136,
131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154,
181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163,
254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160,
251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65,
195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117,
159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128,
155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84,
252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202,
69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14,
18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23,
57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1)
| mit | 4,782,612,339,069,262,000 | 49.927336 | 115 | 0.547901 | false | 2.290383 | false | false | false |
Ninad998/FinalYearProject | deep_stylo/migrations/0001_initial.py | 1 | 1563 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-24 16:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Result',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('doc_id', models.IntegerField()),
('authorList', models.CharField(max_length=200)),
('predicted_author', models.CharField(max_length=200, null=True)),
('train_accuracy', models.DecimalField(decimal_places=10, max_digits=11, null=True)),
('validation_accuracy', models.DecimalField(decimal_places=10, max_digits=11, null=True)),
('test_accuracy', models.DecimalField(decimal_places=10, max_digits=11, null=True)),
('test_binary', models.DecimalField(decimal_places=1, max_digits=2, null=True)),
('upload_date', models.DateTimeField(default=django.utils.timezone.now)),
('status', models.DecimalField(decimal_places=1, default=0.0, max_digits=2)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| mit | -7,605,435,888,718,739,000 | 42.416667 | 118 | 0.627639 | false | 4.070313 | false | false | false |
Ayrx/cryptography | src/_cffi_src/openssl/crypto.py | 1 | 3371 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/crypto.h>
"""
TYPES = """
static const long Cryptography_HAS_LOCKING_CALLBACKS;
static const int SSLEAY_VERSION;
static const int SSLEAY_CFLAGS;
static const int SSLEAY_PLATFORM;
static const int SSLEAY_DIR;
static const int SSLEAY_BUILT_ON;
static const int OPENSSL_VERSION;
static const int OPENSSL_CFLAGS;
static const int OPENSSL_BUILT_ON;
static const int OPENSSL_PLATFORM;
static const int OPENSSL_DIR;
static const int CRYPTO_MEM_CHECK_ON;
static const int CRYPTO_MEM_CHECK_OFF;
static const int CRYPTO_MEM_CHECK_ENABLE;
static const int CRYPTO_MEM_CHECK_DISABLE;
static const int CRYPTO_LOCK;
static const int CRYPTO_UNLOCK;
static const int CRYPTO_READ;
static const int CRYPTO_LOCK_SSL;
"""
FUNCTIONS = """
int CRYPTO_mem_ctrl(int);
int CRYPTO_is_mem_check_on(void);
void CRYPTO_mem_leaks(struct bio_st *);
"""
MACROS = """
/* CRYPTO_cleanup_all_ex_data became a macro in 1.1.0 */
void CRYPTO_cleanup_all_ex_data(void);
/* as of 1.1.0 OpenSSL does its own locking *angelic chorus*. These functions
have become macros that are no ops */
int CRYPTO_num_locks(void);
void CRYPTO_set_locking_callback(void(*)(int, int, const char *, int));
void (*CRYPTO_get_locking_callback(void))(int, int, const char *, int);
/* SSLeay was removed in 1.1.0 */
unsigned long SSLeay(void);
const char *SSLeay_version(int);
/* these functions were added to replace the SSLeay functions in 1.1.0 */
unsigned long OpenSSL_version_num(void);
const char *OpenSSL_version(int);
/* this is a macro in 1.1.0 */
void OPENSSL_free(void *);
/* This was removed in 1.1.0 */
void CRYPTO_lock(int, int, const char *, int);
"""
CUSTOMIZATIONS = """
/* In 1.1.0 SSLeay has finally been retired. We bidirectionally define the
values so you can use either one. This is so we can use the new function
names no matter what OpenSSL we're running on, but users on older pyOpenSSL
releases won't see issues if they're running OpenSSL 1.1.0 */
#if !defined(SSLEAY_VERSION)
# define SSLeay OpenSSL_version_num
# define SSLeay_version OpenSSL_version
# define SSLEAY_VERSION_NUMBER OPENSSL_VERSION_NUMBER
# define SSLEAY_VERSION OPENSSL_VERSION
# define SSLEAY_CFLAGS OPENSSL_CFLAGS
# define SSLEAY_BUILT_ON OPENSSL_BUILT_ON
# define SSLEAY_PLATFORM OPENSSL_PLATFORM
# define SSLEAY_DIR OPENSSL_DIR
#endif
#if !defined(OPENSSL_VERSION)
# define OpenSSL_version_num SSLeay
# define OpenSSL_version SSLeay_version
# define OPENSSL_VERSION SSLEAY_VERSION
# define OPENSSL_CFLAGS SSLEAY_CFLAGS
# define OPENSSL_BUILT_ON SSLEAY_BUILT_ON
# define OPENSSL_PLATFORM SSLEAY_PLATFORM
# define OPENSSL_DIR SSLEAY_DIR
#endif
#if !defined(CRYPTO_LOCK)
static const long Cryptography_HAS_LOCKING_CALLBACKS = 0;
static const long CRYPTO_LOCK = 0;
static const long CRYPTO_UNLOCK = 0;
static const long CRYPTO_READ = 0;
static const long CRYPTO_LOCK_SSL = 0;
void (*CRYPTO_lock)(int, int, const char *, int) = NULL;
#else
static const long Cryptography_HAS_LOCKING_CALLBACKS = 1;
#endif
"""
| bsd-3-clause | 7,430,546,441,362,209,000 | 33.397959 | 79 | 0.723821 | false | 3.092661 | false | false | false |
mfsteen/CIQTranslate-Kristian | openpyxl/styles/fills.py | 1 | 5258 | from __future__ import absolute_import
# Copyright (c) 2010-2016 openpyxl
from openpyxl.descriptors import Float, Set, Alias, NoneSet
from openpyxl.descriptors.sequence import ValueSequence
from openpyxl.compat import safe_string
from .colors import ColorDescriptor, Color
from .hashable import HashableObject
from openpyxl.xml.functions import Element, localname, safe_iterator
from openpyxl.xml.constants import SHEET_MAIN_NS
FILL_NONE = 'none'
FILL_SOLID = 'solid'
FILL_PATTERN_DARKDOWN = 'darkDown'
FILL_PATTERN_DARKGRAY = 'darkGray'
FILL_PATTERN_DARKGRID = 'darkGrid'
FILL_PATTERN_DARKHORIZONTAL = 'darkHorizontal'
FILL_PATTERN_DARKTRELLIS = 'darkTrellis'
FILL_PATTERN_DARKUP = 'darkUp'
FILL_PATTERN_DARKVERTICAL = 'darkVertical'
FILL_PATTERN_GRAY0625 = 'gray0625'
FILL_PATTERN_GRAY125 = 'gray125'
FILL_PATTERN_LIGHTDOWN = 'lightDown'
FILL_PATTERN_LIGHTGRAY = 'lightGray'
FILL_PATTERN_LIGHTGRID = 'lightGrid'
FILL_PATTERN_LIGHTHORIZONTAL = 'lightHorizontal'
FILL_PATTERN_LIGHTTRELLIS = 'lightTrellis'
FILL_PATTERN_LIGHTUP = 'lightUp'
FILL_PATTERN_LIGHTVERTICAL = 'lightVertical'
FILL_PATTERN_MEDIUMGRAY = 'mediumGray'
fills = (FILL_SOLID, FILL_PATTERN_DARKDOWN, FILL_PATTERN_DARKGRAY,
FILL_PATTERN_DARKGRID, FILL_PATTERN_DARKHORIZONTAL, FILL_PATTERN_DARKTRELLIS,
FILL_PATTERN_DARKUP, FILL_PATTERN_DARKVERTICAL, FILL_PATTERN_GRAY0625,
FILL_PATTERN_GRAY125, FILL_PATTERN_LIGHTDOWN, FILL_PATTERN_LIGHTGRAY,
FILL_PATTERN_LIGHTGRID, FILL_PATTERN_LIGHTHORIZONTAL,
FILL_PATTERN_LIGHTTRELLIS, FILL_PATTERN_LIGHTUP, FILL_PATTERN_LIGHTVERTICAL,
FILL_PATTERN_MEDIUMGRAY)
class Fill(HashableObject):
"""Base class"""
tagname = "fill"
@classmethod
def from_tree(cls, el):
children = [c for c in el]
if not children:
return
child = children[0]
if "patternFill" in child.tag:
return PatternFill._from_tree(child)
else:
return GradientFill._from_tree(child)
class PatternFill(Fill):
"""Area fill patterns for use in styles.
Caution: if you do not specify a fill_type, other attributes will have
    no effect!"""
tagname = "patternFill"
__fields__ = ('patternType',
'fgColor',
'bgColor')
__elements__ = ('fgColor', 'bgColor')
patternType = NoneSet(values=fills)
fill_type = Alias("patternType")
fgColor = ColorDescriptor()
start_color = Alias("fgColor")
bgColor = ColorDescriptor()
end_color = Alias("bgColor")
def __init__(self, patternType=None, fgColor=Color(), bgColor=Color(),
fill_type=None, start_color=None, end_color=None):
if fill_type is not None:
patternType = fill_type
self.patternType = patternType
if start_color is not None:
fgColor = start_color
self.fgColor = fgColor
if end_color is not None:
bgColor = end_color
self.bgColor = bgColor
@classmethod
def _from_tree(cls, el):
attrib = dict(el.attrib)
for child in el:
desc = localname(child)
attrib[desc] = Color.from_tree(child)
return cls(**attrib)
def to_tree(self, tagname=None):
parent = Element("fill")
el = Element(self.tagname)
if self.patternType is not None:
el.set('patternType', self.patternType)
for c in self.__elements__:
value = getattr(self, c)
if value != Color():
el.append(value.to_tree(c))
parent.append(el)
return parent
DEFAULT_EMPTY_FILL = PatternFill()
DEFAULT_GRAY_FILL = PatternFill(patternType='gray125')
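# Illustrative sketch (not part of the original module): as the PatternFill
# docstring warns, a visible fill needs an explicit fill_type, e.g.
#     solid_fill = PatternFill(fill_type=FILL_SOLID, start_color=Color(rgb='FFFFFF00'))
# (the rgb keyword is assumed to be accepted by Color from .colors).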
def _serialise_stop(tagname, sequence, namespace=None):
for idx, color in enumerate(sequence):
stop = Element("stop", position=str(idx))
stop.append(color.to_tree())
yield stop
class GradientFill(Fill):
tagname = "gradientFill"
__fields__ = ('type', 'degree', 'left', 'right', 'top', 'bottom', 'stop')
type = Set(values=('linear', 'path'))
fill_type = Alias("type")
degree = Float()
left = Float()
right = Float()
top = Float()
bottom = Float()
stop = ValueSequence(expected_type=Color, to_tree=_serialise_stop)
def __init__(self, type="linear", degree=0, left=0, right=0, top=0,
bottom=0, stop=(), fill_type=None):
self.degree = degree
self.left = left
self.right = right
self.top = top
self.bottom = bottom
self.stop = stop
if fill_type is not None:
type = fill_type
self.type = type
def __iter__(self):
for attr in self.__attrs__:
value = getattr(self, attr)
if value:
yield attr, safe_string(value)
@classmethod
def _from_tree(cls, node):
colors = []
for color in safe_iterator(node, "{%s}color" % SHEET_MAIN_NS):
colors.append(Color.from_tree(color))
return cls(stop=colors, **node.attrib)
def to_tree(self, tagname=None, namespace=None):
parent = Element("fill")
el = super(GradientFill, self).to_tree()
parent.append(el)
return parent
| gpl-3.0 | -6,165,026,717,945,432,000 | 29.218391 | 86 | 0.63294 | false | 3.500666 | false | false | false |
sserrot/champion_relationships | venv/Lib/site-packages/IPython/core/inputsplitter.py | 1 | 28155 | """DEPRECATED: Input handling and transformation machinery.
This module was deprecated in IPython 7.0, in favour of inputtransformer2.
The first class in this module, :class:`InputSplitter`, is designed to tell when
input from a line-oriented frontend is complete and should be executed, and when
the user should be prompted for another line of code instead. The name 'input
splitter' is largely for historical reasons.
A companion, :class:`IPythonInputSplitter`, provides the same functionality but
with full support for the extended IPython syntax (magics, system calls, etc).
The code to actually do these transformations is in :mod:`IPython.core.inputtransformer`.
:class:`IPythonInputSplitter` feeds the raw code to the transformers in order
and stores the results.
For more details, see the class docstrings below.
"""
from warnings import warn
warn('IPython.core.inputsplitter is deprecated since IPython 7 in favor of `IPython.core.inputtransformer2`',
DeprecationWarning)
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import ast
import codeop
import io
import re
import sys
import tokenize
import warnings
from IPython.core.inputtransformer import (leading_indent,
classic_prompt,
ipy_prompt,
cellmagic,
assemble_logical_lines,
help_end,
escaped_commands,
assign_from_magic,
assign_from_system,
assemble_python_lines,
)
# These are available in this module for backwards compatibility.
from IPython.core.inputtransformer import (ESC_SHELL, ESC_SH_CAP, ESC_HELP,
ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,
ESC_QUOTE, ESC_QUOTE2, ESC_PAREN, ESC_SEQUENCES)
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
# FIXME: These are general-purpose utilities that later can be moved to the
# general ward. Kept here for now because we're being very strict about test
# coverage with this code, and this lets us ensure that we keep 100% coverage
# while developing.
# compiled regexps for autoindent management
dedent_re = re.compile('|'.join([
r'^\s+raise(\s.*)?$', # raise statement (+ space + other stuff, maybe)
r'^\s+raise\([^\)]*\).*$', # wacky raise with immediate open paren
r'^\s+return(\s.*)?$', # normal return (+ space + other stuff, maybe)
r'^\s+return\([^\)]*\).*$', # wacky return with immediate open paren
r'^\s+pass\s*$', # pass (optionally followed by trailing spaces)
r'^\s+break\s*$', # break (optionally followed by trailing spaces)
r'^\s+continue\s*$', # continue (optionally followed by trailing spaces)
]))
ini_spaces_re = re.compile(r'^([ \t\r\f\v]+)')
# regexp to match pure comment lines so we don't accidentally insert 'if 1:'
# before pure comments
comment_line_re = re.compile(r'^\s*\#')
def num_ini_spaces(s):
"""Return the number of initial spaces in a string.
Note that tabs are counted as a single space. For now, we do *not* support
mixing of tabs and spaces in the user's input.
Parameters
----------
s : string
Returns
-------
n : int
"""
ini_spaces = ini_spaces_re.match(s)
if ini_spaces:
return ini_spaces.end()
else:
return 0
# Fake token types for partial_tokenize:
INCOMPLETE_STRING = tokenize.N_TOKENS
IN_MULTILINE_STATEMENT = tokenize.N_TOKENS + 1
# The 2 classes below have the same API as TokenInfo, but don't try to look up
# a token type name that they won't find.
class IncompleteString:
type = exact_type = INCOMPLETE_STRING
def __init__(self, s, start, end, line):
self.s = s
self.start = start
self.end = end
self.line = line
class InMultilineStatement:
type = exact_type = IN_MULTILINE_STATEMENT
def __init__(self, pos, line):
self.s = ''
self.start = self.end = pos
self.line = line
def partial_tokens(s):
"""Iterate over tokens from a possibly-incomplete string of code.
This adds two special token types: INCOMPLETE_STRING and
IN_MULTILINE_STATEMENT. These can only occur as the last token yielded, and
represent the two main ways for code to be incomplete.
"""
readline = io.StringIO(s).readline
token = tokenize.TokenInfo(tokenize.NEWLINE, '', (1, 0), (1, 0), '')
try:
for token in tokenize.generate_tokens(readline):
yield token
except tokenize.TokenError as e:
# catch EOF error
lines = s.splitlines(keepends=True)
end = len(lines), len(lines[-1])
if 'multi-line string' in e.args[0]:
l, c = start = token.end
s = lines[l-1][c:] + ''.join(lines[l:])
yield IncompleteString(s, start, end, lines[-1])
elif 'multi-line statement' in e.args[0]:
yield InMultilineStatement(end, lines[-1])
else:
raise
def find_next_indent(code):
"""Find the number of spaces for the next line of indentation"""
tokens = list(partial_tokens(code))
if tokens[-1].type == tokenize.ENDMARKER:
tokens.pop()
if not tokens:
return 0
while (tokens[-1].type in {tokenize.DEDENT, tokenize.NEWLINE, tokenize.COMMENT}):
tokens.pop()
if tokens[-1].type == INCOMPLETE_STRING:
# Inside a multiline string
return 0
# Find the indents used before
prev_indents = [0]
def _add_indent(n):
if n != prev_indents[-1]:
prev_indents.append(n)
tokiter = iter(tokens)
for tok in tokiter:
if tok.type in {tokenize.INDENT, tokenize.DEDENT}:
_add_indent(tok.end[1])
elif (tok.type == tokenize.NL):
try:
_add_indent(next(tokiter).start[1])
except StopIteration:
break
last_indent = prev_indents.pop()
# If we've just opened a multiline statement (e.g. 'a = ['), indent more
if tokens[-1].type == IN_MULTILINE_STATEMENT:
if tokens[-2].exact_type in {tokenize.LPAR, tokenize.LSQB, tokenize.LBRACE}:
return last_indent + 4
return last_indent
if tokens[-1].exact_type == tokenize.COLON:
# Line ends with colon - indent
return last_indent + 4
if last_indent:
# Examine the last line for dedent cues - statements like return or
# raise which normally end a block of code.
last_line_starts = 0
for i, tok in enumerate(tokens):
if tok.type == tokenize.NEWLINE:
last_line_starts = i + 1
last_line_tokens = tokens[last_line_starts:]
names = [t.string for t in last_line_tokens if t.type == tokenize.NAME]
if names and names[0] in {'raise', 'return', 'pass', 'break', 'continue'}:
# Find the most recent indentation less than the current level
for indent in reversed(prev_indents):
if indent < last_indent:
return indent
return last_indent
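# Illustrative behaviour (not part of the original module):
#     find_next_indent("for i in range(3):")  # -> 4, the line ends with a colon
#     find_next_indent("a = 1")               # -> 0, a complete flush-left statement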
def last_blank(src):
"""Determine if the input source ends in a blank.
A blank is either a newline or a line consisting of whitespace.
Parameters
----------
src : string
A single or multiline string.
"""
if not src: return False
ll = src.splitlines()[-1]
return (ll == '') or ll.isspace()
last_two_blanks_re = re.compile(r'\n\s*\n\s*$', re.MULTILINE)
last_two_blanks_re2 = re.compile(r'.+\n\s*\n\s+$', re.MULTILINE)
def last_two_blanks(src):
"""Determine if the input source ends in two blanks.
A blank is either a newline or a line consisting of whitespace.
Parameters
----------
src : string
A single or multiline string.
"""
if not src: return False
# The logic here is tricky: I couldn't get a regexp to work and pass all
# the tests, so I took a different approach: split the source by lines,
# grab the last two and prepend '###\n' as a stand-in for whatever was in
# the body before the last two lines. Then, with that structure, it's
# possible to analyze with two regexps. Not the most elegant solution, but
# it works. If anyone tries to change this logic, make sure to validate
# the whole test suite first!
new_src = '\n'.join(['###\n'] + src.splitlines()[-2:])
return (bool(last_two_blanks_re.match(new_src)) or
bool(last_two_blanks_re2.match(new_src)) )
def remove_comments(src):
"""Remove all comments from input source.
Note: comments are NOT recognized inside of strings!
Parameters
----------
src : string
A single or multiline input string.
Returns
-------
String with all Python comments removed.
"""
return re.sub('#.*', '', src)
def get_input_encoding():
"""Return the default standard input encoding.
If sys.stdin has no encoding, 'ascii' is returned."""
# There are strange environments for which sys.stdin.encoding is None. We
# ensure that a valid encoding is returned.
encoding = getattr(sys.stdin, 'encoding', None)
if encoding is None:
encoding = 'ascii'
return encoding
#-----------------------------------------------------------------------------
# Classes and functions for normal Python syntax handling
#-----------------------------------------------------------------------------
class InputSplitter(object):
r"""An object that can accumulate lines of Python source before execution.
This object is designed to be fed python source line-by-line, using
:meth:`push`. It will return on each push whether the currently pushed
code could be executed already. In addition, it provides a method called
:meth:`push_accepts_more` that can be used to query whether more input
can be pushed into a single interactive block.
This is a simple example of how an interactive terminal-based client can use
this tool::
isp = InputSplitter()
while isp.push_accepts_more():
indent = ' '*isp.indent_spaces
prompt = '>>> ' + indent
            line = indent + input(prompt)
isp.push(line)
        print('Input source was:\n', isp.source_reset())
"""
# A cache for storing the current indentation
# The first value stores the most recently processed source input
# The second value is the number of spaces for the current indentation
# If self.source matches the first value, the second value is a valid
# current indentation. Otherwise, the cache is invalid and the indentation
# must be recalculated.
_indent_spaces_cache = None, None
# String, indicating the default input encoding. It is computed by default
# at initialization time via get_input_encoding(), but it can be reset by a
# client with specific knowledge of the encoding.
encoding = ''
# String where the current full source input is stored, properly encoded.
# Reading this attribute is the normal way of querying the currently pushed
# source code, that has been properly encoded.
source = ''
# Code object corresponding to the current source. It is automatically
# synced to the source, so it can be queried at any time to obtain the code
# object; it will be None if the source doesn't compile to valid Python.
code = None
# Private attributes
# List with lines of input accumulated so far
_buffer = None
# Command compiler
_compile = None
# Boolean indicating whether the current block is complete
_is_complete = None
# Boolean indicating whether the current block has an unrecoverable syntax error
_is_invalid = False
def __init__(self):
"""Create a new InputSplitter instance.
"""
self._buffer = []
self._compile = codeop.CommandCompiler()
self.encoding = get_input_encoding()
def reset(self):
"""Reset the input buffer and associated state."""
self._buffer[:] = []
self.source = ''
self.code = None
self._is_complete = False
self._is_invalid = False
def source_reset(self):
"""Return the input source and perform a full reset.
"""
out = self.source
self.reset()
return out
def check_complete(self, source):
"""Return whether a block of code is ready to execute, or should be continued
This is a non-stateful API, and will reset the state of this InputSplitter.
Parameters
----------
source : string
Python input code, which can be multiline.
Returns
-------
status : str
One of 'complete', 'incomplete', or 'invalid' if source is not a
prefix of valid code.
indent_spaces : int or None
The number of spaces by which to indent the next line of code. If
status is not 'incomplete', this is None.
"""
self.reset()
try:
self.push(source)
except SyntaxError:
# Transformers in IPythonInputSplitter can raise SyntaxError,
# which push() will not catch.
return 'invalid', None
else:
if self._is_invalid:
return 'invalid', None
elif self.push_accepts_more():
return 'incomplete', self.get_indent_spaces()
else:
return 'complete', None
finally:
self.reset()
def push(self, lines:str) -> bool:
"""Push one or more lines of input.
This stores the given lines and returns a status code indicating
whether the code forms a complete Python block or not.
Any exceptions generated in compilation are swallowed, but if an
exception was produced, the method returns True.
Parameters
----------
lines : string
One or more lines of Python input.
Returns
-------
is_complete : boolean
True if the current input source (the result of the current input
plus prior inputs) forms a complete Python execution block. Note that
this value is also stored as a private attribute (``_is_complete``), so it
can be queried at any time.
"""
assert isinstance(lines, str)
self._store(lines)
source = self.source
# Before calling _compile(), reset the code object to None so that if an
# exception is raised in compilation, we don't mislead by having
# inconsistent code/source attributes.
self.code, self._is_complete = None, None
self._is_invalid = False
# Honor termination lines properly
if source.endswith('\\\n'):
return False
try:
with warnings.catch_warnings():
warnings.simplefilter('error', SyntaxWarning)
self.code = self._compile(source, symbol="exec")
# Invalid syntax can produce any of a number of different errors from
# inside the compiler, so we have to catch them all. Syntax errors
# immediately produce a 'ready' block, so the invalid Python can be
# sent to the kernel for evaluation with possible ipython
# special-syntax conversion.
except (SyntaxError, OverflowError, ValueError, TypeError,
MemoryError, SyntaxWarning):
self._is_complete = True
self._is_invalid = True
else:
# Compilation didn't produce any exceptions (though it may not have
# given a complete code object)
self._is_complete = self.code is not None
return self._is_complete
def push_accepts_more(self):
"""Return whether a block of interactive input can accept more input.
This method is meant to be used by line-oriented frontends, who need to
guess whether a block is complete or not based solely on prior and
current input lines. The InputSplitter considers it has a complete
interactive block and will not accept more input when either:
* A SyntaxError is raised
* The code is complete and consists of a single line or a single
non-compound statement
* The code is complete and has a blank line at the end
If the current input produces a syntax error, this method immediately
returns False but does *not* raise the syntax error exception, as
typically clients will want to send invalid syntax to an execution
backend which might convert the invalid syntax into valid Python via
one of the dynamic IPython mechanisms.
"""
# With incomplete input, unconditionally accept more
# A syntax error also sets _is_complete to True - see push()
if not self._is_complete:
#print("Not complete") # debug
return True
# The user can make any (complete) input execute by leaving a blank line
last_line = self.source.splitlines()[-1]
if (not last_line) or last_line.isspace():
#print("Blank line") # debug
return False
# If there's just a single line or AST node, and we're flush left, as is
# the case after a simple statement such as 'a=1', we want to execute it
# straight away.
if self.get_indent_spaces() == 0:
if len(self.source.splitlines()) <= 1:
return False
try:
code_ast = ast.parse(u''.join(self._buffer))
except Exception:
#print("Can't parse AST") # debug
return False
else:
if len(code_ast.body) == 1 and \
not hasattr(code_ast.body[0], 'body'):
#print("Simple statement") # debug
return False
# General fallback - accept more code
return True
def get_indent_spaces(self):
sourcefor, n = self._indent_spaces_cache
if sourcefor == self.source:
return n
# self.source always has a trailing newline
n = find_next_indent(self.source[:-1])
self._indent_spaces_cache = (self.source, n)
return n
# Backwards compatibility. I think all code that used .indent_spaces was
# inside IPython, but we can leave this here until IPython 7 in case any
# other modules are using it. -TK, November 2017
indent_spaces = property(get_indent_spaces)
def _store(self, lines, buffer=None, store='source'):
"""Store one or more lines of input.
If input lines are not newline-terminated, a newline is automatically
appended."""
if buffer is None:
buffer = self._buffer
if lines.endswith('\n'):
buffer.append(lines)
else:
buffer.append(lines+'\n')
setattr(self, store, self._set_source(buffer))
def _set_source(self, buffer):
return u''.join(buffer)
class IPythonInputSplitter(InputSplitter):
"""An input splitter that recognizes all of IPython's special syntax."""
# String with raw, untransformed input.
source_raw = ''
# Flag to track when a transformer has stored input that it hasn't given
# back yet.
transformer_accumulating = False
# Flag to track when assemble_python_lines has stored input that it hasn't
# given back yet.
within_python_line = False
# Private attributes
# List with lines of raw input accumulated so far.
_buffer_raw = None
def __init__(self, line_input_checker=True, physical_line_transforms=None,
logical_line_transforms=None, python_line_transforms=None):
super(IPythonInputSplitter, self).__init__()
self._buffer_raw = []
self._validate = True
if physical_line_transforms is not None:
self.physical_line_transforms = physical_line_transforms
else:
self.physical_line_transforms = [
leading_indent(),
classic_prompt(),
ipy_prompt(),
cellmagic(end_on_blank_line=line_input_checker),
]
self.assemble_logical_lines = assemble_logical_lines()
if logical_line_transforms is not None:
self.logical_line_transforms = logical_line_transforms
else:
self.logical_line_transforms = [
help_end(),
escaped_commands(),
assign_from_magic(),
assign_from_system(),
]
self.assemble_python_lines = assemble_python_lines()
if python_line_transforms is not None:
self.python_line_transforms = python_line_transforms
else:
# We don't use any of these at present
self.python_line_transforms = []
@property
def transforms(self):
"Quick access to all transformers."
return self.physical_line_transforms + \
[self.assemble_logical_lines] + self.logical_line_transforms + \
[self.assemble_python_lines] + self.python_line_transforms
@property
def transforms_in_use(self):
"""Transformers, excluding logical line transformers if we're in a
Python line."""
t = self.physical_line_transforms[:]
if not self.within_python_line:
t += [self.assemble_logical_lines] + self.logical_line_transforms
return t + [self.assemble_python_lines] + self.python_line_transforms
def reset(self):
"""Reset the input buffer and associated state."""
super(IPythonInputSplitter, self).reset()
self._buffer_raw[:] = []
self.source_raw = ''
self.transformer_accumulating = False
self.within_python_line = False
for t in self.transforms:
try:
t.reset()
except SyntaxError:
# Nothing that calls reset() expects to handle transformer
# errors
pass
def flush_transformers(self):
def _flush(transform, outs):
"""yield transformed lines
always strings, never None
transform: the current transform
outs: an iterable of previously transformed inputs.
Each may be multiline, which will be passed
one line at a time to transform.
"""
for out in outs:
for line in out.splitlines():
# push one line at a time
tmp = transform.push(line)
if tmp is not None:
yield tmp
# reset the transform
tmp = transform.reset()
if tmp is not None:
yield tmp
out = []
for t in self.transforms_in_use:
out = _flush(t, out)
out = list(out)
if out:
self._store('\n'.join(out))
def raw_reset(self):
"""Return raw input only and perform a full reset.
"""
out = self.source_raw
self.reset()
return out
def source_reset(self):
try:
self.flush_transformers()
return self.source
finally:
self.reset()
def push_accepts_more(self):
if self.transformer_accumulating:
return True
else:
return super(IPythonInputSplitter, self).push_accepts_more()
def transform_cell(self, cell):
"""Process and translate a cell of input.
"""
self.reset()
try:
self.push(cell)
self.flush_transformers()
return self.source
finally:
self.reset()
def push(self, lines:str) -> bool:
"""Push one or more lines of IPython input.
This stores the given lines and returns a status code indicating
whether the code forms a complete Python block or not, after processing
all input lines for special IPython syntax.
Any exceptions generated in compilation are swallowed, but if an
exception was produced, the method returns True.
Parameters
----------
lines : string
One or more lines of Python input.
Returns
-------
is_complete : boolean
True if the current input source (the result of the current input
plus prior inputs) forms a complete Python execution block. Note that
this value is also stored as a private attribute (_is_complete), so it
can be queried at any time.
"""
assert isinstance(lines, str)
# We must ensure all input is pure unicode
# ''.splitlines() --> [], but we need to push the empty line to transformers
lines_list = lines.splitlines()
if not lines_list:
lines_list = ['']
# Store raw source before applying any transformations to it. Note
# that this must be done *after* the reset() call that would otherwise
# flush the buffer.
self._store(lines, self._buffer_raw, 'source_raw')
transformed_lines_list = []
for line in lines_list:
transformed = self._transform_line(line)
if transformed is not None:
transformed_lines_list.append(transformed)
if transformed_lines_list:
transformed_lines = '\n'.join(transformed_lines_list)
return super(IPythonInputSplitter, self).push(transformed_lines)
else:
# Got nothing back from transformers - they must be waiting for
# more input.
return False
def _transform_line(self, line):
"""Push a line of input code through the various transformers.
Returns any output from the transformers, or None if a transformer
is accumulating lines.
Sets self.transformer_accumulating as a side effect.
"""
def _accumulating(dbg):
#print(dbg)
self.transformer_accumulating = True
return None
for transformer in self.physical_line_transforms:
line = transformer.push(line)
if line is None:
return _accumulating(transformer)
if not self.within_python_line:
line = self.assemble_logical_lines.push(line)
if line is None:
return _accumulating('acc logical line')
for transformer in self.logical_line_transforms:
line = transformer.push(line)
if line is None:
return _accumulating(transformer)
line = self.assemble_python_lines.push(line)
if line is None:
self.within_python_line = True
return _accumulating('acc python line')
else:
self.within_python_line = False
for transformer in self.python_line_transforms:
line = transformer.push(line)
if line is None:
return _accumulating(transformer)
#print("transformers clear") #debug
self.transformer_accumulating = False
return line
| mit | 8,784,860,315,369,942,000 | 35.470207 | 109 | 0.58764 | false | 4.60049 | false | false | false |
Ajapaik/ajapaik-web | ajapaik/ajapaik_face_recognition/management/commands/run_face_encoding_on_unencoded_rectangles.py | 1 | 1387 | import multiprocessing
from json import loads, dumps
import face_recognition
from django.core.management.base import BaseCommand
from ajapaik.ajapaik_face_recognition.models import FaceRecognitionRectangle
def encode_single_rectangle(rectangle: FaceRecognitionRectangle) -> None:
print('Processing rectangle %s' % rectangle.pk)
try:
image = face_recognition.load_image_file(rectangle.photo.image)
except: # noqa
return
try:
encodings = face_recognition.face_encodings(image, known_face_locations=[loads(rectangle.coordinates)])
except: # noqa
return
if len(encodings) == 1:
my_encoding = encodings[0]
try:
rectangle.face_encoding = dumps(my_encoding.tolist())
rectangle.save()
except: # noqa
return
else:
        print('Found %s face encodings for rectangle %s, should find only 1' % (len(encodings), rectangle.id))
class Command(BaseCommand):
help = 'Will run face encoding on all identified faces'
args = 'subject_id'
def handle(self, *args, **options):
unknown_rectangles = FaceRecognitionRectangle.objects.filter(face_encoding__isnull=True).all()
print('Found %s rectangles to run on' % unknown_rectangles.count())
with multiprocessing.Pool() as pool:
pool.map(encode_single_rectangle, unknown_rectangles)
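# Typical invocation (illustrative; assumes the app is installed in a Django project):
#     python manage.py run_face_encoding_on_unencoded_rectangles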
| gpl-3.0 | -8,899,417,717,405,903,000 | 34.564103 | 111 | 0.681327 | false | 4.031977 | false | false | false |
Tejal011089/trufil-erpnext | erpnext/stock/doctype/item/test_item.py | 1 | 2870 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.test_runner import make_test_records
from erpnext.stock.doctype.item.item import WarehouseNotSet, ItemTemplateCannotHaveStock
from erpnext.stock.doctype.stock_entry.test_stock_entry import make_stock_entry
test_ignore = ["BOM"]
test_dependencies = ["Warehouse"]
def make_item(item_code, properties=None):
if frappe.db.exists("Item", item_code):
return frappe.get_doc("Item", item_code)
item = frappe.get_doc({
"doctype": "Item",
"item_code": item_code,
"item_name": item_code,
"description": item_code,
"item_group": "Products"
})
if properties:
item.update(properties)
if item.is_stock_item and not item.default_warehouse:
item.default_warehouse = "_Test Warehouse - _TC"
item.insert()
return item
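# Illustrative use of the helper above (hypothetical item code):
#     item = make_item("_Test Widget", {"is_stock_item": 1})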
class TestItem(unittest.TestCase):
def get_item(self, idx):
item_code = test_records[idx].get("item_code")
if not frappe.db.exists("Item", item_code):
item = frappe.copy_doc(test_records[idx])
item.insert()
else:
item = frappe.get_doc("Item", item_code)
return item
def test_template_cannot_have_stock(self):
item = self.get_item(10)
make_stock_entry(item_code=item.name, target="Stores - _TC", qty=1, incoming_rate=1)
item.has_variants = 1
self.assertRaises(ItemTemplateCannotHaveStock, item.save)
def test_default_warehouse(self):
item = frappe.copy_doc(test_records[0])
item.is_stock_item = 1
item.default_warehouse = None
self.assertRaises(WarehouseNotSet, item.insert)
def test_get_item_details(self):
from erpnext.stock.get_item_details import get_item_details
to_check = {
"item_code": "_Test Item",
"item_name": "_Test Item",
"description": "_Test Item 1",
"warehouse": "_Test Warehouse - _TC",
"income_account": "Sales - _TC",
"expense_account": "_Test Account Cost for Goods Sold - _TC",
"cost_center": "_Test Cost Center 2 - _TC",
"qty": 1.0,
"price_list_rate": 100.0,
"base_price_list_rate": 0.0,
"discount_percentage": 0.0,
"rate": 0.0,
"base_rate": 0.0,
"amount": 0.0,
"base_amount": 0.0,
"batch_no": None,
"item_tax_rate": '{}',
"uom": "_Test UOM",
"conversion_factor": 1.0,
}
make_test_records("Item Price")
details = get_item_details({
"item_code": "_Test Item",
"company": "_Test Company",
"price_list": "_Test Price List",
"currency": "_Test Currency",
"parenttype": "Sales Order",
"conversion_rate": 1,
"price_list_currency": "_Test Currency",
"plc_conversion_rate": 1,
"order_type": "Sales",
"transaction_type": "selling"
})
for key, value in to_check.iteritems():
self.assertEquals(value, details.get(key))
test_records = frappe.get_test_records('Item')
| agpl-3.0 | -5,232,201,362,335,797,000 | 27.137255 | 88 | 0.674913 | false | 2.937564 | true | false | false |
petterreinholdtsen/frikanalen | fkbeta/fk/admin.py | 1 | 2538 | # Copyright (c) 2012-2013 Benjamin Bruheim <[email protected]>
# This file is covered by the LGPLv3 or later, read COPYING for details.
from django.contrib import admin
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
from fk.models import FileFormat
from fk.models import Organization
from fk.models import UserProfile
from fk.models import Video, Category, Scheduleitem
from fk.models import VideoFile
from fk.models import SchedulePurpose, WeeklySlot
# In order to display the userprofile on the user admin page
admin.site.unregister(User)
class UserProfileInline(admin.StackedInline):
model = UserProfile
class UserProfileAdmin(UserAdmin):
inlines = [ UserProfileInline, ]
class VideoFileInline(admin.StackedInline):
fields = ('format', 'filename', 'old_filename')
#readonly_fields = ['format', 'filename']
model = VideoFile
extra = 0
class VideoAdmin(admin.ModelAdmin):
list_display = ('name', 'editor', 'organization')
inlines = [VideoFileInline]
search_fields = ["name", "description", "organization__name", "header", "editor__username"]
list_filter = ("proper_import", "is_filler", "publish_on_web", "has_tono_records")
class OrganizationAdmin(admin.ModelAdmin):
list_display = ('name', 'fkmember', 'orgnr')
filter_horizontal = ("members",)
list_filter = ('fkmember',)
ordering = ('name',)
class ScheduleitemAdmin(admin.ModelAdmin):
list_filter = ("starttime", )
list_display = ('__str__',
'video',
'schedulereason',
'starttime',
'duration')
#list_display_links = ('starttime', 'video',)
#inlines = [VideoInline]
#exclude = ('video',)
search_fields = ["video__name", "video__organization__name"]
ordering = ('starttime',)
class SchedulePurposeAdmin(admin.ModelAdmin):
list_display = (
'__str__',
'videos_str',
)
filter_horizontal = ('direct_videos',)
class WeeklySlotAdmin(admin.ModelAdmin):
list_display = (
'__str__',
'day',
'start_time',
'duration',
'purpose',
)
admin.site.register(Category)
admin.site.register(FileFormat)
admin.site.register(Organization, OrganizationAdmin)
admin.site.register(SchedulePurpose, SchedulePurposeAdmin)
admin.site.register(Scheduleitem, ScheduleitemAdmin)
admin.site.register(User, UserProfileAdmin)
admin.site.register(Video, VideoAdmin)
admin.site.register(VideoFile)
admin.site.register(WeeklySlot, WeeklySlotAdmin)
| lgpl-3.0 | -431,953,171,379,978,940 | 31.126582 | 95 | 0.684791 | false | 3.85129 | false | false | false |
janusnic/ecommerce | ecommerce/settings/local.py | 1 | 4047 | """Development settings and globals."""
from __future__ import absolute_import
import os
from os.path import join, normpath
from ecommerce.settings.base import *
from ecommerce.settings.logger import get_logger_config
# DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# END DEBUG CONFIGURATION
# EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# END EMAIL CONFIGURATION
# DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': normpath(join(DJANGO_ROOT, 'default.db')),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
'ATOMIC_REQUESTS': True,
}
}
# END DATABASE CONFIGURATION
# CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# END CACHE CONFIGURATION
# TOOLBAR CONFIGURATION
# See: http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup
if os.environ.get('ENABLE_DJANGO_TOOLBAR', False):
INSTALLED_APPS += (
'debug_toolbar',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
INTERNAL_IPS = ('127.0.0.1',)
# END TOOLBAR CONFIGURATION
# URL CONFIGURATION
ECOMMERCE_URL_ROOT = 'http://localhost:8002'
LMS_URL_ROOT = 'http://127.0.0.1:8000'
# The location of the LMS heartbeat page
LMS_HEARTBEAT_URL = get_lms_url('/heartbeat')
# The location of the LMS student dashboard
LMS_DASHBOARD_URL = get_lms_url('/dashboard')
OAUTH2_PROVIDER_URL = get_lms_url('/oauth2')
COMMERCE_API_URL = get_lms_url('/api/commerce/v1/')
# END URL CONFIGURATION
# AUTHENTICATION
# Set these to the correct values for your OAuth2/OpenID Connect provider (e.g., devstack)
SOCIAL_AUTH_EDX_OIDC_KEY = 'replace-me'
SOCIAL_AUTH_EDX_OIDC_SECRET = 'replace-me'
SOCIAL_AUTH_EDX_OIDC_URL_ROOT = OAUTH2_PROVIDER_URL
SOCIAL_AUTH_EDX_OIDC_ID_TOKEN_DECRYPTION_KEY = SOCIAL_AUTH_EDX_OIDC_SECRET
JWT_AUTH.update({
'JWT_SECRET_KEY': 'insecure-secret-key',
'JWT_ISSUER': OAUTH2_PROVIDER_URL
})
# END AUTHENTICATION
# ORDER PROCESSING
ENROLLMENT_API_URL = get_lms_url('/api/enrollment/v1/enrollment')
ENROLLMENT_FULFILLMENT_TIMEOUT = 15 # devstack is slow!
EDX_API_KEY = 'replace-me'
# END ORDER PROCESSING
# PAYMENT PROCESSING
PAYMENT_PROCESSOR_CONFIG = {
'cybersource': {
'soap_api_url': 'https://ics2wstest.ic3.com/commerce/1.x/transactionProcessor/CyberSourceTransaction_1.115.wsdl',
'merchant_id': 'fake-merchant-id',
'transaction_key': 'fake-transaction-key',
'profile_id': 'fake-profile-id',
'access_key': 'fake-access-key',
'secret_key': 'fake-secret-key',
'payment_page_url': 'https://testsecureacceptance.cybersource.com/pay',
'receipt_page_url': get_lms_url('/commerce/checkout/receipt/'),
'cancel_page_url': get_lms_url('/commerce/checkout/cancel/'),
},
'paypal': {
'mode': 'sandbox',
'client_id': 'fake-client-id',
'client_secret': 'fake-client-secret',
'receipt_url': get_lms_url('/commerce/checkout/receipt/'),
'cancel_url': get_lms_url('/commerce/checkout/cancel/'),
},
}
# END PAYMENT PROCESSING
ENABLE_AUTO_AUTH = True
LOGGING = get_logger_config(debug=DEBUG, dev_env=True, local_loglevel='DEBUG')
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
from .private import * # pylint: disable=import-error
except ImportError:
pass
| agpl-3.0 | -3,845,165,324,190,291,000 | 27.907143 | 121 | 0.677045 | false | 3.227273 | true | false | false |
mmasaki/trove | trove/tests/tempest/tests/api/versions/test_versions.py | 1 | 1650 | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from testtools import testcase as testtools
from trove.tests.tempest.tests.api import base
class DatabaseVersionsTest(base.BaseDatabaseTest):
@classmethod
def setup_clients(cls):
super(DatabaseVersionsTest, cls).setup_clients()
cls.client = cls.database_versions_client
@testtools.attr('smoke')
@decorators.idempotent_id('6952cd77-90cd-4dca-bb60-8e2c797940cf')
def test_list_db_versions(self):
versions = self.client.list_db_versions()['versions']
self.assertTrue(len(versions) > 0, "No database versions found")
# List of all versions should contain the current version, and there
# should only be one 'current' version
current_versions = list()
for version in versions:
if 'CURRENT' == version['status']:
current_versions.append(version['id'])
self.assertEqual(1, len(current_versions))
self.assertIn(self.db_current_version, current_versions)
| apache-2.0 | -8,393,112,534,774,205,000 | 39.243902 | 78 | 0.704242 | false | 4.084158 | true | false | false |
felipenaselva/repo.felipe | plugin.video.velocity/scrapers/putlocker_both.py | 1 | 15716 | import urllib2,urllib,re,os
import random
import urlparse
import sys
import xbmcplugin,xbmcgui,xbmc, xbmcaddon, downloader, extract, time
import tools
from libs import kodi
from tm_libs import dom_parser
from libs import log_utils
import tools
from libs import cloudflare
from libs import log_utils
from tm_libs import dom_parser
import cookielib
from StringIO import StringIO
import gzip
import main_scrape
import base64
addon_id = kodi.addon_id
timeout = int(kodi.get_setting('scraper_timeout'))
tools.create_directory(tools.AOPATH, "All_Cookies/Putlocker")
cookiepath = xbmc.translatePath(os.path.join('special://home','addons',addon_id,'All_Cookies','Putlocker/'))
cookiejar = os.path.join(cookiepath,'cookies.lwp')
cj = cookielib.LWPCookieJar()
cookie_file = os.path.join(cookiepath,'cookies.lwp')
def __enum(**enums):
return type('Enum', (), enums)
MAX_RESPONSE = 1024 * 1024 * 2
FORCE_NO_MATCH = '***FORCE_NO_MATCH***'
QUALITIES = __enum(LOW='Low', MEDIUM='Medium', HIGH='High', HD720='HD720', HD1080='HD1080')
XHR = {'X-Requested-With': 'XMLHttpRequest'}
USER_AGENT = "Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko"
BR_VERS = [
['%s.0' % i for i in xrange(18, 43)],
['37.0.2062.103', '37.0.2062.120', '37.0.2062.124', '38.0.2125.101', '38.0.2125.104', '38.0.2125.111', '39.0.2171.71', '39.0.2171.95', '39.0.2171.99', '40.0.2214.93', '40.0.2214.111',
'40.0.2214.115', '42.0.2311.90', '42.0.2311.135', '42.0.2311.152', '43.0.2357.81', '43.0.2357.124', '44.0.2403.155', '44.0.2403.157', '45.0.2454.101', '45.0.2454.85', '46.0.2490.71',
'46.0.2490.80', '46.0.2490.86', '47.0.2526.73', '47.0.2526.80'],
['11.0']]
WIN_VERS = ['Windows NT 10.0', 'Windows NT 7.0', 'Windows NT 6.3', 'Windows NT 6.2', 'Windows NT 6.1', 'Windows NT 6.0', 'Windows NT 5.1', 'Windows NT 5.0']
FEATURES = ['; WOW64', '; Win64; IA64', '; Win64; x64', '']
RAND_UAS = ['Mozilla/5.0 ({win_ver}{feature}; rv:{br_ver}) Gecko/20100101 Firefox/{br_ver}',
'Mozilla/5.0 ({win_ver}{feature}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{br_ver} Safari/537.36',
'Mozilla/5.0 ({win_ver}{feature}; Trident/7.0; rv:{br_ver}) like Gecko']
HOST_Q = {}
HOST_Q[QUALITIES.LOW] = ['youwatch', 'allmyvideos', 'played.to', 'gorillavid']
HOST_Q[QUALITIES.MEDIUM] = ['primeshare', 'exashare', 'bestreams', 'flashx', 'vidto', 'vodlocker', 'thevideo', 'vidzi', 'vidbull',
'realvid', 'nosvideo', 'daclips', 'sharerepo', 'zalaa', 'filehoot', 'vshare']
HOST_Q[QUALITIES.HIGH] = ['vidspot', 'mrfile', 'divxstage', 'streamcloud', 'mooshare', 'novamov', 'mail.ru', 'vid.ag']
HOST_Q[QUALITIES.HD720] = ['thefile', 'sharesix', 'filenuke', 'vidxden', 'movshare', 'nowvideo', 'vidbux', 'streamin.to', 'allvid.ch']
HOST_Q[QUALITIES.HD1080] = ['hugefiles', '180upload', 'mightyupload', 'videomega', 'allmyvideos']
Q_ORDER = {QUALITIES.LOW: 1, QUALITIES.MEDIUM: 2, QUALITIES.HIGH: 3, QUALITIES.HD720: 4, QUALITIES.HD1080: 5}
# base_url = 'http://www.santaseries.com'
base_url = kodi.get_setting('putlocker_base_url')
def format_source_label( item):
if 'label' in item:
return '[%s] %s (%s)' % (item['quality'], item['host'], item['label'])
else:
return '[%s] %s' % (item['quality'], item['host'])
def _http_get(url, cookies=None, data=None, multipart_data=None, headers=None, allow_redirect=True, cache_limit=8):
return get_cooked_url(url, base_url, timeout, cookies=cookies, data=data, multipart_data=multipart_data,
headers=headers, allow_redirect=allow_redirect, cache_limit=cache_limit)
def get_cooked_url(url, base_url, timeout, cookies=None, data=None, multipart_data=None, headers=None, allow_redirect=True, cache_limit=8):
if cookies is None: cookies = {}
if timeout == 0: timeout = None
if headers is None: headers = {}
referer = headers['Referer'] if 'Referer' in headers else url
if kodi.get_setting('debug') == "true":
log_utils.log('Getting Url: %s cookie=|%s| data=|%s| extra headers=|%s|' % (url, cookies, data, headers))
if data is not None:
if isinstance(data, basestring):
data = data
else:
data = urllib.urlencode(data, True)
if multipart_data is not None:
headers['Content-Type'] = 'multipart/form-data; boundary=X-X-X'
data = multipart_data
try:
cj = _set_cookies(base_url, cookies)
request = urllib2.Request(url, data=data)
request.add_header('User-Agent', _get_ua())
#request.add_unredirected_header('Host', base_url)
request.add_unredirected_header('Referer', referer)
for key in headers: request.add_header(key, headers[key])
cj.add_cookie_header(request)
if not allow_redirect:
opener = urllib2.build_opener(NoRedirection)
urllib2.install_opener(opener)
else:
opener = urllib2.build_opener(urllib2.HTTPRedirectHandler)
urllib2.install_opener(opener)
opener2 = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener2)
response = urllib2.urlopen(request, timeout=timeout)
cj.extract_cookies(response, request)
if kodi.get_setting('debug') == "true":
print 'Response Cookies: %s - %s' % (url, cookies_as_str(cj))
__fix_bad_cookies()
cj.save(ignore_discard=True)
if not allow_redirect and (response.getcode() in [301, 302, 303, 307] or response.info().getheader('Refresh')):
if response.info().getheader('Refresh') is not None:
refresh = response.info().getheader('Refresh')
return refresh.split(';')[-1].split('url=')[-1]
else:
return response.info().getheader('Location')
content_length = response.info().getheader('Content-Length', 0)
if int(content_length) > MAX_RESPONSE:
print 'Response exceeded allowed size. %s => %s / %s' % (url, content_length, MAX_RESPONSE)
if response.info().get('Content-Encoding') == 'gzip':
buf = StringIO(response.read(MAX_RESPONSE))
f = gzip.GzipFile(fileobj=buf)
html = f.read()
else:
html = response.read(MAX_RESPONSE)
except urllib2.HTTPError as e:
if e.code == 503 and 'cf-browser-verification' in e.read():
print "WAS ERROR"
html = cloudflare.solve(url, cj, _get_ua())
if not html:
return ''
else:
print 'Error (%s) during THE scraper http get: %s' % (str(e), url)
return ''
except Exception as e:
print 'Error (%s) during scraper http get: %s' % (str(e), url)
return ''
return html
def get_url(url):
request=urllib2.Request(url)
request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36')
response=urllib2.urlopen(request)
link=response.read()
cj.save(cookie_file, ignore_discard=True)
response.close()
return link
def _get_ua():
index = random.randrange(len(RAND_UAS))
user_agent = RAND_UAS[index].format(win_ver=random.choice(WIN_VERS), feature=random.choice(FEATURES), br_ver=random.choice(BR_VERS[index]))
print 'Creating New User Agent: %s' % (user_agent)
return user_agent
def _pathify_url(url):
url = url.replace('\/', '/')
pieces = urlparse.urlparse(url)
if pieces.scheme:
strip = pieces.scheme + ':'
else:
strip = ''
strip += '//' + pieces.netloc
url = url.replace(strip, '')
if not url.startswith('/'): url = '/' + url
url = url.replace('/./', '/')
print "returning pathify "+ url
return url
def _default_get_episode_url(show_url, video, episode_pattern, title_pattern='', airdate_pattern='', data=None, headers=None):
if 'http://' not in show_url:
url = urlparse.urljoin(base_url, show_url)
else:
url = base_url+show_url
html = get_url(url)
if html:
match = re.search(episode_pattern, html, re.DOTALL)
if match:
return _pathify_url(match.group(1))
else:
log_utils.log('Skipping as Episode not found: %s' % (url), log_utils.LOGDEBUG)
def make_vid_params(video_type, title, year, season, episode, ep_title, ep_airdate):
return '|%s|%s|%s|%s|%s|%s|%s|' % (video_type, title, year, season, episode, ep_title, ep_airdate)
def _set_cookies(base_url, cookies):
cj = cookielib.LWPCookieJar(cookie_file)
try: cj.load(ignore_discard=True)
except: pass
if kodi.get_setting('debug') == "true":
print 'Before Cookies: %s' % (cookies_as_str(cj))
domain = urlparse.urlsplit(base_url).hostname
for key in cookies:
c = cookielib.Cookie(0, key, str(cookies[key]), port=None, port_specified=False, domain=domain, domain_specified=True,
domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=False, comment=None,
comment_url=None, rest={})
cj.set_cookie(c)
cj.save(ignore_discard=True)
if kodi.get_setting('debug') == "true":
print 'After Cookies: %s' % (cookies_as_str(cj))
return cj
def cookies_as_str(cj):
s = ''
c = cj._cookies
for domain in c:
s += '{%s: ' % (domain)
for path in c[domain]:
s += '{%s: ' % (path)
for cookie in c[domain][path]:
s += '{%s=%s}' % (cookie, c[domain][path][cookie].value)
s += '}'
s += '} '
return s
def __fix_bad_cookies():
c = cj._cookies
for domain in c:
for path in c[domain]:
for key in c[domain][path]:
cookie = c[domain][path][key]
if cookie.expires > sys.maxint:
print 'Fixing cookie expiration for %s: was: %s now: %s' % (key, cookie.expires, sys.maxint)
cookie.expires = sys.maxint
def get_quality(video, host, base_quality=None):
host = host.lower()
# Assume movies are low quality, tv shows are high quality
if base_quality is None:
if video.video_type == "movies":
quality = QUALITIES.LOW
else:
quality = QUALITIES.HIGH
else:
quality = base_quality
host_quality = None
if host:
for key in HOST_Q:
if any(hostname in host for hostname in HOST_Q[key]):
host_quality = key
break
if host_quality is not None and Q_ORDER[host_quality] < Q_ORDER[quality]:
quality = host_quality
return quality
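# Illustrative note (not part of the original scraper): get_quality() only ever
# downgrades the assumed quality based on the HOST_Q lists above. For example,
# with the defaults a TV source starts at QUALITIES.HIGH, and a host such as
# "gorillavid.in" (listed under QUALITIES.LOW) brings the result down to
# QUALITIES.LOW, while an unknown host leaves the base quality untouched.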
################ Below are custom changes per scraper #################
def _get_episode_url(show_url, video,season,episode):
episode_pattern = 'href="([^"]+season-%s-episode-%s-[^"]+)' % (season, episode)
title_pattern = 'href="(?P<url>[^"]+season-\d+-episode-\d+-[^"]+).*? \s+(?P<title>.*?)</td>'
return _default_get_episode_url(show_url, video, episode_pattern, title_pattern)
def search(video_type, title, year):
search_url = urlparse.urljoin(base_url, '/search/advanced_search.php?q=%s' % (urllib.quote_plus(title)))
if not year: year = 'Year'
search_url += '&year_from=%s&year_to=%s' % (year, year)
if video_type == "shows":
        search_url += '&section=2'
else:
        search_url += '&section=1'
html = _http_get(search_url, cache_limit=.25)
results = []
if not re.search('Sorry.*?find.*?looking\s+for', html, re.I):
r = re.search('Search Results For: "(.*?)</table>', html, re.DOTALL)
if r:
fragment = r.group(1)
pattern = r'<a\s+href="([^"]+)"\s+title="([^"]+)'
for match in re.finditer(pattern, fragment):
url, title_year = match.groups('')
match = re.search('(.*)\s+\((\d{4})\)', title_year)
if match:
match_title, match_year = match.groups()
else:
match_title = title_year
match_year = ''
result = {'url': _pathify_url(url), 'title': match_title, 'year': match_year}
results.append(result)
results = dict((result['url'], result) for result in results).values()
return results
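# Illustrative note (not part of the original scraper): each entry returned by
# search() is a plain dict, e.g.
#    {'url': '/watch-some-title-2014', 'title': 'Some Title', 'year': '2014'}
# The URL shown here is a made-up example; the real value is whatever
# _pathify_url() extracts from the search results page.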
def get_sources(video):
source_url = urlparse.urljoin(base_url, video)
#source_url = get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
url = urlparse.urljoin(base_url, source_url)
html = _http_get(url, cache_limit=.5)
for match in re.finditer('<a[^>]+href="([^"]+)[^>]+>(Version \d+)<', html):
url, version = match.groups()
host = urlparse.urlsplit(url).hostname.replace('embed.', '')
hoster = {'hostname':'Putlocker','multi-part': False, 'host': host, 'quality': get_quality(video, host, QUALITIES.HIGH), 'views': None, 'rating': None, 'url': url, 'direct': False}
hoster['version'] = version
hosters.append(hoster)
return hosters
def putlocker_tv(name,movie_title):
try:
title = movie_title[:-7]
movie_year = movie_title[-6:]
year = movie_year.replace('(','').replace(')','')
video_type = 'shows'
show_url = search(video_type,title,year)
for e in show_url:
url = e['url']
newseas=re.compile('S(.+?)E(.+?) (?P<name>[A-Za-z\t .]+)').findall(name)
print newseas
for sea,epi,epi_title in newseas:
video = make_vid_params('Episode',title,year,sea,epi,epi_title,'')
ep_url = _get_episode_url(url, video,sea,epi)
hosters=get_sources(ep_url)
hosters = main_scrape.apply_urlresolver(hosters)
return hosters
except Exception as e:
hosters =[]
log_utils.log('Error [%s] %s' % (str(e), ''), xbmc.LOGERROR)
if kodi.get_setting('error_notify') == "true":
kodi.notify(header='Putlocker TV',msg='(error) %s %s' % (str(e), ''),duration=5000,sound=None)
return hosters
def putlocker_movies(movie_title):
try:
title = movie_title[:-7]
movie_year = movie_title[-6:]
year = movie_year.replace('(','').replace(')','')
video_type = 'movies'
show_url = search(video_type,title,year)
for e in show_url:
url = e['url']
hosters=get_sources(url)
print "HOSTERS ARE " + str(hosters)
hosters = main_scrape.apply_urlresolver(hosters)
return hosters
except Exception as e:
hosters =[]
log_utils.log('Error [%s] %s' % (str(e), ''), xbmc.LOGERROR)
if kodi.get_setting('error_notify') == "true":
kodi.notify(header='Putlocker Movies',msg='(error) %s %s' % (str(e), ''),duration=5000,sound=None)
return hosters
| gpl-2.0 | 3,591,408,890,174,926,000 | 40.033943 | 197 | 0.563311 | false | 3.40026 | false | false | false |
melodous/designate | designate/sqlalchemy/models.py | 1 | 1881 | # Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Patrick Galbraith <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.db.sqlalchemy import models
from oslo.db import exception as oslo_db_exc
from sqlalchemy import Column, DateTime
from sqlalchemy.exc import IntegrityError
from sqlalchemy.types import CHAR
from designate.openstack.common import timeutils
from designate import exceptions
class Base(models.ModelBase):
# TODO(ekarlso): Remove me when o.db patch lands for this.
def save(self, session):
"""Save this object"""
session.add(self)
try:
session.flush()
except oslo_db_exc.DBDuplicateEntry as e:
raise exceptions.Duplicate(str(e))
except IntegrityError:
raise
def delete(self, session):
session.delete(self)
session.flush()
# TODO(ekarlso): Get this into o.db?
class SoftDeleteMixin(object):
deleted = Column(CHAR(32), nullable=False, default="0", server_default="0")
deleted_at = Column(DateTime, nullable=True, default=None)
def soft_delete(self, session):
"""Mark this object as deleted."""
self.deleted = self.id.replace('-', '')
self.deleted_at = timeutils.utcnow()
if hasattr(self, 'status'):
self.status = "DELETED"
self.save(session=session)
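# Illustrative sketch (not part of the original module, names hypothetical): a
# concrete model would typically mix both helpers into a declarative class,
#
#    class Record(SoftDeleteMixin, Base, DeclarativeBase):
#        __tablename__ = 'records'
#        id = Column(CHAR(32), primary_key=True)
#
# so that record.soft_delete(session) fills `deleted`/`deleted_at` (and a
# `status` column, if present) instead of issuing a hard DELETE.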
| apache-2.0 | -4,693,961,072,000,867,000 | 32 | 79 | 0.694312 | false | 4.036481 | false | false | false |
biocore/verman | verman/__init__.py | 1 | 9290 | #!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013, The BiPy Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
__credits__ = ["Daniel McDonald", "Jai Ram Rideout", "Yoshiki Vazquez Baeza"]
import os
import subprocess
class Version(object):
"""Represent module version information
This is inspired by Python's sys.version_info
"""
def __init__(self, package, major, minor, micro=None, releaselevel=None, init_file=None):
if not isinstance(package, str):
raise TypeError("Package must be a string")
if not isinstance(major, int):
raise TypeError("Major version must be an integer")
if not isinstance(minor, int):
raise TypeError("Minor version must be an integer")
if micro is not None and not isinstance(micro, int):
raise TypeError("Micro version must be an integer")
if releaselevel is not None and not isinstance(releaselevel, str):
raise TypeError("Releaselevel must be a string")
if init_file is not None and not os.path.exists(init_file):
raise ValueError("init_file must exist if provided")
self.package = package
self.major = major
self.minor = minor
self.micro = micro
self.releaselevel = releaselevel
self.init_file = init_file
@property
def mmm(self):
"""major.minor.micro version string"""
if self.micro is None:
return "%d.%d" % (self.major, self.minor)
else:
return "%d.%d.%d" % (self.major, self.minor, self.micro)
def __str__(self):
"""Return a version string"""
if self.micro is None:
base = "%d.%d" % (self.major, self.minor)
else:
base = "%d.%d.%d" % (self.major, self.minor, self.micro)
if self.releaselevel is not None:
base = "%s-%s" % (base, self.releaselevel)
git_branch = self.git_branch()
git_sha1 = self.git_sha1()
if git_branch is not None:
return "%s, %s@%s" % (base, git_branch, git_sha1)
else:
return base
def __repr__(self):
"""Return version information similar to Python's sys.version_info"""
name = "%s_version" % self.package
major = "major=%d" % self.major
minor = "minor=%d" % self.minor
items = [major, minor]
if self.micro is not None:
items.append("micro=%s" % self.micro)
if self.releaselevel is not None:
items.append("releaselevel='%s'" % self.releaselevel)
git_branch = self.git_branch()
git_sha1 = self.git_sha1(truncate=False)
if git_branch is not None:
git_branch = "git_branch='%s'" % git_branch
git_sha1 = "git_sha1='%s'" % git_sha1
items.append(git_branch)
items.append(git_sha1)
return "%s(%s)" % (name, ', '.join(items))
def git_branch(self):
"""Get the current branch (if applicable)
This code was adapted from QIIME. The author, Yoshiki Vazquez Baeza has
given explicit permission for this code to be licensed under BSD. The
discussion can be found here https://github.com/wasade/verman/issues/1
"""
if self.init_file is None:
return None
pkg_dir = self.package_dir()
branch_cmd = 'git --git-dir %s/.git rev-parse --abbrev-ref HEAD' %\
(pkg_dir)
branch_o, branch_e, branch_r = self.verman_system_call(branch_cmd)
git_branch = branch_o.strip()
if self._is_valid_git_refname(git_branch):
return git_branch
else:
return None
def git_sha1(self, truncate=True):
"""Get the current git SHA1 (if applicable)
This code was adapted from QIIME. The author, Yoshiki Vazquez Baeza has
given explicit permission for this code to be licensed under BSD. The
discussion can be found here https://github.com/wasade/verman/issues/1
"""
if self.init_file is None:
return None
pkg_dir = self.package_dir()
sha_cmd = 'git --git-dir %s/.git rev-parse HEAD' % (pkg_dir)
sha_o, sha_e, sha_r = self.verman_system_call(sha_cmd)
git_sha = sha_o.strip()
if self._is_valid_git_sha1(git_sha):
if truncate:
return git_sha[0:7]
else:
return git_sha
else:
return None
def _is_valid_git_refname(self, refname):
"""check if a string is a valid branch-name/ref-name for git
Input:
refname: string to validate
Output:
True if 'refname' is a valid branch name in git. False if it fails to
meet any of the criteria described in the man page for
'git check-ref-format', also see:
http://www.kernel.org/pub/software/scm/git/docs/git-check-ref-format.html
This code was adapted from QIIME. The author, Yoshiki Vazquez Baeza has
given explicit permission for this code to be licensed under BSD. The
discussion can be found here https://github.com/wasade/verman/issues/1
"""
if len(refname) == 0:
return False
# git imposes a few requirements to accept a string as a
# refname/branch-name
# They can include slash / for hierarchical (directory) grouping, but no
# slash-separated component can begin with a dot . or end with the
# sequence .lock
if (len([True for element in refname.split('/')
if element.startswith('.') or element.endswith('.lock')]) != 0):
return False
# They cannot have two consecutive dots .. anywhere
if '..' in refname:
return False
# They cannot have ASCII control characters (i.e. bytes whose values are
# lower than \040, or \177 DEL), space, tilde, caret ^, or colon :
# anywhere
if len([True for refname_char in refname if ord(refname_char) < 40 or
ord(refname_char) == 177]) != 0:
return False
if ' ' in refname or '~' in refname or '^' in refname or ':' in refname:
return False
# They cannot have question-mark ?, asterisk *, or open bracket [
# anywhere
if '?' in refname or '*' in refname or '[' in refname:
return False
# They cannot begin or end with a slash / or contain multiple
# consecutive slashes
if refname.startswith('/') or refname.endswith('/') or '//' in refname:
return False
# They cannot end with a dot ..
if refname.endswith('.'):
return False
# They cannot contain a sequence @{
if '@{' in refname:
return False
# They cannot contain a \
if '\\' in refname:
return False
return True
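    # Illustrative examples (not part of the original module): under these
    # rules a name like "feature/new-parser" or "master" is accepted, while
    # "bad..name", "name.lock", "wip branch" (contains a space) and "release/"
    # (trailing slash) are all rejected.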
def _is_valid_git_sha1(self, possible_hash):
"""check if a string is a valid git sha1 string
Input:
possible_hash: string to validate
Output:
True if the string has 40 characters and is an hexadecimal number, False
otherwise.
This code was adapted from QIIME. The author, Yoshiki Vazquez Baeza has
given explicit permission for this code to be licensed under BSD. The
discussion can be found here https://github.com/wasade/verman/issues/1
"""
if len(possible_hash) != 40:
return False
try:
_ = int(possible_hash, 16)
except ValueError:
return False
return True
def package_dir(self):
"""Returns the top-level package directory
This code was adapted from QIIME. The author, Greg Caporaso, has given
explicit permission for this code to be licensed under BSD. The
discussion can be found here: https://github.com/wasade/verman/issues/1
"""
# Get the full path of the module containing an instance of Version
if self.init_file is None:
return None
current_file_path = os.path.abspath(self.init_file)
# Get the directory
current_dir_path = os.path.dirname(current_file_path)
# Return the directory containing the directory containing the instance
return os.path.dirname(current_dir_path)
def verman_system_call(self, cmd):
"""Issue a system call
This code is based off of pyqi's pyqi_system_call
"""
proc = subprocess.Popen(cmd, shell=True, universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# communicate pulls all stdout/stderr from the PIPEs to
# avoid blocking -- don't remove this line!
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
verman_version = Version("verman", 1, 1, 1, init_file=__file__)
__version__ = verman_version.mmm
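# Illustrative usage (not part of the original module): other packages declare
# their version the same way, e.g. (package name and numbers are made up)
#
#    my_version = Version("mypkg", 2, 0, 1, releaselevel="dev", init_file=__file__)
#    __version__ = my_version.mmm        # -> "2.0.1"
#    print(repr(my_version))             # adds git branch/sha when available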
| bsd-3-clause | -6,532,921,692,975,337,000 | 33.535316 | 93 | 0.583423 | false | 4.117908 | false | false | false |
CCSS-CZ/layman | server/tests/layedtest.py | 1 | 3969 | import os,sys
import unittest
import ConfigParser
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
INSTALL_DIR = os.path.abspath(os.path.join(TEST_DIR,".."))
sys.path.append(os.path.join(INSTALL_DIR))
import json
from layman.layed import LayEd
from layman.layed import GsRest
class LayEdTestCase(unittest.TestCase):
"""Test of the auth module"""
le = None # LayEd
workdir = None
cfg = None
def setUp(self):
cfg = ConfigParser.SafeConfigParser()
cfg.read((os.path.join(TEST_DIR,"tests.cfg")))
cfg.set("FileMan","testdir",TEST_DIR)
self.le = LayEd(cfg)
self.gsr = GsRest(cfg)
self.config = cfg
self.workdir = os.path.abspath(os.path.join(TEST_DIR,"workdir","data"))
# TODO: add tests for POST /layed?myLayer
def test_01_publish(self):
# ff = "world_cities_point.shp" # file
# ll = "world_cities_point" # layer
# st = "world_cities_point" # style
# ff = "pest.shp" # file
# ll = "pest" # layer
# st = "pest" # style
ff = "line_crs.shp" # file
ll = "line_crs" # layer
st = "line_crs" # style
ws = "mis" # workspace
ds = "testschema" # datastore
sch = "testschema" # schema
# Check #
# Check if the layer is not already there
(head, cont) = self.gsr.getLayer(ws, ll)
self.assertNotEquals("200", head["status"], "The layer already exists. Please, remove it manually." )
# Check if the style is not already there
(head, cont) = self.gsr.getStyle(ws, st)
self.assertNotEquals("200", head["status"], "The style already exists. Please, remove it manually." )
# Publish #
self.le.publish(fsUserDir=self.workdir, fsGroupDir="", dbSchema=ds, gsWorkspace=ws, fileName=ff)
# Test #
# Check if the layer is there
(head, cont) = self.gsr.getLayer(ws, ll)
self.assertEquals("200", head["status"], "The layer is not there. Was it created under another name?")
# Check the style of the layer
layerJson = json.loads(cont)
styleName = layerJson["layer"]["defaultStyle"]["name"]
self.assertEquals(st, styleName, "The layer is there, but it has wrong style assinged.")
# Check if the style is there
(head, cont) = self.gsr.getStyle(ws, st)
self.assertEquals("200", head["status"], "The style is not there." )
#def test_02_delete(self):
# Checks #
# Check that the layer is there
#(head, cont) = self.gsr.getLayer("dragouni", "line_crs")
#self.assertEquals("200", head["status"], "The layer line_crs is not there. Was it created under another name?")
# Check that the style is there
#(head, cont) = self.gsr.getStyle("dragouni", "line_crs")
#self.assertEquals("200", head["status"], "The style line_crs is not there." )
# Delete #
# Delete layer (including feature type, style and datastore)
#self.le.deleteLayer(workspace="dragouni", layer="line_crs", deleteStore=True)
# Test #
# Check that the layer is not there
#(head, cont) = self.gsr.getLayer("dragouni", "line_crs")
#self.assertNotEquals("200", head["status"], "The layer line_crs still exists, should be already deleted." )
# Check that the style is not there
#(head, cont) = self.gsr.getStyle("dragouni", "line_crs")
#self.assertNotEquals("200", head["status"], "The style line_crs already exists, should be already deleted." )
# Check that the data store is not there
#(head, cont) = self.gsr.getDataStore("dragouni", "line_crs")
#self.assertNotEquals("200", head["status"], "The data store line_crs already exists, should be already deleted." )
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(LayEdTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
| gpl-3.0 | -507,578,213,640,657,300 | 35.75 | 123 | 0.615772 | false | 3.595109 | true | false | false |
bugsnag/bugsnag-python | bugsnag/sessiontracker.py | 1 | 4934 | from copy import deepcopy
from uuid import uuid4
from time import strftime, gmtime
from threading import Lock, Timer
from typing import List, Dict, Callable
import atexit
try:
from contextvars import ContextVar
_session_info = ContextVar('bugsnag-session', default={}) # type: ignore
except ImportError:
from bugsnag.utils import ThreadContextVar
# flake8: noqa
_session_info = ThreadContextVar('bugsnag-session', default={}) # type: ignore
from bugsnag.utils import package_version, FilterDict, SanitizingJSONEncoder
from bugsnag.event import Event
__all__ = [] # type: List[str]
class SessionTracker:
MAXIMUM_SESSION_COUNT = 100
SESSION_PAYLOAD_VERSION = "1.0"
"""
Session tracking class for Bugsnag
"""
def __init__(self, configuration):
self.session_counts = {} # type: Dict[str, int]
self.config = configuration
self.mutex = Lock()
self.auto_sessions = False
self.delivery_thread = None
def start_session(self):
if not self.auto_sessions and self.config.auto_capture_sessions:
self.auto_sessions = True
self.__start_delivery()
start_time = strftime('%Y-%m-%dT%H:%M:00', gmtime())
new_session = {
'id': uuid4().hex,
'startedAt': start_time,
'events': {
'handled': 0,
'unhandled': 0
}
}
_session_info.set(new_session)
self.__queue_session(start_time)
def send_sessions(self):
self.mutex.acquire()
try:
sessions = []
for min_time, count in self.session_counts.items():
sessions.append({
'startedAt': min_time,
'sessionsStarted': count
})
self.session_counts = {}
finally:
self.mutex.release()
self.__deliver(sessions)
def __start_delivery(self):
if self.delivery_thread is None:
def deliver():
self.send_sessions()
self.delivery_thread = Timer(30.0, deliver)
self.delivery_thread.daemon = True
self.delivery_thread.start()
self.delivery_thread = Timer(30.0, deliver)
self.delivery_thread.daemon = True
self.delivery_thread.start()
def cleanup():
if self.delivery_thread is not None:
self.delivery_thread.cancel()
self.send_sessions()
atexit.register(cleanup)
def __queue_session(self, start_time: str):
self.mutex.acquire()
try:
if start_time not in self.session_counts:
self.session_counts[start_time] = 0
self.session_counts[start_time] += 1
finally:
self.mutex.release()
def __deliver(self, sessions: List[Dict]):
if not sessions:
self.config.logger.debug("No sessions to deliver")
return
if not self.config.api_key:
self.config.logger.debug(
"Not delivering due to an invalid api_key"
)
return
if not self.config.should_notify():
self.config.logger.debug("Not delivering due to release_stages")
return
notifier_version = package_version('bugsnag') or 'unknown'
payload = {
'notifier': {
'name': Event.NOTIFIER_NAME,
'url': Event.NOTIFIER_URL,
'version': notifier_version
},
'device': FilterDict({
'hostname': self.config.hostname,
'runtimeVersions': self.config.runtime_versions
}),
'app': {
'releaseStage': self.config.release_stage,
'version': self.config.app_version
},
'sessionCounts': sessions
}
try:
encoder = SanitizingJSONEncoder(
self.config.logger,
separators=(',', ':'),
keyword_filters=self.config.params_filters
)
encoded_payload = encoder.encode(payload)
self.config.delivery.deliver_sessions(self.config, encoded_payload)
except Exception as e:
self.config.logger.exception('Sending sessions failed %s', e)
class SessionMiddleware:
"""
Session middleware ensures that a session is appended to the event.
"""
def __init__(self, bugsnag: Callable[[Event], Callable]):
self.bugsnag = bugsnag
def __call__(self, event: Event):
session = _session_info.get()
if session:
if event.unhandled:
session['events']['unhandled'] += 1
else:
session['events']['handled'] += 1
event.session = deepcopy(session)
self.bugsnag(event)
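# Illustrative note (not part of the original module): the two classes work as
# a pair. An integration (e.g. a web framework hook) is expected to call
# SessionTracker.start_session() at the start of each request/task, which
# stores a session dict in the context variable above and queues a
# minute-bucketed count for periodic delivery; SessionMiddleware then copies
# that session onto every Event and bumps its handled/unhandled counters
# before the event is sent.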
| mit | 8,336,604,068,259,510,000 | 30.031447 | 83 | 0.551074 | false | 4.397504 | true | false | false |
BirchJD/RPiTimer | PiTimer_Step-4/Schedule.py | 1 | 5941 | # PiTimer - Python Hardware Programming Education Project For Raspberry Pi
# Copyright (C) 2015 Jason Birch
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#/****************************************************************************/
#/* PiTimer - Step 4 - Schedule functions. */
#/* ------------------------------------------------------------------------ */
#/* V1.00 - 2015-07-04 - Jason Birch */
#/* ------------------------------------------------------------------------ */
#/* Class to handle scheduling events for specific relays. Such as adding, */
#/* removing, displaying, sorting. */
#/****************************************************************************/
import datetime
import ScheduleItem
#/****************************************************************************/
#/* Function to return the schedule date of a schedule item for when sorting */
#/* the items using the Python list sort feature. */
#/****************************************************************************/
def SortGetKey(Object):
return Object.GetScheduleDate()
class Schedule:
def __init__(self):
# Define an array to store the schedule items in.
self.ScheduleItems = []
#/*********************************************/
#/* Get the item at the specific array index. */
#/*********************************************/
def GetItem(self, ItemIndex):
if len(self.ScheduleItems) > ItemIndex:
return self.ScheduleItems[ItemIndex]
else:
return False
#/**************************************************/
#/* Find the schedule item with the specificed ID. */
#/**************************************************/
def FindItem(self, FindItemID):
ThisItem = False
for ThisScheduleItem in self.ScheduleItems:
if ThisScheduleItem.GetItemID() == FindItemID:
ThisItem = ThisScheduleItem
return ThisItem
#/*******************************************************/
#/* Function to display the current schedule of events. */
#/* In a tabulated form. */
#/*******************************************************/
def DisplaySchedule(self, SelectedItemID):
if len(self.ScheduleItems):
self.ScheduleItems[0].DisplayHeader()
for ThisScheduleItem in self.ScheduleItems:
if SelectedItemID == ThisScheduleItem.GetItemID():
SelectLeftChar = ">"
SelectRightChar = "<"
else:
SelectLeftChar = " "
SelectRightChar = " "
ThisScheduleItem.DisplayItem(SelectLeftChar, SelectRightChar)
if len(self.ScheduleItems):
self.ScheduleItems[0].DisplayFooter()
#/*************************************************/
#/* Add a new schedule item to the schedle array. */
#/*************************************************/
def AddSchedule(self, NewRelayNumber, NewScheduleDate, NewRelayState, NewRepeat):
self.ScheduleItems.append(ScheduleItem.ScheduleItem(NewRelayNumber, NewScheduleDate, NewRelayState, NewRepeat))
self.SortSchedule()
#/**************************************************/
#/* Delete a schedule item from the schedle array. */
#/**************************************************/
def DelSchedule(self, ItemID):
ThisScheduleItem = self.FindItem(ItemID)
if ThisScheduleItem:
self.ScheduleItems.remove(ThisScheduleItem)
self.SortSchedule()
#/*********************************************/
#/* Sort the list of schedule items so the */
#/* expired items are at the top of the list. */
#/*********************************************/
def SortSchedule(self):
self.ScheduleItems.sort(key=SortGetKey)
#/*************************************************************************/
#/* If the top schedule item is in the past return it's ID as being */
#/* triggered. The schedule items are kept in date order, so the top item */
#/* is the one which will trigger first. The calling function is */
#/* responsible for removing the triggered item from the scheduled items */
#/* or updating the scheduled item if the item is to be repeated once */
#/* the calling function has processed it; by calling the function: */
#/* SetNextScheduleDate(). */
#/*************************************************************************/
def ScheduleTrigger(self):
ThisItemID = False
Now = datetime.datetime.now()
ThisItem = self.GetItem(0)
if ThisItem and ThisItem.GetScheduleDate() <= Now:
ThisItemID = ThisItem.GetItemID()
return ThisItemID
#/*********************************************************************/
#/* Set the date and time of the specified schedule item to it's next */
#/* trigger date/time. If the item does not have a repeat period, */
#/* remove the schedule item. */
#/*********************************************************************/
def SetNextScheduleDate(self, ThisItemID):
ThisItem = self.FindItem(ThisItemID)
if ThisItem and ThisItem.SetNextScheduleDate() == False:
self.DelSchedule(ThisItemID)
self.SortSchedule()
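# Illustrative usage sketch (not part of the original project): a caller would
# typically poll the schedule from its main loop, e.g.
#    MySchedule = Schedule()
#    MySchedule.AddSchedule(1, datetime.datetime.now(), True, RepeatPeriod)
#    ItemID = MySchedule.ScheduleTrigger()
#    if ItemID:
#       # ...switch the relay here, then...
#       MySchedule.SetNextScheduleDate(ItemID)
# RepeatPeriod stands for whatever repeat value ScheduleItem expects; it is
# defined in ScheduleItem.py, not shown here.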
| gpl-3.0 | -9,058,749,069,529,155,000 | 40.838028 | 117 | 0.497391 | false | 4.837948 | false | false | false |
dweisz/pydolphot | make_fakerun.py | 1 | 2967 | import numpy as np
import sys
import subprocess
import os
'''
def makephotfiles(base, nstart, nruns, nimages):
for i in range(nstart,nstart+nruns):
for j in range(1, nimages+1):
subprocess.call("ln -s "+base+"."+np.str(j)+".res.fits " + base+"_"+np.str(i)+"."+np.str(j)+".res.fits", shell=True)
subprocess.call("ln -s "+base+"."+np.str(j)+".psf.fits " + base+"_"+np.str(i)+"."+np.str(j)+".psf.fits", shell=True)
subprocess.call("ln -s "+base+".info " + base+"_"+np.str(i)+".info", shell=True)
subprocess.call("ln -s "+base+".apcor " + base+"_"+np.str(i)+".apcor", shell=True)
subprocess.call("ln -s "+base+".psfs " + base+"_"+np.str(i)+".psfs", shell=True)
subprocess.call("ln -s "+base+".columns " + base+"_"+np.str(i)+".columns", shell=True)
subprocess.call("ln -s "+base + " " + base+"_"+np.str(i), shell=True)
'''
def makefakelist(photfile, filter1, filter2, fmin, fmax, cmin, cmax, nruns, nstars=15000, nstart=1):
for i in range(nstart, nstart+nruns):
subprocess.call('fakelist '+ np.str(photfile) + ' ' + np.str(filter1) + ' ' + np.str(filter2) + ' ' + np.str(fmin) + ' ' + np.str(fmax) + ' ' + np.str(cmin) + ' ' + np.str(cmax) + ' ' + "-nstar=" + np.str(nstars) + "> fake.list_" + np.str(i), shell=True)
subprocess.call('sleep 5', shell=True )
def makefakeparam(param_file, base, nruns, nstart=1):
infile = param_file
for i in range(nstart, nstart+nruns):
fakeparam = "phot.fake_"+np.str(i)+".param"
subprocess.call("cp "+infile+" "+fakeparam, shell=True)
outfile = fakeparam
f1 = open(fakeparam, 'a')
f1.write("ACSuseCTE = 1\n")
f1.write("WFC3useCTE = 1\n")
f1.write("RandomFake = 1\n")
f1.write("FakeMatch=3.0\n")
f1.write("FakePad=0\n")
f1.write("FakeStarPSF = 1.5\n")
f1.write("FakeOut="+base+"_fake_"+np.str(i)+".fake\n")
f1.write("FakeStars=fake.list_"+np.str(i)+"\n")
f1.close()
def makerunfake(param_file, base, nruns, nstart=1):
for i in range(nstart, nstart+nruns):
fakeparam = "phot.fake_"+np.str(i)+".param"
outfile = "runfake"+np.str(i)
f = open(outfile, 'w')
f.write("cd " + os.getcwd()+"\n")
f.write("dolphot " + base+ " -p" + fakeparam + " >> fake.log_"+np.str(i))
f.close()
subprocess.call("chmod +x " + outfile, shell=True)
'''
cd /clusterfs/dweisz/photometry/leop/
dolphot leop_acs.phot_1 -pleop.fake.param_1 >> fake1.log
'''
#if __name__ == '__main__':
base = sys.argv[1] # e.g., test.phot
#rundir = sys.argv[2]
#nimages = np.int(sys.argv[3])
#name = sys.argv[3]
param_file = sys.argv[2] # name of photometry parameter file
nruns = np.int(sys.argv[3])
filters = sys.argv[4]
f1min = np.float(sys.argv[5])
f1max = np.float(sys.argv[6])
c1min = np.float(sys.argv[7])
c1max = np.float(sys.argv[8])
#nimages = 12
#nruns = 72
#makephotfiles(base, 1, nruns , nimages)
makefakeparam(param_file, base, nruns)
makerunfake(param_file, base, nruns)
makefakelist(base, filters.split()[0], filters.split()[1], f1min, f1max, c1min, c1max, nruns)
#main()
| mit | 4,988,895,559,438,587,000 | 31.25 | 256 | 0.624874 | false | 2.345455 | false | false | false |
collab-project/luma.cryptocurrency | luma/cryptocurrency/endpoint/coinmarketcap.py | 1 | 1134 | # -*- coding: utf-8 -*-
# Copyright (c) 2017 Thijs Triemstra and contributors
# See LICENSE.rst for details.
"""
Endpoint for coinmarketcap.com
:see: https://coinmarketcap.com/api/
"""
from datetime import datetime
from dateutil.tz.tz import tzutc
from . import Endpoint, EndpointResponse
class CoinmarketcapResponse(EndpointResponse):
@property
def data(self):
return self.json_data[0]
def parse_price(self):
return float(self.data.get('price_{}'.format(
self.currency_code.lower())))
def parse_price_in_btc(self):
return float(self.data.get('price_btc'))
def parse_timestamp(self):
return datetime.fromtimestamp(
int(self.data.get('last_updated')), tz=tzutc())
class Coinmarketcap(Endpoint):
responseType = CoinmarketcapResponse
def get_url(self):
base = 'https://api.coinmarketcap.com/{api_version}/ticker/{coin}/'
if self.currency_code != 'USD':
base += '?convert={}'.format(self.currency_code)
return base.format(
api_version=self.api_version,
coin=self.coin
)
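# Illustrative note (not part of the original module): for a coin such as
# "bitcoin" with api_version "v1" and a non-USD currency code like "EUR",
# get_url() yields
#    https://api.coinmarketcap.com/v1/ticker/bitcoin/?convert=EUR
# (the ?convert part is dropped when the currency is USD). The coin and
# currency values here are only examples.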
| mit | 1,155,416,785,745,923,300 | 22.625 | 75 | 0.640212 | false | 3.623003 | false | false | false |
rohitwaghchaure/New_Theme_Erp | erpnext/stock/doctype/stock_entry/stock_entry.py | 1 | 34617 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
from frappe.utils import cstr, cint, flt, comma_or, nowdate
from frappe import _
from erpnext.stock.utils import get_incoming_rate
from erpnext.stock.stock_ledger import get_previous_sle
from erpnext.controllers.queries import get_match_cond
from erpnext.stock.get_item_details import get_available_qty
class NotUpdateStockError(frappe.ValidationError): pass
class StockOverReturnError(frappe.ValidationError): pass
class IncorrectValuationRateError(frappe.ValidationError): pass
class DuplicateEntryForProductionOrderError(frappe.ValidationError): pass
from erpnext.controllers.stock_controller import StockController
form_grid_templates = {
"mtn_details": "templates/form_grid/stock_entry_grid.html"
}
class StockEntry(StockController):
fname = 'mtn_details'
def onload(self):
if self.docstatus==1:
for item in self.get(self.fname):
item.update(get_available_qty(item.item_code,
item.s_warehouse))
def validate(self):
self.validate_posting_time()
self.validate_purpose()
pro_obj = self.production_order and \
frappe.get_doc('Production Order', self.production_order) or None
self.set_transfer_qty()
self.validate_item()
self.validate_uom_is_integer("uom", "qty")
self.validate_uom_is_integer("stock_uom", "transfer_qty")
self.validate_warehouse(pro_obj)
self.validate_production_order(pro_obj)
self.get_stock_and_rate()
self.validate_incoming_rate()
self.validate_bom()
self.validate_finished_goods()
self.validate_return_reference_doc()
self.validate_with_material_request()
self.validate_fiscal_year()
self.validate_valuation_rate()
self.set_total_amount()
def on_submit(self):
from erpnext.stock.stock_custom_methods import validate_for_si_submitted
validate_for_si_submitted(self)
self.update_stock_ledger()
from erpnext.stock.doctype.serial_no.serial_no import update_serial_nos_after_submit
update_serial_nos_after_submit(self, "mtn_details")
self.update_production_order()
self.make_gl_entries()
def on_cancel(self):
self.update_stock_ledger()
self.update_production_order()
self.make_gl_entries_on_cancel()
def validate_fiscal_year(self):
from erpnext.accounts.utils import validate_fiscal_year
validate_fiscal_year(self.posting_date, self.fiscal_year,
self.meta.get_label("posting_date"))
def validate_purpose(self):
valid_purposes = ["Material Issue", "Material Receipt", "Material Transfer",
"Manufacture/Repack", "Subcontract", "Sales Return", "Purchase Return"]
if self.purpose not in valid_purposes:
frappe.throw(_("Purpose must be one of {0}").format(comma_or(valid_purposes)))
def set_transfer_qty(self):
for item in self.get("mtn_details"):
if not flt(item.qty):
frappe.throw(_("Row {0}: Qty is mandatory").format(item.idx))
item.transfer_qty = flt(item.qty * item.conversion_factor, self.precision("transfer_qty", item))
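	# Illustrative note (not part of the original code): transfer_qty is the
	# quantity expressed in the item's stock UOM, e.g. a row entered as qty 2
	# with UOM "Box" and a conversion factor of 12 moves transfer_qty = 24
	# pieces. The item and numbers are made-up examples.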
def validate_item(self):
stock_items = self.get_stock_items()
serialized_items = self.get_serialized_items()
for item in self.get("mtn_details"):
if item.item_code not in stock_items:
frappe.throw(_("{0} is not a stock Item").format(item.item_code))
if not item.stock_uom:
item.stock_uom = frappe.db.get_value("Item", item.item_code, "stock_uom")
if not item.uom:
item.uom = item.stock_uom
if not item.conversion_factor:
item.conversion_factor = 1
if not item.transfer_qty:
item.transfer_qty = item.qty * item.conversion_factor
if (self.purpose in ("Material Transfer", "Sales Return", "Purchase Return")
and not item.serial_no
and item.item_code in serialized_items):
frappe.throw(_("Row #{0}: Please specify Serial No for Item {1}").format(item.idx, item.item_code),
frappe.MandatoryError)
def validate_warehouse(self, pro_obj):
"""perform various (sometimes conditional) validations on warehouse"""
source_mandatory = ["Material Issue", "Material Transfer", "Purchase Return"]
target_mandatory = ["Material Receipt", "Material Transfer", "Sales Return"]
validate_for_manufacture_repack = any([d.bom_no for d in self.get("mtn_details")])
if self.purpose in source_mandatory and self.purpose not in target_mandatory:
self.to_warehouse = None
for d in self.get('mtn_details'):
d.t_warehouse = None
elif self.purpose in target_mandatory and self.purpose not in source_mandatory:
self.from_warehouse = None
for d in self.get('mtn_details'):
d.s_warehouse = None
for d in self.get('mtn_details'):
if not d.s_warehouse and not d.t_warehouse:
d.s_warehouse = self.from_warehouse
d.t_warehouse = self.to_warehouse
if not (d.s_warehouse or d.t_warehouse):
frappe.throw(_("Atleast one warehouse is mandatory"))
if self.purpose in source_mandatory and not d.s_warehouse:
frappe.throw(_("Source warehouse is mandatory for row {0}").format(d.idx))
if self.purpose in target_mandatory and not d.t_warehouse:
frappe.throw(_("Target warehouse is mandatory for row {0}").format(d.idx))
if self.purpose == "Manufacture/Repack":
if validate_for_manufacture_repack:
if d.bom_no:
d.s_warehouse = None
if not d.t_warehouse:
frappe.throw(_("Target warehouse is mandatory for row {0}").format(d.idx))
elif pro_obj and cstr(d.t_warehouse) != pro_obj.fg_warehouse:
frappe.throw(_("Target warehouse in row {0} must be same as Production Order").format(d.idx))
else:
d.t_warehouse = None
if not d.s_warehouse:
frappe.throw(_("Source warehouse is mandatory for row {0}").format(d.idx))
if cstr(d.s_warehouse) == cstr(d.t_warehouse):
frappe.throw(_("Source and target warehouse cannot be same for row {0}").format(d.idx))
def validate_production_order(self, pro_obj=None):
if not pro_obj:
if self.production_order:
pro_obj = frappe.get_doc('Production Order', self.production_order)
else:
return
if self.purpose == "Manufacture/Repack":
# check for double entry
self.check_duplicate_entry_for_production_order()
elif self.purpose != "Material Transfer":
self.production_order = None
def check_duplicate_entry_for_production_order(self):
other_ste = [t[0] for t in frappe.db.get_values("Stock Entry", {
"production_order": self.production_order,
"purpose": self.purpose,
"docstatus": ["!=", 2],
"name": ["!=", self.name]
}, "name")]
if other_ste:
production_item, qty = frappe.db.get_value("Production Order",
self.production_order, ["production_item", "qty"])
args = other_ste + [production_item]
fg_qty_already_entered = frappe.db.sql("""select sum(transfer_qty)
from `tabStock Entry Detail`
where parent in (%s)
and item_code = %s
				and ifnull(s_warehouse,'')='' """ % (", ".join(["%s"] * len(other_ste)), "%s"), args)[0][0]
if fg_qty_already_entered >= qty:
frappe.throw(_("Stock Entries already created for Production Order ")
+ self.production_order + ":" + ", ".join(other_ste), DuplicateEntryForProductionOrderError)
def validate_valuation_rate(self):
if self.purpose == "Manufacture/Repack":
valuation_at_source, valuation_at_target = 0, 0
for d in self.get("mtn_details"):
if d.s_warehouse and not d.t_warehouse:
valuation_at_source += flt(d.amount)
if d.t_warehouse and not d.s_warehouse:
valuation_at_target += flt(d.amount)
if valuation_at_target < valuation_at_source:
frappe.throw(_("Total valuation for manufactured or repacked item(s) can not be less than total valuation of raw materials"))
def set_total_amount(self):
self.total_amount = sum([flt(item.amount) for item in self.get("mtn_details")])
def get_stock_and_rate(self, force=False):
"""get stock and incoming rate on posting date"""
raw_material_cost = 0.0
if not self.posting_date or not self.posting_time:
frappe.throw(_("Posting date and posting time is mandatory"))
allow_negative_stock = cint(frappe.db.get_default("allow_negative_stock"))
for d in self.get('mtn_details'):
args = frappe._dict({
"item_code": d.item_code,
"warehouse": d.s_warehouse or d.t_warehouse,
"posting_date": self.posting_date,
"posting_time": self.posting_time,
"qty": d.s_warehouse and -1*d.transfer_qty or d.transfer_qty,
"serial_no": d.serial_no
})
# get actual stock at source warehouse
d.actual_qty = get_previous_sle(args).get("qty_after_transaction") or 0
# validate qty during submit
if d.docstatus==1 and d.s_warehouse and not allow_negative_stock and d.actual_qty < d.transfer_qty:
frappe.throw(_("""Row {0}: Qty not avalable in warehouse {1} on {2} {3}.
Available Qty: {4}, Transfer Qty: {5}""").format(d.idx, d.s_warehouse,
self.posting_date, self.posting_time, d.actual_qty, d.transfer_qty))
# get incoming rate
if not d.bom_no:
if not flt(d.incoming_rate) or d.s_warehouse or self.purpose == "Sales Return" or force:
incoming_rate = flt(self.get_incoming_rate(args), self.precision("incoming_rate", d))
if incoming_rate > 0:
d.incoming_rate = incoming_rate
d.amount = flt(d.transfer_qty) * flt(d.incoming_rate)
if not d.t_warehouse:
raw_material_cost += flt(d.amount)
# set incoming rate for fg item
if self.purpose == "Manufacture/Repack":
number_of_fg_items = len([t.t_warehouse for t in self.get("mtn_details") if t.t_warehouse])
for d in self.get("mtn_details"):
if d.bom_no or (d.t_warehouse and number_of_fg_items == 1):
if not flt(d.incoming_rate) or force:
operation_cost_per_unit = 0
if d.bom_no:
bom = frappe.db.get_value("BOM", d.bom_no, ["operating_cost", "quantity"], as_dict=1)
operation_cost_per_unit = flt(bom.operating_cost) / flt(bom.quantity)
d.incoming_rate = operation_cost_per_unit + (raw_material_cost / flt(d.transfer_qty))
d.amount = flt(d.transfer_qty) * flt(d.incoming_rate)
break
def get_incoming_rate(self, args):
incoming_rate = 0
if self.purpose == "Sales Return":
incoming_rate = self.get_incoming_rate_for_sales_return(args)
else:
incoming_rate = get_incoming_rate(args)
return incoming_rate
def get_incoming_rate_for_sales_return(self, args):
incoming_rate = 0.0
if (self.delivery_note_no or self.sales_invoice_no) and args.get("item_code"):
incoming_rate = frappe.db.sql("""select abs(ifnull(stock_value_difference, 0) / actual_qty)
from `tabStock Ledger Entry`
where voucher_type = %s and voucher_no = %s and item_code = %s limit 1""",
((self.delivery_note_no and "Delivery Note" or "Sales Invoice"),
self.delivery_note_no or self.sales_invoice_no, args.item_code))
incoming_rate = incoming_rate[0][0] if incoming_rate else 0.0
return incoming_rate
def validate_incoming_rate(self):
for d in self.get('mtn_details'):
if d.t_warehouse:
self.validate_value("incoming_rate", ">", 0, d, raise_exception=IncorrectValuationRateError)
def validate_bom(self):
for d in self.get('mtn_details'):
if d.bom_no and not frappe.db.sql("""select name from `tabBOM`
where item = %s and name = %s and docstatus = 1 and is_active = 1""",
(d.item_code, d.bom_no)):
frappe.throw(_("BOM {0} is not submitted or inactive BOM for Item {1}").format(d.bom_no, d.item_code))
def validate_finished_goods(self):
"""validation: finished good quantity should be same as manufacturing quantity"""
for d in self.get('mtn_details'):
if d.bom_no and flt(d.transfer_qty) != flt(self.fg_completed_qty):
frappe.throw(_("Quantity in row {0} ({1}) must be same as manufactured quantity {2}").format(d.idx, d.transfer_qty, self.fg_completed_qty))
def validate_return_reference_doc(self):
"""validate item with reference doc"""
ref = get_return_doc_and_details(self)
if ref.doc:
# validate docstatus
if ref.doc.docstatus != 1:
frappe.throw(_("{0} {1} must be submitted").format(ref.doc.doctype, ref.doc.name),
frappe.InvalidStatusError)
# update stock check
if ref.doc.doctype == "Sales Invoice" and cint(ref.doc.update_stock) != 1:
frappe.throw(_("'Update Stock' for Sales Invoice {0} must be set").format(ref.doc.name), NotUpdateStockError)
# posting date check
ref_posting_datetime = "%s %s" % (cstr(ref.doc.posting_date),
cstr(ref.doc.posting_time) or "00:00:00")
this_posting_datetime = "%s %s" % (cstr(self.posting_date),
cstr(self.posting_time))
if this_posting_datetime < ref_posting_datetime:
from frappe.utils.dateutils import datetime_in_user_format
frappe.throw(_("Posting timestamp must be after {0}").format(datetime_in_user_format(ref_posting_datetime)))
stock_items = get_stock_items_for_return(ref.doc, ref.parentfields)
already_returned_item_qty = self.get_already_returned_item_qty(ref.fieldname)
for item in self.get("mtn_details"):
# validate if item exists in the ref doc and that it is a stock item
if item.item_code not in stock_items:
frappe.throw(_("Item {0} does not exist in {1} {2}").format(item.item_code, ref.doc.doctype, ref.doc.name),
frappe.DoesNotExistError)
# validate quantity <= ref item's qty - qty already returned
if self.purpose == "Purchase Return":
ref_item_qty = sum([flt(d.qty)*flt(d.conversion_factor) for d in ref.doc.get({"item_code": item.item_code})])
elif self.purpose == "Sales Return":
ref_item_qty = sum([flt(d.qty) for d in ref.doc.get({"item_code": item.item_code})])
returnable_qty = ref_item_qty - flt(already_returned_item_qty.get(item.item_code))
if not returnable_qty:
frappe.throw(_("Item {0} has already been returned").format(item.item_code), StockOverReturnError)
elif item.transfer_qty > returnable_qty:
frappe.throw(_("Cannot return more than {0} for Item {1}").format(returnable_qty, item.item_code),
StockOverReturnError)
def get_already_returned_item_qty(self, ref_fieldname):
return dict(frappe.db.sql("""select item_code, sum(transfer_qty) as qty
from `tabStock Entry Detail` where parent in (
select name from `tabStock Entry` where `%s`=%s and docstatus=1)
group by item_code""" % (ref_fieldname, "%s"), (self.get(ref_fieldname),)))
def update_stock_ledger(self):
sl_entries = []
for d in self.get('mtn_details'):
if cstr(d.s_warehouse) and self.docstatus == 1:
sl_entries.append(self.get_sl_entries(d, {
"warehouse": cstr(d.s_warehouse),
"actual_qty": -flt(d.transfer_qty),
"incoming_rate": 0
}))
if cstr(d.t_warehouse):
sl_entries.append(self.get_sl_entries(d, {
"warehouse": cstr(d.t_warehouse),
"actual_qty": flt(d.transfer_qty),
"incoming_rate": flt(d.incoming_rate)
}))
# On cancellation, make stock ledger entry for
# target warehouse first, to update serial no values properly
if cstr(d.s_warehouse) and self.docstatus == 2:
sl_entries.append(self.get_sl_entries(d, {
"warehouse": cstr(d.s_warehouse),
"actual_qty": -flt(d.transfer_qty),
"incoming_rate": 0
}))
self.make_sl_entries(sl_entries, self.amended_from and 'Yes' or 'No')
def update_production_order(self):
def _validate_production_order(pro_doc):
if flt(pro_doc.docstatus) != 1:
frappe.throw(_("Production Order {0} must be submitted").format(self.production_order))
if pro_doc.status == 'Stopped':
frappe.throw(_("Transaction not allowed against stopped Production Order {0}").format(self.production_order))
if self.production_order:
pro_doc = frappe.get_doc("Production Order", self.production_order)
_validate_production_order(pro_doc)
pro_doc.run_method("update_status")
if self.purpose == "Manufacture/Repack":
pro_doc.run_method("update_produced_qty")
self.update_planned_qty(pro_doc)
def update_planned_qty(self, pro_doc):
from erpnext.stock.utils import update_bin
update_bin({
"item_code": pro_doc.production_item,
"warehouse": pro_doc.fg_warehouse,
"posting_date": self.posting_date,
"planned_qty": (self.docstatus==1 and -1 or 1 ) * flt(self.fg_completed_qty)
})
def get_item_details(self, args):
item = frappe.db.sql("""select stock_uom, description, item_name,
expense_account, buying_cost_center from `tabItem`
where name = %s and (ifnull(end_of_life,'0000-00-00')='0000-00-00' or end_of_life > now())""",
(args.get('item_code')), as_dict = 1)
if not item:
frappe.throw(_("Item {0} is not active or end of life has been reached").format(args.get("item_code")))
ret = {
'uom' : item and item[0]['stock_uom'] or '',
'stock_uom' : item and item[0]['stock_uom'] or '',
'description' : item and item[0]['description'] or '',
'item_name' : item and item[0]['item_name'] or '',
'expense_account' : args.get("expense_account") \
or frappe.db.get_value("Company", args.get("company"), "stock_adjustment_account"),
'cost_center' : item and item[0]['buying_cost_center'] or args.get("cost_center"),
'qty' : 0,
'transfer_qty' : 0,
'conversion_factor' : 1,
'batch_no' : '',
'actual_qty' : 0,
'incoming_rate' : 0
}
stock_and_rate = args.get('warehouse') and self.get_warehouse_details(args) or {}
ret.update(stock_and_rate)
return ret
def get_uom_details(self, args):
conversion_factor = frappe.db.get_value("UOM Conversion Detail", {"parent": args.get("item_code"),
"uom": args.get("uom")}, "conversion_factor")
if not conversion_factor:
frappe.msgprint(_("UOM coversion factor required for UOM: {0} in Item: {1}")
.format(args.get("uom"), args.get("item_code")))
ret = {'uom' : ''}
else:
ret = {
'conversion_factor' : flt(conversion_factor),
'transfer_qty' : flt(args.get("qty")) * flt(conversion_factor)
}
return ret
def get_warehouse_details(self, args):
ret = {}
if args.get('warehouse') and args.get('item_code'):
args.update({
"posting_date": self.posting_date,
"posting_time": self.posting_time,
})
args = frappe._dict(args)
ret = {
"actual_qty" : get_previous_sle(args).get("qty_after_transaction") or 0,
"incoming_rate" : self.get_incoming_rate(args)
}
return ret
def get_items(self):
self.set('mtn_details', [])
pro_obj = None
if self.production_order:
# common validations
pro_obj = frappe.get_doc('Production Order', self.production_order)
if pro_obj:
self.validate_production_order(pro_obj)
self.bom_no = pro_obj.bom_no
else:
# invalid production order
self.production_order = None
if self.bom_no:
if self.purpose in ["Material Issue", "Material Transfer", "Manufacture/Repack",
"Subcontract"]:
if self.production_order and self.purpose == "Material Transfer":
item_dict = self.get_pending_raw_materials(pro_obj)
else:
if not self.fg_completed_qty:
frappe.throw(_("Manufacturing Quantity is mandatory"))
item_dict = self.get_bom_raw_materials(self.fg_completed_qty)
for item in item_dict.values():
if pro_obj:
item["from_warehouse"] = pro_obj.wip_warehouse
item["to_warehouse"] = ""
# add raw materials to Stock Entry Detail table
self.add_to_stock_entry_detail(item_dict)
# add finished good item to Stock Entry Detail table -- along with bom_no
if self.production_order and self.purpose == "Manufacture/Repack":
item = frappe.db.get_value("Item", pro_obj.production_item, ["item_name",
"description", "stock_uom", "expense_account", "buying_cost_center"], as_dict=1)
self.add_to_stock_entry_detail({
cstr(pro_obj.production_item): {
"to_warehouse": pro_obj.fg_warehouse,
"from_warehouse": "",
"qty": self.fg_completed_qty,
"item_name": item.item_name,
"description": item.description,
"stock_uom": item.stock_uom,
"expense_account": item.expense_account,
"cost_center": item.buying_cost_center,
}
}, bom_no=pro_obj.bom_no)
elif self.purpose in ["Material Receipt", "Manufacture/Repack"]:
if self.purpose=="Material Receipt":
self.from_warehouse = ""
item = frappe.db.sql("""select name, item_name, description,
stock_uom, expense_account, buying_cost_center from `tabItem`
where name=(select item from tabBOM where name=%s)""",
self.bom_no, as_dict=1)
self.add_to_stock_entry_detail({
item[0]["name"] : {
"qty": self.fg_completed_qty,
"item_name": item[0].item_name,
"description": item[0]["description"],
"stock_uom": item[0]["stock_uom"],
"from_warehouse": "",
"expense_account": item[0].expense_account,
"cost_center": item[0].buying_cost_center,
}
}, bom_no=self.bom_no)
self.get_stock_and_rate()
def get_bom_raw_materials(self, qty):
from erpnext.manufacturing.doctype.bom.bom import get_bom_items_as_dict
# item dict = { item_code: {qty, description, stock_uom} }
item_dict = get_bom_items_as_dict(self.bom_no, qty=qty, fetch_exploded = self.use_multi_level_bom)
for item in item_dict.values():
item.from_warehouse = item.default_warehouse
return item_dict
def get_pending_raw_materials(self, pro_obj):
"""
issue (item quantity) that is pending to issue or desire to transfer,
whichever is less
"""
item_dict = self.get_bom_raw_materials(1)
issued_item_qty = self.get_issued_qty()
max_qty = flt(pro_obj.qty)
only_pending_fetched = []
for item in item_dict:
pending_to_issue = (max_qty * item_dict[item]["qty"]) - issued_item_qty.get(item, 0)
desire_to_transfer = flt(self.fg_completed_qty) * item_dict[item]["qty"]
if desire_to_transfer <= pending_to_issue:
item_dict[item]["qty"] = desire_to_transfer
else:
item_dict[item]["qty"] = pending_to_issue
if pending_to_issue:
only_pending_fetched.append(item)
# delete items with 0 qty
for item in item_dict.keys():
if not item_dict[item]["qty"]:
del item_dict[item]
# show some message
if not len(item_dict):
frappe.msgprint(_("""All items have already been transferred for this Production Order."""))
elif only_pending_fetched:
frappe.msgprint(_("Pending Items {0} updated").format(only_pending_fetched))
return item_dict
def get_issued_qty(self):
issued_item_qty = {}
result = frappe.db.sql("""select t1.item_code, sum(t1.qty)
from `tabStock Entry Detail` t1, `tabStock Entry` t2
where t1.parent = t2.name and t2.production_order = %s and t2.docstatus = 1
and t2.purpose = 'Material Transfer'
group by t1.item_code""", self.production_order)
for t in result:
issued_item_qty[t[0]] = flt(t[1])
return issued_item_qty
def add_to_stock_entry_detail(self, item_dict, bom_no=None):
expense_account, cost_center = frappe.db.get_values("Company", self.company, \
["default_expense_account", "cost_center"])[0]
for d in item_dict:
se_child = self.append('mtn_details')
se_child.s_warehouse = item_dict[d].get("from_warehouse", self.from_warehouse)
se_child.t_warehouse = item_dict[d].get("to_warehouse", self.to_warehouse)
se_child.item_code = cstr(d)
se_child.item_name = item_dict[d]["item_name"]
se_child.description = item_dict[d]["description"]
se_child.uom = item_dict[d]["stock_uom"]
se_child.stock_uom = item_dict[d]["stock_uom"]
se_child.qty = flt(item_dict[d]["qty"])
se_child.expense_account = item_dict[d]["expense_account"] or expense_account
se_child.cost_center = item_dict[d]["cost_center"] or cost_center
# in stock uom
se_child.transfer_qty = flt(item_dict[d]["qty"])
se_child.conversion_factor = 1.00
# to be assigned for finished item
se_child.bom_no = bom_no
def validate_with_material_request(self):
for item in self.get("mtn_details"):
if item.material_request:
mreq_item = frappe.db.get_value("Material Request Item",
{"name": item.material_request_item, "parent": item.material_request},
["item_code", "warehouse", "idx"], as_dict=True)
if mreq_item.item_code != item.item_code or mreq_item.warehouse != item.t_warehouse:
frappe.throw(_("Item or Warehouse for row {0} does not match Material Request").format(item.idx),
frappe.MappingMismatchError)
def get_work_orderDetails(self, work_order):
		WO_details = frappe.db.get_value('Work Order', work_order, '*', as_dict=1)
if WO_details:
return {
'sales_invoice_no' : WO_details.sales_invoice_no,
'customer_name' : WO_details.customer_name,
'trial_date' : WO_details.trial_date,
'delivery_date' : WO_details.delivery_date,
'trials' : WO_details.trial_no
}
else:
return None
@frappe.whitelist()
def get_party_details(ref_dt, ref_dn):
if ref_dt in ["Delivery Note", "Sales Invoice"]:
res = frappe.db.get_value(ref_dt, ref_dn,
["customer", "customer_name", "address_display as customer_address"], as_dict=1)
else:
res = frappe.db.get_value(ref_dt, ref_dn,
["supplier", "supplier_name", "address_display as supplier_address"], as_dict=1)
return res or {}
@frappe.whitelist()
def get_production_order_details(production_order):
result = frappe.db.sql("""select bom_no,
ifnull(qty, 0) - ifnull(produced_qty, 0) as fg_completed_qty, use_multi_level_bom,
wip_warehouse from `tabProduction Order` where name = %s""", production_order, as_dict=1)
return result and result[0] or {}
def query_sales_return_doc(doctype, txt, searchfield, start, page_len, filters):
conditions = ""
if doctype == "Sales Invoice":
conditions = "and update_stock=1"
return frappe.db.sql("""select name, customer, customer_name
from `tab%s` where docstatus = 1
and (`%s` like %%(txt)s
or `customer` like %%(txt)s) %s %s
order by name, customer, customer_name
limit %s""" % (doctype, searchfield, conditions,
get_match_cond(doctype), "%(start)s, %(page_len)s"),
{"txt": "%%%s%%" % txt, "start": start, "page_len": page_len},
as_list=True)
def query_purchase_return_doc(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select name, supplier, supplier_name
from `tab%s` where docstatus = 1
and (`%s` like %%(txt)s
or `supplier` like %%(txt)s) %s
order by name, supplier, supplier_name
limit %s""" % (doctype, searchfield, get_match_cond(doctype),
"%(start)s, %(page_len)s"), {"txt": "%%%s%%" % txt, "start":
start, "page_len": page_len}, as_list=True)
def query_return_item(doctype, txt, searchfield, start, page_len, filters):
txt = txt.replace("%", "")
ref = get_return_doc_and_details(filters)
stock_items = get_stock_items_for_return(ref.doc, ref.parentfields)
result = []
for item in ref.doc.get_all_children():
if getattr(item, "item_code", None) in stock_items:
item.item_name = cstr(item.item_name)
item.description = cstr(item.description)
if (txt in item.item_code) or (txt in item.item_name) or (txt in item.description):
val = [
item.item_code,
(len(item.item_name) > 40) and (item.item_name[:40] + "...") or item.item_name,
(len(item.description) > 40) and (item.description[:40] + "...") or \
item.description
]
if val not in result:
result.append(val)
return result[start:start+page_len]
def get_batch_no(doctype, txt, searchfield, start, page_len, filters):
if not filters.get("posting_date"):
filters["posting_date"] = nowdate()
batch_nos = None
args = {
'item_code': filters.get("item_code"),
's_warehouse': filters.get('s_warehouse'),
'posting_date': filters.get('posting_date'),
'txt': "%%%s%%" % txt,
'mcond':get_match_cond(doctype),
"start": start,
"page_len": page_len
}
if filters.get("s_warehouse"):
batch_nos = frappe.db.sql("""select batch_no
from `tabStock Ledger Entry` sle
where item_code = '%(item_code)s'
and warehouse = '%(s_warehouse)s'
and batch_no like '%(txt)s'
and exists(select * from `tabBatch`
where name = sle.batch_no
and (ifnull(expiry_date, '2099-12-31') >= %(posting_date)s
or expiry_date = '')
and docstatus != 2)
%(mcond)s
group by batch_no having sum(actual_qty) > 0
order by batch_no desc
limit %(start)s, %(page_len)s """
% args)
if batch_nos:
return batch_nos
else:
return frappe.db.sql("""select name from `tabBatch`
where item = '%(item_code)s'
and docstatus < 2
and (ifnull(expiry_date, '2099-12-31') >= %(posting_date)s
or expiry_date = '' or expiry_date = "0000-00-00")
%(mcond)s
order by name desc
limit %(start)s, %(page_len)s
""" % args)
def get_stock_items_for_return(ref_doc, parentfields):
"""return item codes filtered from doc, which are stock items"""
if isinstance(parentfields, basestring):
parentfields = [parentfields]
all_items = list(set([d.item_code for d in
ref_doc.get_all_children() if d.get("item_code")]))
stock_items = frappe.db.sql_list("""select name from `tabItem`
where is_stock_item='Yes' and name in (%s)""" % (", ".join(["%s"] * len(all_items))),
tuple(all_items))
return stock_items
def get_return_doc_and_details(args):
ref = frappe._dict()
# get ref_doc
if args.get("purpose") in return_map:
for fieldname, val in return_map[args.get("purpose")].items():
if args.get(fieldname):
ref.fieldname = fieldname
ref.doc = frappe.get_doc(val[0], args.get(fieldname))
ref.parentfields = val[1]
break
return ref
return_map = {
"Sales Return": {
# [Ref DocType, [Item tables' parentfields]]
"delivery_note_no": ["Delivery Note", ["delivery_note_details", "packing_details"]],
"sales_invoice_no": ["Sales Invoice", ["entries", "packing_details"]]
},
"Purchase Return": {
"purchase_receipt_no": ["Purchase Receipt", ["purchase_receipt_details"]]
}
}
@frappe.whitelist()
def make_return_jv(stock_entry):
se = frappe.get_doc("Stock Entry", stock_entry)
if not se.purpose in ["Sales Return", "Purchase Return"]:
return
ref = get_return_doc_and_details(se)
if ref.doc.doctype == "Delivery Note":
result = make_return_jv_from_delivery_note(se, ref)
elif ref.doc.doctype == "Sales Invoice":
result = make_return_jv_from_sales_invoice(se, ref)
elif ref.doc.doctype == "Purchase Receipt":
result = make_return_jv_from_purchase_receipt(se, ref)
# create jv doc and fetch balance for each unique row item
jv = frappe.new_doc("Journal Voucher")
jv.update({
"posting_date": se.posting_date,
"voucher_type": se.purpose == "Sales Return" and "Credit Note" or "Debit Note",
"fiscal_year": se.fiscal_year,
"company": se.company
})
from erpnext.accounts.utils import get_balance_on
for r in result:
jv.append("entries", {
"account": r.get("account"),
"against_invoice": r.get("against_invoice"),
"against_voucher": r.get("against_voucher"),
"balance": get_balance_on(r.get("account"), se.posting_date) if r.get("account") else 0
})
return jv
def make_return_jv_from_sales_invoice(se, ref):
# customer account entry
parent = {
"account": ref.doc.debit_to,
"against_invoice": ref.doc.name,
}
# income account entries
children = []
for se_item in se.get("mtn_details"):
# find item in ref.doc
ref_item = ref.doc.get({"item_code": se_item.item_code})[0]
account = get_sales_account_from_item(ref.doc, ref_item)
if account not in children:
children.append(account)
return [parent] + [{"account": account} for account in children]
def get_sales_account_from_item(doc, ref_item):
account = None
if not getattr(ref_item, "income_account", None):
if ref_item.parent_item:
parent_item = doc.get(doc.fname, {"item_code": ref_item.parent_item})[0]
account = parent_item.income_account
else:
account = ref_item.income_account
return account
def make_return_jv_from_delivery_note(se, ref):
invoices_against_delivery = get_invoice_list("Sales Invoice Item", "delivery_note",
ref.doc.name)
if not invoices_against_delivery:
sales_orders_against_delivery = [d.against_sales_order for d in ref.doc.get_all_children() if getattr(d, "against_sales_order", None)]
if sales_orders_against_delivery:
invoices_against_delivery = get_invoice_list("Sales Invoice Item", "sales_order",
sales_orders_against_delivery)
if not invoices_against_delivery:
return []
packing_item_parent_map = dict([[d.item_code, d.parent_item] for d in ref.doc.get(ref.parentfields[1])])
parent = {}
children = []
for se_item in se.get("mtn_details"):
for sales_invoice in invoices_against_delivery:
si = frappe.get_doc("Sales Invoice", sales_invoice)
if se_item.item_code in packing_item_parent_map:
ref_item = si.get({"item_code": packing_item_parent_map[se_item.item_code]})
else:
ref_item = si.get({"item_code": se_item.item_code})
if not ref_item:
continue
ref_item = ref_item[0]
account = get_sales_account_from_item(si, ref_item)
if account not in children:
children.append(account)
if not parent:
parent = {"account": si.debit_to}
break
if len(invoices_against_delivery) == 1:
parent["against_invoice"] = invoices_against_delivery[0]
result = [parent] + [{"account": account} for account in children]
return result
def get_invoice_list(doctype, link_field, value):
if isinstance(value, basestring):
value = [value]
return frappe.db.sql_list("""select distinct parent from `tab%s`
where docstatus = 1 and `%s` in (%s)""" % (doctype, link_field,
", ".join(["%s"]*len(value))), tuple(value))
def make_return_jv_from_purchase_receipt(se, ref):
invoice_against_receipt = get_invoice_list("Purchase Invoice Item", "purchase_receipt",
ref.doc.name)
if not invoice_against_receipt:
purchase_orders_against_receipt = [d.prevdoc_docname for d in
ref.doc.get(ref.doc.fname, {"prevdoc_doctype": "Purchase Order"})
if getattr(d, "prevdoc_docname", None)]
if purchase_orders_against_receipt:
invoice_against_receipt = get_invoice_list("Purchase Invoice Item", "purchase_order",
purchase_orders_against_receipt)
if not invoice_against_receipt:
return []
parent = {}
children = []
for se_item in se.get("mtn_details"):
for purchase_invoice in invoice_against_receipt:
pi = frappe.get_doc("Purchase Invoice", purchase_invoice)
ref_item = pi.get({"item_code": se_item.item_code})
if not ref_item:
continue
ref_item = ref_item[0]
account = ref_item.expense_account
if account not in children:
children.append(account)
if not parent:
parent = {"account": pi.credit_to}
break
if len(invoice_against_receipt) == 1:
parent["against_voucher"] = invoice_against_receipt[0]
result = [parent] + [{"account": account} for account in children]
return result
| agpl-3.0 | 3,160,072,189,511,768,600 | 35.210251 | 143 | 0.676546 | false | 3.015681 | false | false | false |
thp44/delphin_6_automation | data_process/2d_1d/archieve/moisture_content_comparison.py | 1 | 18274 | __author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import pandas as pd
import matplotlib.pyplot as plt
# RiBuild Modules
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
out_folder = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\2d_1d\processed_data'
graphic_folder = r'U:\RIBuild\2D_1D\Processed Results\4A'
hdf_file = out_folder + '/relative_moisture_content.h5'
# Open HDF
# Uninsulated
dresdenzp_highratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_high_ratio_uninsulated_4a')
dresdenzd_highratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_high_ratio_uninsulated_4a')
postdam_highratio_uninsulated_4a = pd.read_hdf(hdf_file, 'potsdam_high_ratio_uninsulated_4a')
dresdenzp_lowratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_low_ratio_uninsulated_4a')
dresdenzd_lowratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_low_ratio_uninsulated_4a')
postdam_lowratio_uninsulated_4a = pd.read_hdf(hdf_file, 'potsdam_low_ratio_uninsulated_4a')
total_uninsulated_4a = pd.concat([dresdenzp_highratio_uninsulated_4a, dresdenzd_highratio_uninsulated_4a,
postdam_highratio_uninsulated_4a, dresdenzp_lowratio_uninsulated_4a,
dresdenzd_lowratio_uninsulated_4a, postdam_lowratio_uninsulated_4a])
# Insulated
dresdenzp_highratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_high_ratio_insulated_4a')
dresdenzd_highratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_high_ratio_insulated_4a')
postdam_highratio_insulated_4a = pd.read_hdf(hdf_file, 'potsdam_high_ratio_insulated_4a')
dresdenzp_lowratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_low_ratio_insulated_4a')
dresdenzd_lowratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_low_ratio_insulated_4a')
postdam_lowratio_insulated_4a = pd.read_hdf(hdf_file, 'potsdam_low_ratio_insulated_4a')
total_insulated_4a = pd.concat([dresdenzp_highratio_insulated_4a, dresdenzd_highratio_insulated_4a,
postdam_highratio_insulated_4a, dresdenzp_lowratio_insulated_4a,
dresdenzd_lowratio_insulated_4a, postdam_lowratio_insulated_4a])
def plots(plot, save=False):
"""
Creates box plots from all the wall scenarios
"""
if plot == 'uninsulated' or plot == 'all':
plt.figure('dresdenzp_highratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzp_highratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/dresdenzp_highratio_uninsulated_4a_moisture")
plt.figure('dresdenzd_highratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzd_highratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/dresdenzd_highratio_uninsulated_4a_moisture")
plt.figure('postdam_highratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
postdam_highratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Potsdam - Mortar: High Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/postdam_highratio_uninsulated_4a_moisture")
plt.figure('dresdenzp_lowratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzp_lowratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/dresdenzp_lowratio_uninsulated_4a_moisture")
plt.figure('dresdenzd_lowratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzd_lowratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/dresdenzd_lowratio_uninsulated_4a_moisture")
plt.figure('postdam_lowratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
postdam_lowratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/postdam_lowratio_uninsulated_4a_moisture")
plt.figure('total_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
total_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: All - Mortar: All - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/total_uninsulated_4a_moisture")
if plot == 'insulated' or plot == 'all':
plt.figure('dresdenzp_highratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzp_highratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/dresdenzp_highratio_insulated_4a_moisture")
plt.figure('dresdenzd_highratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzd_highratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/dresdenzd_highratio_insulated_4a_moisture")
plt.figure('postdam_highratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
postdam_highratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Potsdam - Mortar: High Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/postdam_highratio_insulated_4a_moisture")
plt.figure('dresdenzp_lowratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzp_lowratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/dresdenzp_lowratio_insulated_4a_moisture")
plt.figure('dresdenzd_lowratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzd_lowratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/dresdenzd_lowratio_insulated_4a_moisture")
plt.figure('postdam_lowratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
postdam_lowratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/postdam_lowratio_insulated_4a_moisture")
plt.figure('total_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
total_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: All - Mortar: All - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/total_insulated_4a_moisture")
plt.show()
plots('all', False)
def std3_ratio(print_=False, excel=False):
"""Computes ratio of outliers in the data sets. Outliers is here defined as data points deviating with more
the 3 standard deviations from the mean."""
std3_uninsulated_ratio_ = uninsulated()
std3_insulated_ratio_ = insulated()
if print_:
print('Uninsulated')
print(std3_uninsulated_ratio_)
print('')
print('Insulated')
print(std3_insulated_ratio_)
if excel:
writer = pd.ExcelWriter(f'{out_folder}/moisture_std_ratios.xlsx')
std3_uninsulated_ratio_.to_excel(writer, 'Uninsulated')
std3_insulated_ratio_.to_excel(writer, 'Insulated')
writer.save()
def uninsulated():
"""Computes the outliers for the uninsulated cases"""
outliers_total_uninsulated = (total_uninsulated_4a.shape[0] -
total_uninsulated_4a.sub(total_uninsulated_4a.mean())
.div(total_uninsulated_4a.std()).abs().lt(3).sum()) / total_uninsulated_4a.shape[0]
outliers_zd_high_uninsulated = (dresdenzd_highratio_uninsulated_4a.shape[0] -
dresdenzd_highratio_uninsulated_4a.sub(dresdenzd_highratio_uninsulated_4a.mean())
.div(dresdenzd_highratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzd_highratio_uninsulated_4a.shape[0]
outliers_zp_high_uninsulated = (dresdenzp_highratio_uninsulated_4a.shape[0] -
dresdenzp_highratio_uninsulated_4a.sub(dresdenzp_highratio_uninsulated_4a.mean())
.div(dresdenzp_highratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzp_highratio_uninsulated_4a.shape[0]
outliers_pd_high_uninsulated = (postdam_highratio_uninsulated_4a.shape[0] -
postdam_highratio_uninsulated_4a.sub(postdam_highratio_uninsulated_4a.mean())
.div(postdam_highratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ postdam_highratio_uninsulated_4a.shape[0]
outliers_zd_low_uninsulated = (dresdenzd_lowratio_uninsulated_4a.shape[0] -
dresdenzd_lowratio_uninsulated_4a.sub(dresdenzd_lowratio_uninsulated_4a.mean())
.div(dresdenzd_lowratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzd_lowratio_uninsulated_4a.shape[0]
outliers_zp_low_uninsulated = (dresdenzp_lowratio_uninsulated_4a.shape[0] -
dresdenzp_lowratio_uninsulated_4a.sub(dresdenzp_lowratio_uninsulated_4a.mean())
.div(dresdenzp_lowratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzp_lowratio_uninsulated_4a.shape[0]
outliers_pd_low_uninsulated = (postdam_lowratio_uninsulated_4a.shape[0] -
postdam_lowratio_uninsulated_4a.sub(postdam_lowratio_uninsulated_4a.mean())
.div(postdam_lowratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ postdam_lowratio_uninsulated_4a.shape[0]
outliers_uninsulated_ratio_ = pd.concat([outliers_total_uninsulated, outliers_zd_high_uninsulated,
outliers_zp_high_uninsulated, outliers_pd_high_uninsulated,
outliers_zd_low_uninsulated, outliers_zp_low_uninsulated,
outliers_pd_low_uninsulated], axis=1)
outliers_uninsulated_ratio_.columns = ["Brick: All - Mortar: All - Insulation: None",
"Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: None",
"Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: None",
"Brick: Potsdam - Mortar: High Cement Ratio - Insulation: None",
"Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: None",
"Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: None",
"Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: None"]
return outliers_uninsulated_ratio_
def insulated():
"""Computes the outliers for the insulated cases"""
outliers_total_insulated = (total_insulated_4a.shape[0] - total_insulated_4a.sub(total_insulated_4a.mean())
.div(total_insulated_4a.std()).abs().lt(3).sum()) / total_insulated_4a.shape[0]
outliers_zd_high_insulated = (dresdenzd_highratio_insulated_4a.shape[0] -
dresdenzd_highratio_insulated_4a.sub(dresdenzd_highratio_insulated_4a.mean())
.div(dresdenzd_highratio_insulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzd_highratio_insulated_4a.shape[0]
outliers_zp_high_insulated = (dresdenzp_highratio_insulated_4a.shape[0] -
dresdenzp_highratio_insulated_4a.sub(dresdenzp_highratio_insulated_4a.mean())
.div(dresdenzp_highratio_insulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzp_highratio_insulated_4a.shape[0]
outliers_pd_high_insulated = (postdam_highratio_insulated_4a.shape[0] -
postdam_highratio_insulated_4a.sub(postdam_highratio_insulated_4a.mean())
.div(postdam_highratio_insulated_4a.std()).abs().lt(3).sum()) \
/ postdam_highratio_insulated_4a.shape[0]
outliers_zd_low_insulated = (dresdenzd_lowratio_insulated_4a.shape[0] -
dresdenzd_lowratio_insulated_4a.sub(dresdenzd_lowratio_insulated_4a.mean())
.div(dresdenzd_lowratio_insulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzd_lowratio_insulated_4a.shape[0]
outliers_zp_low_insulated = (dresdenzp_lowratio_insulated_4a.shape[0] -
dresdenzp_lowratio_insulated_4a.sub(dresdenzp_lowratio_insulated_4a.mean())
.div(dresdenzp_lowratio_insulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzp_lowratio_insulated_4a.shape[0]
outliers_pd_low_insulated = (postdam_lowratio_insulated_4a.shape[0] -
postdam_lowratio_insulated_4a.sub(postdam_lowratio_insulated_4a.mean())
.div(postdam_lowratio_insulated_4a.std()).abs().lt(3).sum()) \
/ postdam_lowratio_insulated_4a.shape[0]
std2_insulated_ratio_ = pd.concat([outliers_total_insulated, outliers_zd_high_insulated,
outliers_zp_high_insulated, outliers_pd_high_insulated,
outliers_zd_low_insulated, outliers_zp_low_insulated,
outliers_pd_low_insulated], axis=1)
std2_insulated_ratio_.columns = ["Brick: All - Mortar: All - Insulation: None",
"Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: Calcium Silicate",
"Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: Calcium Silicate",
"Brick: Potsdam - Mortar: High Cement Ratio - Insulation: Calcium Silicate",
"Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: Calcium Silicate",
"Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: Calcium Silicate",
"Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: Calcium Silicate"]
return std2_insulated_ratio_
#std3_ratio(False, True)
| mit | -2,501,209,804,834,219,500 | 55.575851 | 120 | 0.600088 | false | 3.176982 | false | false | false |
MaT1g3R/YasenBaka | cogs/moderation.py | 1 | 3215 | from discord import DiscordException, Forbidden, HTTPException
from discord.ext import commands
from discord.ext.commands import Context
from bot import Yasen
from scripts.checks import has_manage_message, is_admin, no_pm
from scripts.discord_utils import leading_members
from scripts.helpers import parse_number
class Moderation:
"""
Moderation commands.
"""
__slots__ = ('bot',)
def __init__(self, bot: Yasen):
self.bot = bot
def __local_check(self, ctx: Context):
return no_pm(ctx)
@commands.command()
@commands.check(is_admin)
async def masspm(self, ctx: Context, *, args: str = None):
"""
Description: Send pm to all mentioned members.
Restriction: Cannot be used in private message.
Permission Required: Administrator
Usage: "`{prefix}masspm @mention0 @mention1 my message`"
"""
if not args:
await ctx.send(
'Please mention at least one member and include '
'a message to send.'
)
return
members, msg = leading_members(ctx, args)
if not members:
await ctx.send('Please mention at least one member.')
return
if not msg:
await ctx.send('Please enter a message for me to send.')
return
sent = []
failed = []
for m in members:
try:
await m.send(msg)
sent.append(m.display_name)
except DiscordException as e:
self.bot.logger.warn(str(e))
failed.append(m.display_name)
success_msg = (f'PM sent to the following members:'
f'\n```\n{", ".join(sent)}\n```') if sent else ''
failed_msg = (f'Failed to send PMs to the following members:'
f'\n```\n{", ".join(failed)}\n```') if failed else ''
if success_msg or failed_msg:
await ctx.send(f'{success_msg}{failed_msg}')
@commands.command()
@commands.check(has_manage_message)
async def purge(self, ctx: Context, num=None):
"""
Description: Purge up to 99 messages in the current channel.
Restriction: |
Cannot be used in private message.
Can only purge from 1 to 99 (inclusive) messages at once.
Permission Required: Manage Messages
Usage: "`{prefix}purge num` where num is a number between 1 and 99."
"""
num = parse_number(num, int) or 0
if not 1 <= num <= 99:
await ctx.send(
'Please enter a number between 1 and 99.', delete_after=3
)
return
try:
deleted = await ctx.channel.purge(limit=num + 1)
except Forbidden:
await ctx.send('I do not have the permissions to purge messages.')
except HTTPException:
await ctx.send(':no_entry_sign: Purging messages failed.')
else:
deleted_num = len(deleted) - 1
            msg_str = (f'{deleted_num} message' if deleted_num == 1
else f'{deleted_num} messages')
await ctx.send(f':recycle: Purged {msg_str}.', delete_after=3)
| apache-2.0 | -242,369,800,436,003,700 | 35.534091 | 78 | 0.565474 | false | 4.197128 | false | false | false |
ceroytres/cat_nets | cat_nets/datasets/read_pets.py | 1 | 1970 | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import tensorflow as tf
import csv
def catClassification_loader(path):
cat_names = ['Abyssinian','Bengal','Birman','Bombay','British_Shorthair',
'Egyptian_Mau','Maine_Coon','Persian','Ragdoll','Russian_Blue',
'Siamese','Sphynx']
cat_dict = dict(zip(cat_names,range(len(cat_names))))
labels_list, filename_list = [], []
with open(path,mode = 'r') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
labels_list.append(cat_dict[row[0]])
filename_list.append(row[1])
labels_list = tf.convert_to_tensor(labels_list)
images_list = tf.convert_to_tensor(filename_list)
filename_queue = tf.train.slice_input_producer([labels_list,images_list], shuffle=True)
label = filename_queue[0]
filename = filename_queue[1]
raw_image = tf.read_file(filename)
image = tf.image.decode_jpeg(raw_image, channels = 3)
cat_dict = dict(zip(cat_dict.values(),cat_dict.keys()))
return image, label, cat_dict
# image = tf.image.resize_images(image,image_size,
# method = tf.image.ResizeMethod.BILINEAR,
# align_corners= True)
# image = tf.cast(image, tf.uint8)
#
# batch_size = batch_size
#
# capacity = min_after_dequeue + 3 * batch_size
#
# image_batch, label_batch = tf.train.shuffle_batch([image,label],
# batch_size = batch_size,
# capacity = capacity,
# min_after_dequeue = min_after_dequeue,
# num_threads=num_threads)
# return image_batch,label_batch
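    # A minimal usage sketch (assumptions: a TF1-style graph/session API and a
    # CSV file path 'cats.csv'; neither is defined in this module):
    #
    # image, label, cat_dict = catClassification_loader('cats.csv')
    # with tf.Session() as sess:
    #     coord = tf.train.Coordinator()
    #     threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    #     img, lbl = sess.run([image, label])
    #     print(cat_dict[lbl])
    #     coord.request_stop()
    #     coord.join(threads)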
| mit | -7,524,138,306,632,480,000 | 32.561404 | 91 | 0.540102 | false | 3.594891 | false | false | false |
rwgdrummer/maskgen | setuptools-version/setuptools_maskgen_version.py | 1 | 1613 | from pkg_resources import get_distribution
from subprocess import check_output
import requests
import json
repos = 'rwgdrummer/maskgen'
giturl = 'https://api.github.com/repos'
def get_commit():
url = giturl + '/' + repos + '/pulls?state=closed'
resp = requests.get(url)
if resp.status_code == requests.codes.ok:
content = json.loads(resp.content)
for item in content:
if 'merged_at' in item and 'merge_commit_sha' in item:
return item['merge_commit_sha']
return None
def get_version():
import os
filename = 'VERSION'
#if os.path.exists('.git/ORIG_HEAD'):
# filename = '.git/ORIG_HEAD'
#else:
print os.path.abspath(filename)
with open(filename) as fp:
return fp.readline()
def validate_version_format(dist, attr, value):
try:
version = get_version().strip()
except:
version = get_distribution(dist.get_name()).version
else:
version = format_version(version=version, fmt=value)
dist.metadata.version = version
def format_version(version, fmt='{gitsha}'):
return fmt.format(gitsha=version)
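# A minimal sketch of how this plugin would be consumed from a setup.py.
# The ``version_format`` keyword name is an assumption inferred from the
# ``validate_version_format``/``format_version`` pair above; use whatever
# keyword the distribution's entry point actually registers:
#
#   from setuptools import setup
#   setup(
#       name='maskgen',
#       setup_requires=['setuptools_maskgen_version'],
#       version_format='{gitsha}',
#   )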
if __name__ == "__main__":
# determine version from git
git_version = get_version().strip()
git_version = format_version(version=git_version)
# monkey-patch `setuptools.setup` to inject the git version
import setuptools
original_setup = setuptools.setup
def setup(version=None, *args, **kw):
return original_setup(version=git_version, *args, **kw)
setuptools.setup = setup
# import the packages's setup module
import setup
| bsd-3-clause | -12,361,799,161,684,228 | 27.298246 | 66 | 0.651581 | false | 3.708046 | false | false | false |
Solomoriah/gdmodule | demo/gddemo.py | 1 | 1024 | #!/usr/bin/env python
import gd, os, cStringIO, urllib2
os.environ["GDFONTPATH"] = "."
FONT = "Pacifico"
def simple():
im = gd.image((200, 200))
white = im.colorAllocate((255, 255, 255))
black = im.colorAllocate((0, 0, 0))
red = im.colorAllocate((255, 0, 0))
blue = im.colorAllocate((0, 0, 255))
im.colorTransparent(white)
im.interlace(1)
im.rectangle((0,0),(199,199),black)
im.arc((100,100),(195,175),0,360,blue)
im.fill((100,100),red)
print im.get_bounding_rect(FONT, 12.0, 0.0, (10, 100), "Hello Python")
im.string_ttf(FONT, 20.0, 0.0, (10, 100), "Hello Python", black)
f=open("xx.png","w")
im.writePng(f)
f.close()
f=open("xx.jpg", "w")
im.writeJpeg(f,100)
f.close()
f=cStringIO.StringIO()
im.writePng(f)
print "PNG size:", len(f.getvalue())
f.close()
f = urllib2.urlopen("http://www.gnu.org/graphics/gnu-head-sm.jpg")
im = gd.image(f, "jpg")
f.close()
print "GNU Image Size:", im.size()
simple()
| bsd-3-clause | 8,312,121,099,719,976,000 | 20.787234 | 74 | 0.583008 | false | 2.708995 | false | false | false |
quentinl-c/network_testing-client | app/editor.py | 1 | 2631 | from collaborator import Collaborator
import os
import random
import logging
import time
logging.basicConfig(filename=__name__ + '.log', level=logging.DEBUG)
logger = logging.getLogger(__name__)
HOME_DIR = os.getenv('HOME_DIR', '/home/')
WRITER_SELECTOR = 'ace_text-input'
READER_SELECTOR = 'ace_content'
FILTER = '[Tracker]'
tempo = 15  # Client will wait 15 seconds before getting results
class Editor(Collaborator):
"""docstring for Editor"""
def __init__(self, controller, target, typing_speed, word_to_type):
Collaborator.__init__(self, controller, target)
logger.debug("=== Editor is being instanciated ===")
self.word_to_type = None
self.counter = 0
if len(word_to_type) > 0:
selector = WRITER_SELECTOR
self.word_to_type = word_to_type
else:
selector = READER_SELECTOR
self.word_to_type = None
self.select = None
while self.select is None:
self._driver.implicitly_wait(20)
self.select = self._driver.find_element_by_class_name(
selector)
def run(self):
self.alive = True
if self.word_to_type is not None:
beg_time = random.uniform(2.0, 6.0)
time.sleep(beg_time)
while self.alive:
if self.word_to_type is not None:
w = ''.join((self.word_to_type, ';',
str(self.counter).zfill(6)))
self.select.send_keys(w)
self.counter += 1
time.sleep(2)
else:
self.select.text
self.saveTxt()
def getResults(self):
time.sleep(tempo)
logger.debug("=== Get results from log files ===")
tmp = []
self.alive = False
time.sleep(tempo)
with open(self._log_path, 'r') as content_file:
for line in content_file:
beg = line.find(FILTER)
if beg != -1:
rec = line[beg:].split(',')[0].split('"')[0]
tmp.append(rec)
content = '\n'.join(tmp)
self._controller.sendResults(content)
def saveTxt(self):
if self.word_to_type is not None:
self.select = None
while self.select is None:
self._driver.implicitly_wait(20)
self.select = self._driver.find_element_by_class_name(
READER_SELECTOR)
content = self.select.text
file = open(HOME_DIR + str(self._controller.id) + '_content.txt', 'w')
file.write(content)
file.close()
| gpl-3.0 | 31,586,578,786,040,000 | 30.698795 | 78 | 0.54618 | false | 3.852123 | false | false | false |
google/trax | trax/models/rnn.py | 1 | 9301 | # coding=utf-8
# Copyright 2021 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""RNNs (recursive neural networks)."""
from trax import layers as tl
from trax.fastmath import numpy as jnp
def RNNLM(vocab_size,
d_model=512,
n_layers=2,
rnn_cell=tl.LSTMCell,
rnn_cell_d_state_multiplier=2,
dropout=0.1,
mode='train'):
"""Returns an RNN language model.
This model performs autoregressive language modeling:
- input: rank 2 tensor representing a batch of text strings via token IDs
plus padding markers; shape is (batch_size, sequence_length). The tensor
elements are integers in `range(vocab_size)`, and `0` values mark padding
positions.
- output: rank 3 tensor representing a batch of log-probability
distributions for each sequence position over possible token IDs;
shape is (batch_size, sequence_length, `vocab_size`).
Args:
vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in `range(vocab_size)`. These integers typically
represent token IDs from a vocabulary-based tokenizer.
d_model: Embedding depth throughout the model.
n_layers: Number of RNN layers.
rnn_cell: Type of RNN cell; must be a subclass of `Layer`.
rnn_cell_d_state_multiplier: Multiplier for feature depth of RNN cell
state.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout.
mode: If `'predict'`, use fast inference; if `'train'` apply dropout.
Returns:
An RNN language model as a layer that maps from a tensor of tokens
to activations over a vocab set.
"""
if n_layers != 2: # TODO(jonni): Remove n_layers arg, if it can't vary?
raise ValueError(f'Number of layers must be set to 2; instead got'
f' {n_layers}.')
def MultiRNNCell():
"""Multi-layer RNN cell."""
return tl.Serial(
tl.Parallel([], tl.Split(n_items=n_layers)),
tl.SerialWithSideOutputs(
[rnn_cell(n_units=d_model) for _ in range(n_layers)]),
tl.Parallel([], tl.Concatenate(n_items=n_layers))
)
zero_state = tl.MakeZeroState( # pylint: disable=no-value-for-parameter
depth_multiplier=n_layers * rnn_cell_d_state_multiplier
)
return tl.Serial(
tl.ShiftRight(mode=mode),
tl.Embedding(vocab_size, d_model),
tl.Dropout(rate=dropout, mode=mode),
tl.Branch([], zero_state),
tl.Scan(MultiRNNCell(), axis=1, mode=mode),
tl.Select([0], n_in=2), # Drop RNN state.
tl.Dense(vocab_size),
)
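# A minimal usage sketch for RNNLM (shapes and hyperparameters below are
# illustrative assumptions, not defaults taken from this file):
#
#   import numpy as np
#   from trax import shapes
#
#   model = RNNLM(vocab_size=256, d_model=128, mode='train')
#   tokens = np.zeros((2, 16), dtype=np.int32)   # (batch, sequence_length)
#   model.init(shapes.signature(tokens))
#   log_probs = model(tokens)                    # shape (2, 16, 256)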
def GRULM(vocab_size=256,
d_model=512,
n_layers=1,
mode='train'):
"""Returns a GRU (gated recurrent unit) language model.
This model performs autoregressive language modeling:
- input: rank 2 tensor representing a batch of text strings via token IDs
plus padding markers; shape is (batch_size, sequence_length). The tensor
elements are integers in `range(vocab_size)`, and `0` values mark padding
positions.
- output: rank 3 tensor representing a batch of log-probability
distributions for each sequence position over possible token IDs;
shape is (batch_size, sequence_length, `vocab_size`).
Args:
vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in `range(vocab_size)`. These integers typically
represent token IDs from a vocabulary-based tokenizer.
d_model: Embedding depth throughout the model.
n_layers: Number of GRU layers.
mode: If `'predict'`, use fast inference (and omit the right shift).
Returns:
A GRU language model as a layer that maps from a tensor of tokens
to activations over a vocab set.
"""
return tl.Serial(
tl.ShiftRight(mode=mode),
tl.Embedding(vocab_size, d_model),
[tl.GRU(d_model, mode=mode) for _ in range(n_layers)],
tl.Dense(vocab_size),
)
# TODO(jonni): Decide names (here and Transformer): input/source, output/target
# TODO(jonni): Align with Transformer: (attention-)dropout, n-(attention-)heads
def LSTMSeq2SeqAttn(input_vocab_size=256,
target_vocab_size=256,
d_model=512,
n_encoder_layers=2,
n_decoder_layers=2,
n_attention_heads=1,
attention_dropout=0.0,
mode='train'):
"""Returns an LSTM sequence-to-sequence model with attention.
This model is an encoder-decoder that performs tokenized string-to-string
("source"-to-"target") transduction:
- inputs (2):
- source: rank 2 tensor representing a batch of text strings via token
IDs plus padding markers; shape is (batch_size, sequence_length). The
tensor elements are integers in `range(input_vocab_size)`, and `0`
values mark padding positions.
- target: rank 2 tensor representing a batch of text strings via token
IDs plus padding markers; shape is (batch_size, sequence_length). The
tensor elements are integers in `range(output_vocab_size)`, and `0`
values mark padding positions.
- output: rank 3 tensor representing a batch of log-probability
distributions for each sequence position over possible token IDs;
shape is (batch_size, sequence_length, `vocab_size`).
An example use would be to translate (tokenized) sentences from English to
German.
The model works as follows:
* Input encoder runs on the input tokens and creates activations that
are used as both keys and values in attention.
* Pre-attention decoder runs on the targets and creates
activations that are used as queries in attention.
* Attention runs on the queries, keys and values masking out input padding.
* Decoder runs on the result, followed by a cross-entropy loss.
Args:
input_vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in `range(vocab_size)`. These integers typically
represent token IDs from a vocabulary-based tokenizer.
target_vocab_size: Target vocabulary size.
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
n_encoder_layers: Number of LSTM layers in the encoder.
n_decoder_layers: Number of LSTM layers in the decoder after attention.
n_attention_heads: Number of attention heads.
attention_dropout: Stochastic rate (probability) for dropping an activation
value when applying dropout within an attention block.
mode: If `'predict'`, use fast inference. If `'train'`, each attention block
will include dropout; else, it will pass all values through unaltered.
Returns:
An LSTM sequence-to-sequence model as a layer that maps from a
source-target tokenized text pair to activations over a vocab set.
"""
input_encoder = tl.Serial(
tl.Embedding(input_vocab_size, d_model),
[tl.LSTM(d_model) for _ in range(n_encoder_layers)],
)
pre_attention_decoder = tl.Serial(
tl.ShiftRight(mode=mode),
tl.Embedding(target_vocab_size, d_model),
tl.LSTM(d_model, mode=mode),
)
def PrepareAttentionInputs():
"""Layer that prepares queries, keys, values and mask for attention."""
def F(encoder_activations, decoder_activations, input_tokens):
keys = values = encoder_activations
queries = decoder_activations
# Mask is 1 where inputs are not padding (0) and 0 where they are padding.
mask = (input_tokens != 0)
# We need to add axes to the mask for attention heads and decoder length.
mask = jnp.reshape(mask, (mask.shape[0], 1, 1, mask.shape[1]))
# Broadcast so mask is [batch, 1 for heads, decoder-len, encoder-len].
mask = mask + jnp.zeros((1, 1, decoder_activations.shape[1], 1))
mask = mask.astype(jnp.float32)
return queries, keys, values, mask
return tl.Fn('PrepareAttentionInputs', F, n_out=4)
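  # Shape walk-through for the mask above (illustrative sizes): with batch 2,
  # encoder length 7 and decoder length 5, `input_tokens != 0` has shape (2, 7);
  # the reshape gives (2, 1, 1, 7), and adding zeros of shape (1, 1, 5, 1)
  # broadcasts it to (2, 1, 5, 7), i.e. [batch, heads, decoder-len, encoder-len].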
return tl.Serial( # in-toks, target-toks
tl.Select([0, 1, 0, 1]), # in-toks, target-toks, in-toks, target-toks
tl.Parallel(input_encoder, pre_attention_decoder),
PrepareAttentionInputs(), # q, k, v, mask, target-toks
tl.Residual(
tl.AttentionQKV(d_model, n_heads=n_attention_heads,
dropout=attention_dropout, mode=mode,
cache_KV_in_predict=True)
), # decoder-vecs, mask, target-toks
tl.Select([0, 2]), # decoder-vecs, target-toks
[tl.LSTM(d_model, mode=mode) for _ in range(n_decoder_layers)],
tl.Dense(target_vocab_size),
tl.LogSoftmax()
)
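# A minimal usage sketch for LSTMSeq2SeqAttn (token IDs and lengths below are
# illustrative assumptions):
#
#   import numpy as np
#   from trax import shapes
#
#   model = LSTMSeq2SeqAttn(input_vocab_size=256, target_vocab_size=256,
#                           d_model=128, mode='train')
#   source = np.ones((2, 7), dtype=np.int32)   # (batch, source_len)
#   target = np.ones((2, 5), dtype=np.int32)   # (batch, target_len)
#   model.init(shapes.signature((source, target)))
#   outputs = model((source, target))
#   # outputs[0] holds (2, 5, 256) log-probabilities over target token IDs.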
| apache-2.0 | -1,051,844,559,480,561,700 | 39.973568 | 80 | 0.669498 | false | 3.956189 | false | false | false |
textcad/pyMagpie | magpie/motor.py | 1 | 2154 | #!/usr/bin/env python
from textcad import *
import magpie.utility
import magpie.hardware
class Stepper(component.Element):
def __init__(self,
size="GenericNEMA17",
negative=False,
negativeLength=10):
component.Element.__init__(self, name="stepper")
self.size = size
self.width = 0
self.length = 0
self.mountSpacing = 0
self.mountScrew = ""
self.flangeDiameter = 0
self.flangeHeight = 0
self.shaftLength = 0
self.shaftDiameter = 0
self.negative = negative
self.negativeLength = negativeLength
magpie.utility.get_dimensions(size=size, name="stepperMotor", obj=self)
self.holeLocations = [[self.mountSpacing/2, self.mountSpacing/2, 0],
[self.mountSpacing/2, -self.mountSpacing/2, 0],
[-self.mountSpacing/2, self.mountSpacing/2, 0],
[-self.mountSpacing/2, -self.mountSpacing/2, 0]]
self.screw = magpie.hardware.CapScrew(size=self.mountScrew)
self.location = [0, 0, 0]
self.color = [0.5, 0.5, 0.5]
self.construction = self._construction()
def _construction(self):
body = element.Cube([self.width, self.width, self.length])
body.center = [True, True, False]
body.location = [0, 0, -self.length]
flange = element.Cylinder(radius=self.flangeDiameter/2,
height=self.flangeHeight)
shaft = element.Cylinder(radius=self.shaftDiameter/2,
height=self.shaftLength+self.flangeHeight)
asm = body + flange + shaft
if self.negative:
# Flange
asm += element.Hole(radius=self.flangeDiameter/2,
height=self.negativeLength)
# Mount holes
for hole in self.holeLocations:
s = element.Hole(radius=self.screw.outerDiameter/2,
height=self.negativeLength)
s.location = hole
asm += s
return asm
| mit | -7,378,518,400,631,267,000 | 38.888889 | 79 | 0.551532 | false | 3.930657 | false | false | false |
restless/django-guardian | guardian/utils.py | 1 | 4832 | """
django-guardian helper functions.
Functions defined within this module should be considered as django-guardian's
internal functionality. They are **not** guaranteed to be stable - which means
their actual input parameters/output types may change in future releases.
"""
import os
import logging
from itertools import chain
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseForbidden, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext, TemplateDoesNotExist
from django.utils.http import urlquote
from guardian.compat import AnonymousUser
from guardian.compat import Group
from guardian.compat import User
from guardian.conf import settings as guardian_settings
from guardian.exceptions import NotUserNorGroup
logger = logging.getLogger(__name__)
abspath = lambda *p: os.path.abspath(os.path.join(*p))
def get_anonymous_user():
"""
Returns ``User`` instance (not ``AnonymousUser``) depending on
``ANONYMOUS_USER_ID`` configuration.
"""
return User.objects.get(id=guardian_settings.ANONYMOUS_USER_ID)
def get_groups_backref_name():
"""
Returns backreference name from Group to user model.
"""
return User._meta.get_field_by_name('groups')[0].related_query_name()
def get_identity(identity):
"""
Returns (user_obj, None) or (None, group_obj) tuple depending on what is
given. Also accepts AnonymousUser instance but would return ``User``
instead - it is convenient and needed for authorization backend to support
anonymous users.
:param identity: either ``User`` or ``Group`` instance
:raises ``NotUserNorGroup``: if cannot return proper identity instance
**Examples**::
>>> user = User.objects.create(username='joe')
>>> get_identity(user)
(<User: joe>, None)
>>> group = Group.objects.create(name='users')
>>> get_identity(group)
(None, <Group: users>)
>>> anon = AnonymousUser()
>>> get_identity(anon)
(<User: AnonymousUser>, None)
>>> get_identity("not instance")
...
NotUserNorGroup: User/AnonymousUser or Group instance is required (got )
"""
if isinstance(identity, AnonymousUser):
identity = get_anonymous_user()
if isinstance(identity, User):
return identity, None
elif isinstance(identity, Group):
return None, identity
raise NotUserNorGroup("User/AnonymousUser or Group instance is required "
"(got %s)" % identity)
def get_403_or_None(request, perms, obj=None, login_url=None,
redirect_field_name=None, return_403=False, accept_global_perms=False):
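    # Summary (derived from the body below): returns None when the user holds
    # all ``perms`` (checked globally first if ``accept_global_perms``, then
    # against ``obj``); otherwise returns a 403 response or raises
    # PermissionDenied when ``return_403``/settings say so, else redirects to
    # the login URL with the current path appended.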
login_url = login_url or settings.LOGIN_URL
redirect_field_name = redirect_field_name or REDIRECT_FIELD_NAME
# Handles both original and with object provided permission check
# as ``obj`` defaults to None
has_permissions = False
# global perms check first (if accept_global_perms)
if accept_global_perms:
has_permissions = all(request.user.has_perm(perm) for perm in perms)
# if still no permission granted, try obj perms
if not has_permissions:
has_permissions = all(request.user.has_perm(perm, obj) for perm in perms)
if not has_permissions:
if return_403:
if guardian_settings.RENDER_403:
try:
response = render_to_response(
guardian_settings.TEMPLATE_403, {},
RequestContext(request))
response.status_code = 403
return response
except TemplateDoesNotExist, e:
if settings.DEBUG:
raise e
elif guardian_settings.RAISE_403:
raise PermissionDenied
return HttpResponseForbidden()
else:
path = urlquote(request.get_full_path())
tup = login_url, redirect_field_name, path
return HttpResponseRedirect("%s?%s=%s" % tup)
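# Illustrative usage (names below are hypothetical, not part of guardian): a view can
# delegate its permission check to get_403_or_None and short-circuit on a response, e.g.
#
#   forbidden = get_403_or_None(request, ['app.change_thing'], obj=thing, return_403=True)
#   if forbidden is not None:
#       return forbidden
#   # ...permission granted, continue with the view...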
def clean_orphan_obj_perms():
"""
Seeks and removes all object permissions entries pointing at non-existing
targets.
Returns number of removed objects.
"""
from guardian.models import UserObjectPermission
from guardian.models import GroupObjectPermission
deleted = 0
# TODO: optimise
for perm in chain(UserObjectPermission.objects.all(),
GroupObjectPermission.objects.all()):
if perm.content_object is None:
logger.debug("Removing %s (pk=%d)" % (perm, perm.pk))
perm.delete()
deleted += 1
logger.info("Total removed orphan object permissions instances: %d" %
deleted)
return deleted
| bsd-2-clause | 8,977,540,974,672,123,000 | 32.324138 | 81 | 0.666598 | false | 4.368897 | false | false | false |
hamole/pbl8 | pbl8_project/pbl/migrations/0003_auto.py | 1 | 3700 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field studies_for on 'Treatment'
m2m_table_name = db.shorten_name(u'pbl_treatment_studies_for')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('treatment', models.ForeignKey(orm[u'pbl.treatment'], null=False)),
('study', models.ForeignKey(orm[u'pbl.study'], null=False))
))
db.create_unique(m2m_table_name, ['treatment_id', 'study_id'])
# Adding M2M table for field studies_against on 'Treatment'
m2m_table_name = db.shorten_name(u'pbl_treatment_studies_against')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('treatment', models.ForeignKey(orm[u'pbl.treatment'], null=False)),
('study', models.ForeignKey(orm[u'pbl.study'], null=False))
))
db.create_unique(m2m_table_name, ['treatment_id', 'study_id'])
# Removing M2M table for field treatment on 'Study'
db.delete_table(db.shorten_name(u'pbl_study_treatment'))
def backwards(self, orm):
# Removing M2M table for field studies_for on 'Treatment'
db.delete_table(db.shorten_name(u'pbl_treatment_studies_for'))
# Removing M2M table for field studies_against on 'Treatment'
db.delete_table(db.shorten_name(u'pbl_treatment_studies_against'))
# Adding M2M table for field treatment on 'Study'
m2m_table_name = db.shorten_name(u'pbl_study_treatment')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('study', models.ForeignKey(orm[u'pbl.study'], null=False)),
('treatment', models.ForeignKey(orm[u'pbl.treatment'], null=False))
))
db.create_unique(m2m_table_name, ['study_id', 'treatment_id'])
models = {
u'pbl.study': {
'Meta': {'ordering': "('title',)", 'object_name': 'Study'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'funder': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'year': ('django.db.models.fields.IntegerField', [], {'default': '2014', 'max_length': '4'})
},
u'pbl.treatment': {
'Meta': {'ordering': "('name',)", 'object_name': 'Treatment'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'studies_against': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'studies_against+'", 'blank': 'True', 'to': u"orm['pbl.Study']"}),
'studies_for': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'studies_for+'", 'blank': 'True', 'to': u"orm['pbl.Study']"})
}
}
complete_apps = ['pbl'] | mit | -9,074,300,691,962,626,000 | 51.126761 | 195 | 0.592703 | false | 3.285968 | false | false | false |
blurstudio/cross3d | cross3d/softimage/external.py | 1 | 4267 | ##
# \namespace cross3d.softimage.external
#
# \remarks This class can be used even outside of softimage. It gives you info on where
# softimage is installed, and allows you to run scripts in softimage.
# To Access this class use: cross3d.external('softimage')
#
# \author dougl
# \author Blur Studio
# \date 01/21/14
#
#------------------------------------------------------------------------------------------------------------------------
import os
import subprocess
import xml.etree.cElementTree as ET
from cross3d import Exceptions
from cross3d.constants import ScriptLanguage
from cross3d.abstract.external import External as AbstractExternal
#------------------------------------------------------------------------------------------------------------------------
class External(AbstractExternal):
# In case the software is installed but not used don't find it when not passing in a version
_ignoredVersions = set(os.environ.get('CROSS3D_STUDIO_IGNORED_SOFTIMAGE', '').split(','))
# map years to version numbers
_yearForVersion = {'8': '2010', '9': '2011', '10': '2012', '11': '2013', '12': '2014', '13': '2015'}
@classmethod
def name(cls):
return 'Softimage'
@classmethod
def getFileVersion(cls, filepath):
"""
Reads the xsi version of an xsi file from the associated scntoc.
"""
scntoc_path = filepath + 'toc'
if os.path.isfile(scntoc_path):
tree = ET.parse(scntoc_path)
root = tree.getroot()
return root.get('xsi_version')
return None
@classmethod
def runScript(cls, script, version=None, architecture=64, language=ScriptLanguage.Python, debug=False, headless=True):
if os.path.exists(script):
scriptPath = script
else:
scriptPath = cls.scriptPath()
with open(scriptPath, "w") as fle:
fle.write(script)
binary = os.path.join(cls.binariesPath(version, architecture), 'xsibatch.exe' if headless else 'xsi.exe')
scriptArgumentName = '-script' if headless else '-uiscript'
		# Continue makes sure there are no prompts.
command = [binary, '-continue', scriptArgumentName, scriptPath]
		# Processing means that it will not show the GUI and will not grab a license.
if headless:
command.insert(1, '-processing')
process = subprocess.Popen(command, stdout=subprocess.PIPE)
# TODO: This is the way to check for success. But it is blocking.
# Writing the log file.
with open(cls.scriptLog(), 'w') as fle:
fle.write(process.stdout.read())
# Checking the error in the log file.
with open(cls.scriptLog()) as fle:
content = fle.read()
return False if 'FATAL' in content else True
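	# Illustrative usage (the script text and the presence of an install are assumptions):
	# run a tiny Python script in the most recent Softimage install without showing the GUI:
	#
	#   ok = External.runScript("print 'hello from cross3d'", headless=True)
	#
	# True is returned when the captured log contains no FATAL entries.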
@classmethod
def binariesPath(cls, version=None, architecture=64, language='English'):
""" Finds the install path for various software installations. If version is None, the default
it will return the latest installed version of the software. Raises cross3d.Exceptions.SoftwareNotInstalled
if the software is not installed.
:param version: The version of the software. Default is None
:param architecture: The bit type to query the registry for(32, 64). Default is 64
:param language: Optional language that may be required for specific softwares.
"""
from cross3d.migrate import winregistry
hive = 'HKEY_LOCAL_MACHINE'
hkey = r'Software\Autodesk\Softimage\InstallPaths'
ret = None
if version == None:
# Find the latest version
versions = winregistry.listRegKeyValues(hive, hkey, architecture=architecture)
for version in sorted(versions, key= lambda i: i[0], reverse=True):
if version[0] not in cls._ignoredVersions:
ret = version[1]
break
else:
version = cls._yearForVersion.get(unicode(version), version)
try:
ret = winregistry.registryValue(hive, hkey, unicode(version), architecture)[0]
except WindowsError:
raise Exceptions.SoftwareNotInstalled('Softimage', version=version, architecture=architecture, language=language)
# If the version is not installed this will return '.', we want to return False.
if ret:
return os.path.join(os.path.normpath(ret), 'Application', 'bin')
raise Exceptions.SoftwareNotInstalled('Softimage', version=version, architecture=architecture, language=language)
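	# For illustration only (the actual path depends on the local install and registry):
	#   External.binariesPath(2014) would return something like
	#   r'C:\Program Files\Autodesk\Softimage 2014\Application\bin'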
| mit | -2,558,668,745,692,272,000 | 36.790909 | 121 | 0.669088 | false | 3.591751 | false | false | false |
Geof23/SESABench_II | parboil/driver/benchmark.py | 1 | 19162 | # (c) 2007 The Board of Trustees of the University of Illinois.
import sys
import os
from os import path
import re
from itertools import imap, repeat, chain
import globals
import process
import parboilfile as pbf
from futures import Future
from error import ErrorType
class Benchmark(object):
"""A benchmark.
If the benchmark is malformed or otherwise invalid, only the 'name' and
'invalid' fields will be set. Otherwise all fields will be set.
Fields:
name The name of the benchmark. This is also the benchmark
directory name.
invalid None if the benchmark is valid; otherwise, an exception
describing why the benchmark is invalid.
path Full path of the benchmark directory.
descr A description of the benchmark.
impls A dictionary of benchmark source implementations.
datas A dictionary of data sets used to run the benchmark."""
def __init__(self, name, path = None, impls = [], datasets = [],
description=None, invalid=None):
self.name = name
self.invalid = invalid
if invalid is None:
self.path = path
self.impls = dict(imap(lambda i: (i.name, i), impls))
self.datas = dict(imap(lambda i: (i.name, i), datasets))
self.descr = description
def createFromName(name):
"""Scan the benchmark directory for the benchmark named 'name'
and create a benchmark object for it."""
bmkdir = globals.benchdir.getChildByName(name)
datadir = globals.datadir.getChildByName(name)
descr = process.read_description_file(bmkdir)
try:
# Scan implementations of the benchmark
impls = [BenchImpl.createFromDir(impl)
for impl in process.scan_for_benchmark_versions(bmkdir)]
# Scan data sets of the benchmark
datas = [BenchDataset.createFromDir(data)
for data in process.scan_for_benchmark_datasets(datadir)]
# If no exception occurred, the benchmark is valid
return Benchmark(name, bmkdir.getPath(), impls, datas, descr)
finally:
pass
#except Exception, e:
# return Benchmark(name, invalid=e)
createFromName = staticmethod(createFromName)
def describe(self):
"""Return a string describing this benchmark."""
if self.invalid:
return "Error in benchmark:\n" + str(self.invalid)
if self.descr is None:
header = "Benchmark '" + self.name + "'"
else:
header = self.descr
impls = " ".join([impl.name for impl in self.impls.itervalues()])
datas = " ".join([data.name for data in self.datas.itervalues()])
return header + "\nVersions: " + impls + "\nData sets: " + datas
def instance_check(x):
if not isinstance(x, Benchmark):
raise TypeError, "argument must be an instance of Benchmark"
instance_check = staticmethod(instance_check)
class BenchImpl(object):
"""An implementation of a benchmark."""
def __init__(self, dir, description=None):
if not isinstance(dir, pbf.Directory):
            raise TypeError, "dir must be a directory"
self.name = dir.getName()
self.dir = dir
self.descr = description
def createFromDir(dir):
"""Scan the directory containing a benchmark implementation
and create a BenchImpl object from it."""
# Get the description from a file, if provided
descr = process.read_description_file(dir)
return BenchImpl(dir, descr)
createFromDir = staticmethod(createFromDir)
def makefile(self, benchmark, target=None, action=None, platform=None, opt={}):
"""Run this implementation's makefile."""
self.platform = platform
Benchmark.instance_check(benchmark)
def perform():
srcdir = path.join('src', self.name)
builddir = path.join('build', self.name)
if self.platform == None: platform = 'default'
else: platform = self.platform
env={'SRCDIR':srcdir,
'BUILDDIR':builddir + '_' + platform,
'BIN':path.join(builddir+'_'+platform,benchmark.name),
'PARBOIL_ROOT':globals.root,
'PLATFORM':platform,
'BUILD':self.name}
env.update(opt)
mkfile = globals.root + os.sep + 'common' + os.sep + 'mk'
# Run the makefile to build the benchmark
ret = process.makefile(target=target,
action=action,
filepath=path.join(mkfile, "Makefile"),
env=env)
if ret == True:
return ErrorType.Success
else:
return ErrorType.CompileError
# Go to the benchmark directory before building
return process.with_path(benchmark.path, perform)
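    # Illustrative example (benchmark/implementation names are hypothetical): for a
    # benchmark 'sgemm' with implementation 'cuda' on the 'default' platform, the
    # environment passed to common/mk/Makefile is roughly
    #   SRCDIR=src/cuda, BUILDDIR=build/cuda_default, BIN=build/cuda_default/sgemm,
    #   PLATFORM=default, BUILD=cuda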
def build(self, benchmark, platform):
"""Build an executable of this benchmark implementation."""
return self.makefile(benchmark, action='build', platform=platform)
def isBuilt(self, benchmark, platform):
"""Determine whether the executable is up to date."""
return self.makefile(benchmark, action='q', platform=platform) == ErrorType.Success
def clean(self, benchmark, platform):
"""Remove build files for this benchmark implementation."""
return self.makefile(benchmark, action='clean', platform=platform)
def run(self, benchmark, dataset, do_output=True, extra_opts=[], platform=None):
"""Run this benchmark implementation.
Return True if the benchmark terminated normally or False
if there was an error."""
if platform == None:
self.platform = 'default'
else:
self.platform = platform
# Ensure that the benchmark has been built
if not self.isBuilt(benchmark, platform):
rc = self.build(benchmark, platform)
# Stop if 'make' failed
if rc != ErrorType.Success: return rc
def perform():
if self.platform == None:
platform = 'default'
else:
platform = self.platform
# Run the program
#exename = path.join('build', self.name+'_'+platform, benchmark.name)
#args = [exename] + extra_opts + dataset.getCommandLineArguments(benchmark, do_output)
#rc = process.spawnwaitv(exename, args)
args = extra_opts + dataset.getCommandLineArguments(benchmark, do_output)
args = reduce(lambda x, y: x + ' ' + y, args)
###
try:
rc = self.makefile(benchmark, action='run', platform=platform, opt={"ARGS":args})
except KeyboardInterrupt:
rc = ErrorType.Killed
# Program exited with error?
# if rc != 0: return ErrorType.RunFailed
# return ErrorType.Success
return rc
return process.with_path(benchmark.path, perform)
def debug(self, benchmark, dataset, do_output=True, extra_opts=[], platform=None):
"""Debug this benchmark implementation."""
if platform == None:
self.platform = 'default'
else:
self.platform = platform
# Ensure that the benchmark has been built
if not self.isBuilt(benchmark, platform):
rc = self.build(benchmark, platform)
# Stop if 'make' failed
if rc != ErrorType.Success: return rc
def perform():
if self.platform == None:
platform = 'default'
else:
platform = self.platform
# Run the program
args = extra_opts + dataset.getCommandLineArguments(benchmark, do_output)
args = reduce(lambda x, y: x + ' ' + y, args)
###
rc = self.makefile(benchmark, action='debug', platform=platform, opt={"ARGS":args})
# Program exited with error?
if rc != 0: return ErrorType.RunFailed
return ErrorType.Success
return process.with_path(benchmark.path, perform)
def check(self, benchmark, dataset):
"""Check the output from the last run of this benchmark
implementation.
Return True if the output checks successfully or False
otherwise."""
def perform():
output_file = dataset.getTemporaryOutputFile(benchmark).getPath()
reference_file = dataset.getReferenceOutputPath()
compare = os.path.join('tools', 'compare-output')
rc = process.spawnwaitl(compare,
compare, reference_file, output_file)
# Program exited with error, or mismatch in output?
if rc != 0: return False
return True
return process.with_path(benchmark.path, perform)
def __str__(self):
return "<BenchImpl '" + self.name + "'>"
class BenchDataset(object):
"""Data sets for running a benchmark."""
def __init__(self, dir, in_files=[], out_files=[], parameters=[],
description=None):
if not isinstance(dir, pbf.Directory):
raise TypeError, "dir must be a pbf.Directory"
self.name = dir.getName()
self.dir = dir
self.inFiles = in_files
self.outFiles = out_files
self.parameters = parameters
self.descr = description
def createFromDir(dir):
"""Scan the directory containing a dataset
and create a BenchDataset object from it."""
# Identify the paths where files may be found
input_dir = dir.getChildByName('input')
output_dir = dir.getChildByName('output')
#benchmark_path = path.join(globals.root, 'benchmarks', name)
def check_default_input_files():
# This function is called to see if the input file set
# guessed by scanning the input directory can be used
if invalid_default_input_files:
raise ValueError, "Cannot infer command line when there are multiple input files in a data set\n(Fix by adding an input DESCRIPTION file)"
if input_dir.exists():
input_descr = process.read_description_file(input_dir)
input_files = input_dir.scanAndReturnNames()
# If more than one input file was found, cannot use the default
# input file list produced by scanning the directory
invalid_default_input_files = len(input_files) > 1
else:
# If there's no input directory, assume the benchmark
# takes no input
input_descr = None
input_files = []
invalid_default_input_files = False
# Read the text of the input description file
if input_descr is not None:
(parameters, input_files1, input_descr) = \
unpack_dataset_description(input_descr, input_files=None)
if input_files1 is None:
# No override value given; use the default
check_default_input_files()
else:
input_files = input_files1
else:
check_default_input_files()
parameters = []
# Look for output files
output_descr = process.read_description_file(output_dir)
output_files = output_dir.scanAndReturnNames()
if len(output_files) > 1:
raise ValueError, "Multiple output files not supported"
# Concatenate input and output descriptions
if input_descr and output_descr:
descr = input_descr + "\n\n" + output_descr
else:
descr = input_descr or output_descr
return BenchDataset(dir, input_files, output_files, parameters, descr)
createFromDir = staticmethod(createFromDir)
def getName(self):
"""Get the name of this dataset."""
return self.name
def getTemporaryOutputDir(self, benchmark):
"""Get the pbf.Directory for the output of a benchmark run.
This function should always return the same pbf.Directory if its parameters
are the same. The output path is not the path where the reference
output is stored."""
rundir = globals.benchdir.getChildByName(benchmark.name).getChildByName('run')
if rundir.getChildByName(self.name) is None:
datasetpath = path.join(rundir.getPath(), self.name)
filepath = path.join(datasetpath, self.outFiles[0])
rundir.addChild(pbf.Directory(datasetpath, [pbf.File(filepath, False)]))
return rundir.getChildByName(self.name)
def getTemporaryOutputFile(self, benchmark):
"""Get the pbf.File for the output of a benchmark run.
This function should always return the same pbf.File if its parameters
        are the same. The output path is not where the reference output
is stored."""
return self.getTemporaryOutputDir(benchmark).getChildByName(self.outFiles[0])
def getReferenceOutputPath(self):
"""Get the name of the reference file, to which the output of a
benchmark run should be compared."""
return path.join(self.dir.getPath(), 'output', self.outFiles[0])
def getCommandLineArguments(self, benchmark, do_output=True):
"""Get the command line arguments that should be passed to the
        executable to run this data set. If 'do_output' is True, then
        the executable will be passed flags to save its output to a file.
        Directories to hold output files are created if they do not exist."""
args = []
# Add arguments to pass input files to the benchmark
if self.inFiles:
in_files = ",".join([path.join(self.dir.getPath(),'input', x)
for x in self.inFiles])
args.append("-i")
args.append(in_files)
# Add arguments to store the output somewhere, if output is
# desired
if do_output and self.outFiles:
if len(self.outFiles) != 1:
raise ValueError, "only one output file is supported"
out_file = self.getTemporaryOutputFile(benchmark)
args.append("-o")
args.append(out_file.getPath())
# Ensure that a directory exists for the output
self.getTemporaryOutputDir(benchmark).touch()
args += self.parameters
return args
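    # Illustrative example (file names are hypothetical): for a dataset with one input
    # file 'in.dat' and one output file 'out.dat', this returns roughly
    #   ['-i', '<dataset dir>/input/in.dat', '-o', '<run dir>/out.dat'] + self.parameters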
def __str__(self):
return "<BenchData '" + self.name + "'>"
def unpack_dataset_description(descr, parameters=[], input_files=[]):
"""Read information from the raw contents of a data set description
file. Optional 'parameters' and 'input_files' arguments may be
given, which will be retained unless overridden by the description
file."""
leftover = []
split_at_colon = re.compile(r"^\s*([a-zA-Z]+)\s*:(.*)$")
# Initialize these to default empty strings
parameter_text = None
input_file_text = None
# Scan the description line by line
for line in descr.split('\n'):
m = split_at_colon.match(line)
if m is None: continue
# This line appears to declare something that should be
# interpreted
keyword = m.group(1)
if keyword == "Parameters":
parameter_text = m.group(2)
elif keyword == "Inputs":
input_file_text = m.group(2)
# else, ignore the line
# Split the strings into (possibly) multiple arguments, discarding
# whitespace
if parameter_text is not None: parameters = parameter_text.split()
if input_file_text is not None: input_files = input_file_text.split()
return (parameters, input_files, descr)
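# Illustrative example (contents are hypothetical): a DESCRIPTION file containing
#   Parameters: -x 10
#   Inputs: a.txt b.txt
# makes unpack_dataset_description return (['-x', '10'], ['a.txt', 'b.txt'], descr).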
def version_scanner():
"""version_scanner() -> (path -> pbf.Directory)
Return a function to find benchmark versions in the src
directory for the benchmark."""
return lambda x: pbf.scan_file(x, True, lambda y: pbf.Directory(y), ['.svn'])
def find_benchmarks():
"""Find benchmarks in the repository. The benchmarks are
identified, but their contents are not scanned immediately. A
dictionary is returned mapping benchmark names to futures
containing the benchmarks."""
if not globals.root:
raise ValueError, "root directory has not been set"
# Scan all benchmarks in the 'benchmarks' directory and
# lazily create benchmark objects.
db = {}
try:
globals.benchdir.scan()
globals.datadir.scan()
for bmkdir in globals.benchdir.getScannedChildren():
bmk = Future(lambda bmkdir=bmkdir: Benchmark.createFromName(bmkdir.getName()))
db[bmkdir.getName()] = bmk
except OSError, e:
sys.stdout.write("Benchmark directory not found!\n\n")
return {}
return db
def _desc_file(dpath):
"""_desc_file(dpath)
Returns a pbf.File for an optional description file in the directory dpath."""
return pbf.File(path.join(dpath,'DESCRIPTION'), False)
def benchmark_scanner():
"""benchmark_scanner -> (path -> pbf.Directory)
Returns a function which will scan a filename and create a pbf.Directory
for a benchmark represented by that name."""
def create_benchmark_dir(dpath):
expected = [pbf.Directory(path.join(dpath,'src'), [], version_scanner()),
pbf.Directory(path.join(dpath,'tools'),
[pbf.File(path.join(dpath,'compare-output'))]),
pbf.Directory(path.join(dpath,'build'), must_exist=False),
pbf.Directory(path.join(dpath,'run'), must_exist=False),
_desc_file(dpath)]
return pbf.Directory(dpath, expected)
return lambda x: pbf.scan_file(x, True, create_benchmark_dir,['_darcs','.svn'])
def dataset_scanner():
"""dataset_scanner -> (path -> pbf.Directory)
Returns a function which will scan a filename and create a pbf.Directory
for a folder containing datasets for the benchmark of the same name."""
def create_dataset_dir(dpath):
simple_scan = lambda x: pbf.scan_file(x)
expected = [pbf.Directory(path.join(dpath,'input'),
[_desc_file(path.join(dpath,'input'))], simple_scan),
pbf.Directory(path.join(dpath,'output'), [], simple_scan),
_desc_file(dpath)]
return pbf.Directory(dpath, expected)
return lambda x: pbf.scan_file(x, True, create_dataset_dir, ['.svn', '_darcs'])
def dataset_repo_scanner():
"""dataset_repo_scanner -> (path -> pbf.Directory)
Returns a function which will scan a filename and create a pbf.Directory
for a folder containing a dataset repository for parboil benchmarks."""
benchmark_dsets_scanner = lambda x: pbf.Directory(x, [], dataset_scanner())
return lambda x: pbf.scan_file(x, True, benchmark_dsets_scanner)
| mit | -8,829,396,101,898,536,000 | 35.921002 | 154 | 0.608235 | false | 4.42132 | false | false | false |
theo-l/django | tests/admin_inlines/models.py | 10 | 7855 | """
Testing of admin inline formsets.
"""
import random
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Parent(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Teacher(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Child(models.Model):
name = models.CharField(max_length=50)
teacher = models.ForeignKey(Teacher, models.CASCADE)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
parent = GenericForeignKey()
def __str__(self):
return 'I am %s, a child of %s' % (self.name, self.parent)
class Book(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Author(models.Model):
name = models.CharField(max_length=50)
books = models.ManyToManyField(Book)
class NonAutoPKBook(models.Model):
rand_pk = models.IntegerField(primary_key=True, editable=False)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
def save(self, *args, **kwargs):
while not self.rand_pk:
test_pk = random.randint(1, 99999)
if not NonAutoPKBook.objects.filter(rand_pk=test_pk).exists():
self.rand_pk = test_pk
super().save(*args, **kwargs)
class NonAutoPKBookChild(NonAutoPKBook):
pass
class EditablePKBook(models.Model):
manual_pk = models.IntegerField(primary_key=True)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
class Holder(models.Model):
dummy = models.IntegerField()
class Inner(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder, models.CASCADE)
readonly = models.CharField("Inner readonly label", max_length=1)
def get_absolute_url(self):
return '/inner/'
class Holder2(models.Model):
dummy = models.IntegerField()
class Inner2(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder2, models.CASCADE)
class Holder3(models.Model):
dummy = models.IntegerField()
class Inner3(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder3, models.CASCADE)
# Models for ticket #8190
class Holder4(models.Model):
dummy = models.IntegerField()
class Inner4Stacked(models.Model):
dummy = models.IntegerField(help_text="Awesome stacked help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
class Meta:
constraints = [
models.UniqueConstraint(fields=['dummy', 'holder'], name='unique_stacked_dummy_per_holder')
]
class Inner4Tabular(models.Model):
dummy = models.IntegerField(help_text="Awesome tabular help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
class Meta:
constraints = [
models.UniqueConstraint(fields=['dummy', 'holder'], name='unique_tabular_dummy_per_holder')
]
# Models for ticket #31441
class Holder5(models.Model):
dummy = models.IntegerField()
class Inner5Stacked(models.Model):
name = models.CharField(max_length=10)
select = models.CharField(choices=(('1', 'One'), ('2', 'Two')), max_length=10)
text = models.TextField()
dummy = models.IntegerField()
holder = models.ForeignKey(Holder5, models.CASCADE)
class Inner5Tabular(models.Model):
name = models.CharField(max_length=10)
select = models.CharField(choices=(('1', 'One'), ('2', 'Two')), max_length=10)
text = models.TextField()
dummy = models.IntegerField()
holder = models.ForeignKey(Holder5, models.CASCADE)
# Models for #12749
class Person(models.Model):
firstname = models.CharField(max_length=15)
class OutfitItem(models.Model):
name = models.CharField(max_length=15)
class Fashionista(models.Model):
person = models.OneToOneField(Person, models.CASCADE, primary_key=True)
weaknesses = models.ManyToManyField(OutfitItem, through='ShoppingWeakness', blank=True)
class ShoppingWeakness(models.Model):
fashionista = models.ForeignKey(Fashionista, models.CASCADE)
item = models.ForeignKey(OutfitItem, models.CASCADE)
# Models for #13510
class TitleCollection(models.Model):
pass
class Title(models.Model):
collection = models.ForeignKey(TitleCollection, models.SET_NULL, blank=True, null=True)
title1 = models.CharField(max_length=100)
title2 = models.CharField(max_length=100)
# Models for #15424
class Poll(models.Model):
name = models.CharField(max_length=40)
class Question(models.Model):
text = models.CharField(max_length=40)
poll = models.ForeignKey(Poll, models.CASCADE)
class Novel(models.Model):
name = models.CharField(max_length=40)
class NovelReadonlyChapter(Novel):
class Meta:
proxy = True
class Chapter(models.Model):
name = models.CharField(max_length=40)
novel = models.ForeignKey(Novel, models.CASCADE)
class FootNote(models.Model):
"""
Model added for ticket 19838
"""
chapter = models.ForeignKey(Chapter, models.PROTECT)
note = models.CharField(max_length=40)
# Models for #16838
class CapoFamiglia(models.Model):
name = models.CharField(max_length=100)
class Consigliere(models.Model):
name = models.CharField(max_length=100, help_text='Help text for Consigliere')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class SottoCapo(models.Model):
name = models.CharField(max_length=100)
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class ReadOnlyInline(models.Model):
name = models.CharField(max_length=100, help_text='Help text for ReadOnlyInline')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE)
# Models for #18433
class ParentModelWithCustomPk(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
class ChildModel1(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model1/'
class ChildModel2(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model2/'
# Models for #19425
class BinaryTree(models.Model):
name = models.CharField(max_length=100)
parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True)
# Models for #19524
class LifeForm(models.Model):
pass
class ExtraTerrestrial(LifeForm):
name = models.CharField(max_length=100)
class Sighting(models.Model):
et = models.ForeignKey(ExtraTerrestrial, models.CASCADE)
place = models.CharField(max_length=100)
# Models for #18263
class SomeParentModel(models.Model):
name = models.CharField(max_length=1)
class SomeChildModel(models.Model):
name = models.CharField(max_length=1)
position = models.PositiveIntegerField()
parent = models.ForeignKey(SomeParentModel, models.CASCADE)
readonly_field = models.CharField(max_length=1)
# Other models
class ProfileCollection(models.Model):
pass
class Profile(models.Model):
collection = models.ForeignKey(ProfileCollection, models.SET_NULL, blank=True, null=True)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
| bsd-3-clause | 4,038,649,694,587,854,300 | 24.669935 | 103 | 0.708466 | false | 3.554299 | false | false | false |
bchareyre/ratchet | gui/qt4/SerializableEditor.py | 1 | 34633 | # encoding: utf-8
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4 import QtGui
import re,itertools
import logging
logging.trace=logging.debug
logging.basicConfig(level=logging.INFO)
from yade import *
import yade.qt
try:
from minieigen import *
except ImportError:
from miniEigen import *
seqSerializableShowType=True # show type headings in serializable sequences (takes vertical space, but makes the type hyperlinked)
# BUG: cursor is moved to the beginning of the input field even if it has focus
#
# checking for focus seems to return True always and cursor is never moved
#
# the 'True or' part effectively disables the condition (so that the cursor is moved always), but it might be fixed in the future somehow
#
# if True or w.hasFocus(): w.home(False)
#
#
def makeWrapperHref(text,className,attr=None,static=False):
"""Create clickable HTML hyperlink to a Yade class or its attribute.
:param className: name of the class to link to.
:param attr: attribute to link to. If given, must exist directly in given *className*; if not given or empty, link to the class itself is created and *attr* is ignored.
:return: HTML with the hyperref.
"""
if not static: return '<a href="%s#yade.wrapper.%s%s">%s</a>'%(yade.qt.sphinxDocWrapperPage,className,(('.'+attr) if attr else ''),text)
else: return '<a href="%s#ystaticattr-%s.%s">%s</a>'%(yade.qt.sphinxDocWrapperPage,className,attr,text)
def serializableHref(ser,attr=None,text=None):
"""Return HTML href to a *ser* optionally to the attribute *attr*.
	The class hierarchy is crawled upwards to find out in which parent class *attr* is defined,
	so that the href target is a valid link. In that case, only single inheritance is assumed and
the first class from the top defining *attr* is used.
:param ser: object of class deriving from :yref:`Serializable`, or string; if string, *attr* must be empty.
	:param attr: name of the attribute to link to; if empty, a link to the class itself is created.
:param text: visible text of the hyperlink; if not given, either class name or attribute name without class name (when *attr* is not given) is used.
:returns: HTML with the hyperref.
"""
# klass is a class name given as string
if isinstance(ser,str):
if attr: raise InvalidArgument("When *ser* is a string, *attr* must be empty (only class link can be created)")
return makeWrapperHref(text if text else ser,ser)
# klass is a type object
if attr:
klass=ser.__class__
while attr in dir(klass.__bases__[0]): klass=klass.__bases__[0]
if not text: text=attr
else:
klass=ser.__class__
if not text: text=klass.__name__
return makeWrapperHref(text,klass.__name__,attr,static=(attr and getattr(klass,attr)==getattr(ser,attr)))
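# For illustration (class and attribute names are just examples): makeWrapperHref('damping',
# 'NewtonIntegrator','damping') yields
#   '<a href="<sphinxDocWrapperPage>#yade.wrapper.NewtonIntegrator.damping">damping</a>'
# while serializableHref first climbs the class hierarchy so the anchor points at the class
# that actually defines the attribute.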
class AttrEditor():
"""Abstract base class handing some aspects common to all attribute editors.
Holds exacly one attribute which is updated whenever it changes."""
def __init__(self,getter=None,setter=None):
self.getter,self.setter=getter,setter
self.hot,self.focused=False,False
self.widget=None
def refresh(self): pass
def update(self): pass
def isHot(self,hot=True):
"Called when the widget gets focus; mark it hot, change colors etc."
if hot==self.hot: return
self.hot=hot
if hot: self.setStyleSheet('QWidget { background: red }')
else: self.setStyleSheet('QWidget { background: none }')
def sizeHint(self): return QSize(150,12)
def trySetter(self,val):
try: self.setter(val)
except AttributeError: self.setEnabled(False)
self.isHot(False)
class AttrEditor_Bool(AttrEditor,QFrame):
def __init__(self,parent,getter,setter):
AttrEditor.__init__(self,getter,setter)
QFrame.__init__(self,parent)
self.checkBox=QCheckBox(self)
lay=QVBoxLayout(self); lay.setSpacing(0); lay.setMargin(0); lay.addStretch(1); lay.addWidget(self.checkBox); lay.addStretch(1)
self.checkBox.clicked.connect(self.update)
def refresh(self): self.checkBox.setChecked(self.getter())
def update(self): self.trySetter(self.checkBox.isChecked())
class AttrEditor_Int(AttrEditor,QSpinBox):
def __init__(self,parent,getter,setter):
AttrEditor.__init__(self,getter,setter)
QSpinBox.__init__(self,parent)
self.setRange(int(-1e9),int(1e9)); self.setSingleStep(1);
self.valueChanged.connect(self.update)
def refresh(self): self.setValue(self.getter())
def update(self): self.trySetter(self.value())
class AttrEditor_Str(AttrEditor,QLineEdit):
def __init__(self,parent,getter,setter):
AttrEditor.__init__(self,getter,setter)
QLineEdit.__init__(self,parent)
self.textEdited.connect(self.isHot)
self.selectionChanged.connect(self.isHot)
self.editingFinished.connect(self.update)
def refresh(self): self.setText(self.getter())
def update(self): self.trySetter(str(self.text()))
class AttrEditor_Float(AttrEditor,QLineEdit):
def __init__(self,parent,getter,setter):
AttrEditor.__init__(self,getter,setter)
QLineEdit.__init__(self,parent)
self.textEdited.connect(self.isHot)
self.selectionChanged.connect(self.isHot)
self.editingFinished.connect(self.update)
def refresh(self):
self.setText(str(self.getter()));
if True or not self.hasFocus(): self.home(False)
def update(self):
try: self.trySetter(float(self.text()))
except ValueError: self.refresh()
class AttrEditor_Quaternion(AttrEditor,QFrame):
def __init__(self,parent,getter,setter):
AttrEditor.__init__(self,getter,setter)
QFrame.__init__(self,parent)
self.grid=QHBoxLayout(self); self.grid.setSpacing(0); self.grid.setMargin(0)
for i in range(4):
if i==3:
f=QFrame(self); f.setFrameShape(QFrame.VLine); f.setFrameShadow(QFrame.Sunken); f.setFixedWidth(4) # add vertical divider (axis | angle)
self.grid.addWidget(f)
w=QLineEdit('')
self.grid.addWidget(w);
w.textEdited.connect(self.isHot)
w.selectionChanged.connect(self.isHot)
w.editingFinished.connect(self.update)
def refresh(self):
val=self.getter(); axis,angle=val.toAxisAngle()
for i in (0,1,2,4):
w=self.grid.itemAt(i).widget(); w.setText(str(axis[i] if i<3 else angle));
if True or not w.hasFocus(): w.home(False)
def update(self):
try:
x=[float((self.grid.itemAt(i).widget().text())) for i in (0,1,2,4)]
		except ValueError: self.refresh(); return
q=Quaternion(Vector3(x[0],x[1],x[2]),x[3]); q.normalize() # from axis-angle
self.trySetter(q)
def setFocus(self): self.grid.itemAt(0).widget().setFocus()
class AttrEditor_Se3(AttrEditor,QFrame):
def __init__(self,parent,getter,setter):
AttrEditor.__init__(self,getter,setter)
QFrame.__init__(self,parent)
self.grid=QGridLayout(self); self.grid.setSpacing(0); self.grid.setMargin(0)
for row,col in itertools.product(range(2),range(5)): # one additional column for vertical line in quaternion
if (row,col)==(0,3): continue
if (row,col)==(0,4): self.grid.addWidget(QLabel(u'←<i>pos</i> ↙<i>ori</i>',self),row,col); continue
if (row,col)==(1,3):
f=QFrame(self); f.setFrameShape(QFrame.VLine); f.setFrameShadow(QFrame.Sunken); f.setFixedWidth(4); self.grid.addWidget(f,row,col); continue
w=QLineEdit('')
self.grid.addWidget(w,row,col);
w.textEdited.connect(self.isHot)
w.selectionChanged.connect(self.isHot)
w.editingFinished.connect(self.update)
def refresh(self):
pos,ori=self.getter(); axis,angle=ori.toAxisAngle()
for i in (0,1,2,4):
w=self.grid.itemAtPosition(1,i).widget(); w.setText(str(axis[i] if i<3 else angle));
if True or not w.hasFocus(): w.home(False)
for i in (0,1,2):
w=self.grid.itemAtPosition(0,i).widget(); w.setText(str(pos[i]));
if True or not w.hasFocus(): w.home(False)
def update(self):
try:
q=[float((self.grid.itemAtPosition(1,i).widget().text())) for i in (0,1,2,4)]
v=[float((self.grid.itemAtPosition(0,i).widget().text())) for i in (0,1,2)]
		except ValueError: self.refresh(); return
qq=Quaternion(Vector3(q[0],q[1],q[2]),q[3]); qq.normalize() # from axis-angle
self.trySetter((v,qq))
def setFocus(self): self.grid.itemAtPosition(0,0).widget().setFocus()
class AttrEditor_MatrixX(AttrEditor,QFrame):
def __init__(self,parent,getter,setter,rows,cols,idxConverter):
'idxConverter converts row,col tuple to either (row,col), (col) etc depending on what access is used for []'
AttrEditor.__init__(self,getter,setter)
QFrame.__init__(self,parent)
self.rows,self.cols=rows,cols
self.idxConverter=idxConverter
self.setContentsMargins(0,0,0,0)
val=self.getter()
self.grid=QGridLayout(self); self.grid.setSpacing(0); self.grid.setMargin(0)
for row,col in itertools.product(range(self.rows),range(self.cols)):
w=QLineEdit('')
self.grid.addWidget(w,row,col);
w.textEdited.connect(self.isHot)
w.selectionChanged.connect(self.isHot)
w.editingFinished.connect(self.update)
def refresh(self):
val=self.getter()
for row,col in itertools.product(range(self.rows),range(self.cols)):
w=self.grid.itemAtPosition(row,col).widget()
w.setText(str(val[self.idxConverter(row,col)]))
if True or not w.hasFocus: w.home(False) # make the left-most part visible, if the text is wider than the widget
def update(self):
try:
val=self.getter()
for row,col in itertools.product(range(self.rows),range(self.cols)):
w=self.grid.itemAtPosition(row,col).widget()
if w.isModified(): val[self.idxConverter(row,col)]=float(w.text())
logging.debug('setting'+str(val))
self.trySetter(val)
except ValueError: self.refresh()
def setFocus(self): self.grid.itemAtPosition(0,0).widget().setFocus()
class AttrEditor_MatrixXi(AttrEditor,QFrame):
def __init__(self,parent,getter,setter,rows,cols,idxConverter):
'idxConverter converts row,col tuple to either (row,col), (col) etc depending on what access is used for []'
AttrEditor.__init__(self,getter,setter)
QFrame.__init__(self,parent)
self.rows,self.cols=rows,cols
self.idxConverter=idxConverter
self.setContentsMargins(0,0,0,0)
self.grid=QGridLayout(self); self.grid.setSpacing(0); self.grid.setMargin(0)
for row,col in itertools.product(range(self.rows),range(self.cols)):
w=QSpinBox()
w.setRange(int(-1e9),int(1e9)); w.setSingleStep(1);
self.grid.addWidget(w,row,col);
self.refresh() # refresh before connecting signals!
for row,col in itertools.product(range(self.rows),range(self.cols)):
self.grid.itemAtPosition(row,col).widget().valueChanged.connect(self.update)
def refresh(self):
val=self.getter()
for row,col in itertools.product(range(self.rows),range(self.cols)):
w=self.grid.itemAtPosition(row,col).widget().setValue(val[self.idxConverter(row,col)])
def update(self):
val=self.getter(); modified=False
for row,col in itertools.product(range(self.rows),range(self.cols)):
w=self.grid.itemAtPosition(row,col).widget()
if w.value()!=val[self.idxConverter(row,col)]:
modified=True; val[self.idxConverter(row,col)]=w.value()
if not modified: return
logging.debug('setting'+str(val))
self.trySetter(val)
def setFocus(self): self.grid.itemAtPosition(0,0).widget().setFocus()
class AttrEditor_Vector6i(AttrEditor_MatrixXi):
def __init__(self,parent,getter,setter):
AttrEditor_MatrixXi.__init__(self,parent,getter,setter,1,6,lambda r,c:c)
class AttrEditor_Vector3i(AttrEditor_MatrixXi):
def __init__(self,parent,getter,setter):
AttrEditor_MatrixXi.__init__(self,parent,getter,setter,1,3,lambda r,c:c)
class AttrEditor_Vector2i(AttrEditor_MatrixXi):
def __init__(self,parent,getter,setter):
AttrEditor_MatrixXi.__init__(self,parent,getter,setter,1,2,lambda r,c:c)
class AttrEditor_Vector6(AttrEditor_MatrixX):
def __init__(self,parent,getter,setter):
AttrEditor_MatrixX.__init__(self,parent,getter,setter,1,6,lambda r,c:c)
class AttrEditor_Vector3(AttrEditor_MatrixX):
def __init__(self,parent,getter,setter):
AttrEditor_MatrixX.__init__(self,parent,getter,setter,1,3,lambda r,c:c)
class AttrEditor_Vector2(AttrEditor_MatrixX):
def __init__(self,parent,getter,setter):
AttrEditor_MatrixX.__init__(self,parent,getter,setter,1,2,lambda r,c:c)
class AttrEditor_Matrix3(AttrEditor_MatrixX):
def __init__(self,parent,getter,setter):
AttrEditor_MatrixX.__init__(self,parent,getter,setter,3,3,lambda r,c:(r,c))
class Se3FakeType: pass
_fundamentalEditorMap={bool:AttrEditor_Bool,str:AttrEditor_Str,int:AttrEditor_Int,float:AttrEditor_Float,Quaternion:AttrEditor_Quaternion,Vector2:AttrEditor_Vector2,Vector3:AttrEditor_Vector3,Vector6:AttrEditor_Vector6,Matrix3:AttrEditor_Matrix3,Vector6i:AttrEditor_Vector6i,Vector3i:AttrEditor_Vector3i,Vector2i:AttrEditor_Vector2i,Se3FakeType:AttrEditor_Se3}
_fundamentalInitValues={bool:True,str:'',int:0,float:0.0,Quaternion:Quaternion((0,1,0),0.0),Vector3:Vector3.Zero,Matrix3:Matrix3.Zero,Vector6:Vector6.Zero,Vector6i:Vector6i.Zero,Vector3i:Vector3i.Zero,Vector2i:Vector2i.Zero,Vector2:Vector2.Zero,Se3FakeType:(Vector3.Zero,Quaternion((0,1,0),0.0))}
class SerQLabel(QLabel):
def __init__(self,parent,label,tooltip,path):
QLabel.__init__(self,parent)
self.path=path
self.setText(label)
if tooltip or path: self.setToolTip(('<b>'+path+'</b><br>' if self.path else '')+(tooltip if tooltip else ''))
self.linkActivated.connect(yade.qt.openUrl)
def mousePressEvent(self,event):
if event.button()!=Qt.MidButton:
event.ignore(); return
# middle button clicked, paste pasteText to clipboard
cb=QApplication.clipboard()
cb.setText(self.path,mode=QClipboard.Clipboard)
cb.setText(self.path,mode=QClipboard.Selection) # X11 global selection buffer
event.accept()
class SerializableEditor(QFrame):
"Class displaying and modifying serializable attributes of a yade object."
import collections
import logging
# each attribute has one entry associated with itself
class EntryData:
def __init__(self,name,T,flags=0):
self.name,self.T,self.flags=name,T,flags
self.lineNo,self.widget=None,None
def __init__(self,ser,parent=None,ignoredAttrs=set(),showType=False,path=None):
"Construct window, *ser* is the object we want to show."
QtGui.QFrame.__init__(self,parent)
self.ser=ser
self.path=(ser.label if (hasattr(ser,'label') and ser.label) else path)
self.showType=showType
self.hot=False
self.entries=[]
self.ignoredAttrs=ignoredAttrs
logging.debug('New Serializable of type %s'%ser.__class__.__name__)
self.setWindowTitle(str(ser))
self.mkWidgets()
self.refreshTimer=QTimer(self)
self.refreshTimer.timeout.connect(self.refreshEvent)
self.refreshTimer.start(500)
def getListTypeFromDocstring(self,attr):
"Guess type of array by scanning docstring for :yattrtype: and parsing its argument; ugly, but works."
doc=getattr(self.ser.__class__,attr).__doc__
if doc==None:
logging.error("Attribute %s has no docstring."%attr)
return None
m=re.search(r':yattrtype:`([^`]*)`',doc)
if not m:
logging.error("Attribute %s does not contain :yattrtype:`....` (docstring is '%s'"%(attr,doc))
return None
cxxT=m.group(1)
logging.debug('Got type "%s" from :yattrtype:'%cxxT)
def vecTest(T,cxxT):
#regexp=r'^\s*(std\s*::)?\s*vector\s*<\s*(std\s*::)?\s*('+T+r')\s*>\s*$'
regexp=r'^\s*(std\s*::)?\s*vector\s*<\s*(shared_ptr\s*<\s*)?\s*(std\s*::)?\s*('+T+r')(\s*>)?\s*>\s*$'
m=re.match(regexp,cxxT)
return m
vecMap={
'bool':bool,'int':int,'long':int,'Body::id_t':long,'size_t':long,
'Real':float,'float':float,'double':float,
'Vector6r':Vector6,'Vector6i':Vector6i,'Vector3i':Vector3i,'Vector2r':Vector2,'Vector2i':Vector2i,
'Vector3r':Vector3,'Matrix3r':Matrix3,'Se3r':Se3FakeType,
'string':str,
#'BodyCallback':BodyCallback,
'IntrCallback':IntrCallback,'BoundFunctor':BoundFunctor,'IGeomFunctor':IGeomFunctor,'IPhysFunctor':IPhysFunctor,'LawFunctor':LawFunctor,'KinematicEngine':KinematicEngine,
'GlShapeFunctor':GlShapeFunctor,'GlStateFunctor':GlStateFunctor,'GlIGeomFunctor':GlIGeomFunctor,'GlIPhysFunctor':GlIPhysFunctor,'GlBoundFunctor':GlBoundFunctor,'GlExtraDrawer':GlExtraDrawer
}
for T,ret in vecMap.items():
if vecTest(T,cxxT):
logging.debug("Got type %s from cxx type %s"%(repr(ret),cxxT))
return (ret,)
logging.error("Unable to guess python type from cxx type '%s'"%cxxT)
return None
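	# Illustrative example (the docstring fragment is hypothetical): an attribute documented
	# with :yattrtype:`vector<Real>` is resolved by getListTypeFromDocstring to the 1-tuple
	# (float,), i.e. "a list of floats".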
def mkAttrEntries(self):
if self.ser==None: return
try:
d=self.ser.dict()
except TypeError:
logging.error('TypeError when getting attributes of '+str(self.ser)+',skipping. ')
import traceback
traceback.print_exc()
attrs=self.ser.dict().keys(); attrs.sort()
for attr in attrs:
val=getattr(self.ser,attr) # get the value using serattr, as it might be different from what the dictionary provides (e.g. Body.blockedDOFs)
t=None
doc=getattr(self.ser.__class__,attr).__doc__;
if '|yhidden|' in doc: continue
if attr in self.ignoredAttrs: continue
if isinstance(val,list):
t=self.getListTypeFromDocstring(attr)
				if not t and len(val)>0: t=(val[0].__class__,) # 1-tuple is list of the contained type
#if not t: raise RuntimeError('Unable to guess type of '+str(self.ser)+'.'+attr)
# hack for Se3, which is returned as (Vector3,Quaternion) in python
elif isinstance(val,tuple) and len(val)==2 and val[0].__class__==Vector3 and val[1].__class__==Quaternion: t=Se3FakeType
else: t=val.__class__
match=re.search(':yattrflags:`\s*([0-9]+)\s*`',doc) # non-empty attribute
flags=int(match.group(1)) if match else 0
#logging.debug('Attr %s is of type %s'%(attr,((t[0].__name__,) if isinstance(t,tuple) else t.__name__)))
self.entries.append(self.EntryData(name=attr,T=t))
def getDocstring(self,attr=None):
"If attr is *None*, return docstring of the Serializable itself"
doc=(getattr(self.ser.__class__,attr).__doc__ if attr else self.ser.__class__.__doc__)
if not doc: return ''
doc=re.sub(':y(attrtype|default|attrflags):`[^`]*`','',doc)
statAttr=re.compile('^.. ystaticattr::.*$',re.MULTILINE|re.DOTALL)
		doc=re.sub(statAttr,'',doc) # static classes have their proper docs at the beginning, discard static member docs
# static: attribute of the type is the same object as attribute of the instance
# in that case, get docstring from the class documentation by parsing it
if attr and getattr(self.ser.__class__,attr)==getattr(self.ser,attr): doc=self.getStaticAttrDocstring(attr)
doc=re.sub(':yref:`([^`]*)`','\\1',doc)
import textwrap
wrapper=textwrap.TextWrapper(replace_whitespace=False)
return wrapper.fill(textwrap.dedent(doc))
def getStaticAttrDocstring(self,attr):
ret=''; c=self.ser.__class__
while hasattr(c,attr) and hasattr(c.__base__,attr): c=c.__base__
start='.. ystaticattr:: %s.%s('%(c.__name__,attr)
if start in c.__doc__:
ll=c.__doc__.split('\n')
for i in range(len(ll)):
if ll[i].startswith(start): break
for i in range(i+1,len(ll)):
if len(ll[i])>0 and ll[i][0] not in ' \t': break
ret+=ll[i]
return ret
else: return '[no documentation found]'
def mkWidget(self,entry):
if not entry.T: return None
# single fundamental object
Klass=_fundamentalEditorMap.get(entry.T,None)
getter,setter=lambda: getattr(self.ser,entry.name), lambda x: setattr(self.ser,entry.name,x)
if Klass:
widget=Klass(self,getter=getter,setter=setter)
widget.setFocusPolicy(Qt.StrongFocus)
if (entry.flags & AttrFlags.readonly): widget.setEnabled(False)
return widget
# sequences
if entry.T.__class__==tuple:
			assert(len(entry.T)==1) # we don't handle tuples of other lengths
# sequence of serializables
T=entry.T[0]
if (issubclass(T,Serializable) or T==Serializable):
widget=SeqSerializable(self,getter,setter,T,path=(self.path+'.'+entry.name if self.path else None),shrink=True)
return widget
if (T in _fundamentalEditorMap):
widget=SeqFundamentalEditor(self,getter,setter,T)
return widget
return None
# a serializable
if issubclass(entry.T,Serializable) or entry.T==Serializable:
obj=getattr(self.ser,entry.name)
if hasattr(obj,'label') and obj.label: path=obj.label
elif self.path: path=self.path+'.'+entry.name
else: path=None
widget=SerializableEditor(getattr(self.ser,entry.name),parent=self,showType=self.showType,path=(self.path+'.'+entry.name if self.path else None))
widget.setFrameShape(QFrame.Box); widget.setFrameShadow(QFrame.Raised); widget.setLineWidth(1)
return widget
return None
def mkWidgets(self):
self.mkAttrEntries()
grid=QFormLayout()
grid.setContentsMargins(2,2,2,2)
grid.setVerticalSpacing(0)
grid.setLabelAlignment(Qt.AlignRight)
if self.showType:
lab=SerQLabel(self,makeSerializableLabel(self.ser,addr=True,href=True),tooltip=self.getDocstring(),path=self.path)
lab.setFrameShape(QFrame.Box); lab.setFrameShadow(QFrame.Sunken); lab.setLineWidth(2); lab.setAlignment(Qt.AlignHCenter); lab.linkActivated.connect(yade.qt.openUrl)
grid.setWidget(0,QFormLayout.SpanningRole,lab)
for entry in self.entries:
entry.widget=self.mkWidget(entry)
objPath=(self.path+'.'+entry.name) if self.path else None
label=SerQLabel(self,serializableHref(self.ser,entry.name),tooltip=self.getDocstring(entry.name),path=objPath)
grid.addRow(label,entry.widget if entry.widget else QLabel('<i>unhandled type</i>'))
self.setLayout(grid)
self.refreshEvent()
def refreshEvent(self):
for e in self.entries:
if e.widget and not e.widget.hot: e.widget.refresh()
def refresh(self): pass
def makeSerializableLabel(ser,href=False,addr=True,boldHref=True,num=-1,count=-1):
ret=u''
if num>=0:
if count>=0: ret+=u'%d/%d. '%(num,count)
else: ret+=u'%d. '%num
if href: ret+=(u' <b>' if boldHref else u' ')+serializableHref(ser)+(u'</b> ' if boldHref else u' ')
else: ret+=ser.__class__.__name__+' '
if hasattr(ser,'label') and ser.label: ret+=u' “'+unicode(ser.label)+u'”'
# do not show address if there is a label already
elif addr:
import re
ss=unicode(ser); m=re.match(u'<(.*) instance at (0x.*)>',ss)
if m: ret+=m.group(2)
else: logging.warning(u"Serializable converted to str ('%s') does not contain 'instance at 0x…'"%ss)
return ret
class SeqSerializableComboBox(QFrame):
def __init__(self,parent,getter,setter,serType,path=None,shrink=False):
QFrame.__init__(self,parent)
self.getter,self.setter,self.serType,self.path,self.shrink=getter,setter,serType,path,shrink
self.layout=QVBoxLayout(self)
topLineFrame=QFrame(self)
topLineLayout=QHBoxLayout(topLineFrame);
for l in self.layout, topLineLayout: l.setSpacing(0); l.setContentsMargins(0,0,0,0)
topLineFrame.setLayout(topLineLayout)
buttons=(self.newButton,self.killButton,self.upButton,self.downButton)=[QPushButton(label,self) for label in (u'☘',u'☠',u'↑',u'↓')]
buttonSlots=(self.newSlot,self.killSlot,self.upSlot,self.downSlot) # same order as buttons
for b in buttons: b.setStyleSheet('QPushButton { font-size: 15pt; }'); b.setFixedWidth(30); b.setFixedHeight(30)
self.combo=QComboBox(self)
self.combo.setSizeAdjustPolicy(QComboBox.AdjustToContents)
for w in buttons[0:2]+[self.combo,]+buttons[2:4]: topLineLayout.addWidget(w)
self.layout.addWidget(topLineFrame) # nested layout
self.scroll=QScrollArea(self); self.scroll.setWidgetResizable(True)
self.layout.addWidget(self.scroll)
self.seqEdit=None # currently edited serializable
self.setLayout(self.layout)
self.hot=None # API compat with SerializableEditor
self.setFrameShape(QFrame.Box); self.setFrameShadow(QFrame.Raised); self.setLineWidth(1)
# signals
for b,slot in zip(buttons,buttonSlots): b.clicked.connect(slot)
self.combo.currentIndexChanged.connect(self.comboIndexSlot)
self.refreshEvent()
# periodic refresh
self.refreshTimer=QTimer(self)
self.refreshTimer.timeout.connect(self.refreshEvent)
self.refreshTimer.start(1000) # 1s should be enough
#print 'SeqSerializable path is',self.path
def comboIndexSlot(self,ix): # different seq item selected
currSeq=self.getter();
if len(currSeq)==0: ix=-1
logging.debug('%s comboIndexSlot len=%d, ix=%d'%(self.serType.__name__,len(currSeq),ix))
self.downButton.setEnabled(ix<len(currSeq)-1)
self.upButton.setEnabled(ix>0)
self.combo.setEnabled(ix>=0)
if ix>=0:
ser=currSeq[ix]
self.seqEdit=SerializableEditor(ser,parent=self,showType=seqSerializableShowType,path=(self.path+'['+str(ix)+']') if self.path else None)
self.scroll.setWidget(self.seqEdit)
if self.shrink:
self.sizeHint=lambda: QSize(100,1000)
self.scroll.sizeHint=lambda: QSize(100,1000)
self.sizePolicy().setVerticalPolicy(QSizePolicy.Expanding)
self.scroll.sizePolicy().setVerticalPolicy(QSizePolicy.Expanding)
self.setMinimumHeight(min(300,self.seqEdit.height()+self.combo.height()+10))
self.setMaximumHeight(100000)
self.scroll.setMaximumHeight(100000)
else:
self.scroll.setWidget(QFrame())
if self.shrink:
self.setMaximumHeight(self.combo.height()+10);
self.scroll.setMaximumHeight(0)
def serLabel(self,ser,i=-1):
return ('' if i<0 else str(i)+'. ')+str(ser)[1:-1].replace('instance at ','')
def refreshEvent(self,forceIx=-1):
currSeq=self.getter()
comboEnabled=self.combo.isEnabled()
if comboEnabled and len(currSeq)==0: self.comboIndexSlot(-1) # force refresh, otherwise would not happen from the initially empty state
ix,cnt=self.combo.currentIndex(),self.combo.count()
# serializable currently being edited (which can be absent) or the one of which index is forced
ser=(self.seqEdit.ser if self.seqEdit else None) if forceIx<0 else currSeq[forceIx]
if comboEnabled and len(currSeq)==cnt and (ix<0 or ser==currSeq[ix]): return
if not comboEnabled and len(currSeq)==0: return
logging.debug(self.serType.__name__+' rebuilding list from scratch')
self.combo.clear()
if len(currSeq)>0:
prevIx=-1
for i,s in enumerate(currSeq):
self.combo.addItem(makeSerializableLabel(s,num=i,count=len(currSeq),addr=False))
if s==ser: prevIx=i
if forceIx>=0: newIx=forceIx # force the index (used from newSlot to make the new element active)
elif prevIx>=0: newIx=prevIx # if found what was active before, use it
elif ix>=0: newIx=ix # otherwise use the previous index (e.g. after deletion)
else: newIx=0 # fallback to 0
logging.debug('%s setting index %d'%(self.serType.__name__,newIx))
self.combo.setCurrentIndex(newIx)
else:
logging.debug('%s EMPTY, setting index 0'%(self.serType.__name__))
self.combo.setCurrentIndex(-1)
self.killButton.setEnabled(len(currSeq)>0)
def newSlot(self):
dialog=NewSerializableDialog(self,self.serType.__name__)
if not dialog.exec_(): return # cancelled
ser=dialog.result()
ix=self.combo.currentIndex()
currSeq=self.getter(); currSeq.insert(ix,ser); self.setter(currSeq)
logging.debug('%s new item created at index %d'%(self.serType.__name__,ix))
self.refreshEvent(forceIx=ix)
def killSlot(self):
ix=self.combo.currentIndex()
currSeq=self.getter(); del currSeq[ix]; self.setter(currSeq)
self.refreshEvent()
def upSlot(self):
i=self.combo.currentIndex()
assert(i>0)
currSeq=self.getter();
prev,curr=currSeq[i-1:i+1]; currSeq[i-1],currSeq[i]=curr,prev; self.setter(currSeq)
self.refreshEvent(forceIx=i-1)
def downSlot(self):
i=self.combo.currentIndex()
currSeq=self.getter(); assert(i<len(currSeq)-1);
curr,nxt=currSeq[i:i+2]; currSeq[i],currSeq[i+1]=nxt,curr; self.setter(currSeq)
self.refreshEvent(forceIx=i+1)
def refresh(self): pass # API compat with SerializableEditor
SeqSerializable=SeqSerializableComboBox
class NewFundamentalDialog(QDialog):
def __init__(self,parent,attrName,typeObj,typeStr):
QDialog.__init__(self,parent)
self.setWindowTitle('%s (type %s)'%(attrName,typeStr))
self.layout=QVBoxLayout(self)
self.scroll=QScrollArea(self)
self.scroll.setWidgetResizable(True)
self.buttons=QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel);
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.reject)
self.layout.addWidget(self.scroll)
self.layout.addWidget(self.buttons)
self.setWindowModality(Qt.WindowModal)
class FakeObjClass: pass
self.fakeObj=FakeObjClass()
self.attrName=attrName
Klass=_fundamentalEditorMap.get(typeObj,None)
initValue=_fundamentalInitValues.get(typeObj,typeObj())
setattr(self.fakeObj,attrName,initValue)
if Klass:
self.widget=Klass(None,self.fakeObj,attrName)
self.scroll.setWidget(self.widget)
self.scroll.show()
self.widget.refresh()
else: raise RuntimeError("Unable to construct new dialog for type %s"%(typeStr))
def result(self):
self.widget.update()
return getattr(self.fakeObj,self.attrName)
class NewSerializableDialog(QDialog):
def __init__(self,parent,baseClassName,includeBase=True):
import yade.system
QDialog.__init__(self,parent)
self.setWindowTitle('Create new object of type %s'%baseClassName)
self.layout=QVBoxLayout(self)
self.combo=QComboBox(self)
childs=list(yade.system.childClasses(baseClassName,includeBase=False)); childs.sort()
if includeBase:
self.combo.addItem(baseClassName)
self.combo.insertSeparator(1000)
self.combo.addItems(childs)
self.combo.currentIndexChanged.connect(self.comboSlot)
self.scroll=QScrollArea(self)
self.scroll.setWidgetResizable(True)
self.buttons=QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel);
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.reject)
self.layout.addWidget(self.combo)
self.layout.addWidget(self.scroll)
self.layout.addWidget(self.buttons)
self.ser=None
self.combo.setCurrentIndex(0); self.comboSlot(0)
self.setWindowModality(Qt.WindowModal)
def comboSlot(self,index):
item=str(self.combo.itemText(index))
self.ser=eval(item+'()')
self.scroll.setWidget(SerializableEditor(self.ser,self.scroll,showType=True))
self.scroll.show()
def result(self): return self.ser
def sizeHint(self): return QSize(180,400)
class SeqFundamentalEditor(QFrame):
def __init__(self,parent,getter,setter,itemType):
QFrame.__init__(self,parent)
self.getter,self.setter,self.itemType=getter,setter,itemType
self.layout=QVBoxLayout()
topLineFrame=QFrame(self); topLineLayout=QHBoxLayout(topLineFrame)
self.form=QFormLayout()
self.form.setContentsMargins(0,0,0,0)
self.form.setVerticalSpacing(0)
self.form.setLabelAlignment(Qt.AlignLeft)
self.formFrame=QFrame(self); self.formFrame.setLayout(self.form)
self.layout.addWidget(self.formFrame)
self.setLayout(self.layout)
# SerializableEditor API compat
self.hot=False
self.rebuild()
# periodic refresh
self.refreshTimer=QTimer(self)
self.refreshTimer.timeout.connect(self.refreshEvent)
self.refreshTimer.start(1000) # 1s should be enough
def contextMenuEvent(self, event):
index=self.localPositionToIndex(event.pos())
seq=self.getter()
if len(seq)==0: index=-1
field=self.form.itemAt(index,QFormLayout.LabelRole).widget() if index>=0 else None
menu=QMenu(self)
actNew,actKill,actUp,actDown=[menu.addAction(name) for name in (u'☘ New',u'☠ Remove',u'↑ Up',u'↓ Down')]
if index<0: [a.setEnabled(False) for a in actKill,actUp,actDown]
if index==len(seq)-1: actDown.setEnabled(False)
if index==0: actUp.setEnabled(False)
if field: field.setStyleSheet('QWidget { background: green }')
act=menu.exec_(self.mapToGlobal(event.pos()))
if field: field.setStyleSheet('QWidget { background: none }')
if not act: return
if act==actNew: self.newSlot(index)
elif act==actKill: self.killSlot(index)
elif act==actUp: self.upSlot(index)
elif act==actDown: self.downSlot(index)
def localPositionToIndex(self,pos):
gp=self.mapToGlobal(pos)
for row in range(self.form.count()/2):
w,i=self.form.itemAt(row,QFormLayout.FieldRole),self.form.itemAt(row,QFormLayout.LabelRole)
for wi in w.widget(),i.widget():
x0,y0,x1,y1=wi.geometry().getCoords(); globG=QRect(self.mapToGlobal(QPoint(x0,y0)),self.mapToGlobal(QPoint(x1,y1)))
if globG.contains(gp):
return row
return -1
def newSlot(self,i):
seq=self.getter();
seq.insert(i,_fundamentalInitValues.get(self.itemType,self.itemType()))
self.setter(seq)
self.rebuild()
def killSlot(self,i):
seq=self.getter(); assert(i<len(seq)); del seq[i]; self.setter(seq)
self.refreshEvent()
def upSlot(self,i):
seq=self.getter(); assert(i<len(seq));
prev,curr=seq[i-1:i+1]; seq[i-1],seq[i]=curr,prev; self.setter(seq)
self.refreshEvent(forceIx=i-1)
def downSlot(self,i):
seq=self.getter(); assert(i<len(seq)-1);
curr,nxt=seq[i:i+2]; seq[i],seq[i+1]=nxt,curr; self.setter(seq)
self.refreshEvent(forceIx=i+1)
def rebuild(self):
currSeq=self.getter()
# clear everything
rows=self.form.count()/2
for row in range(rows):
logging.trace('counts',self.form.rowCount(),self.form.count())
for wi in self.form.itemAt(row,QFormLayout.FieldRole),self.form.itemAt(row,QFormLayout.LabelRole):
self.form.removeItem(wi)
logging.trace('deleting widget',wi.widget())
widget=wi.widget(); widget.hide(); del widget # for some reason, deleting does not make the thing disappear visually; hiding does, however
logging.trace('counts after ',self.form.rowCount(),self.form.count())
logging.debug('cleared')
# add everything
Klass=_fundamentalEditorMap.get(self.itemType,None)
if not Klass:
errMsg=QTextEdit(self)
errMsg.setReadOnly(True); errMsg.setText("Sorry, editing sequences of %s's is not (yet?) implemented."%(self.itemType.__name__))
self.form.insertRow(0,'<b>Error</b>',errMsg)
return
class ItemGetter():
def __init__(self,getter,index): self.getter,self.index=getter,index
def __call__(self): return self.getter()[self.index]
class ItemSetter():
def __init__(self,getter,setter,index): self.getter,self.setter,self.index=getter,setter,index
def __call__(self,val): seq=self.getter(); seq[self.index]=val; self.setter(seq)
for i,item in enumerate(currSeq):
widget=Klass(self,ItemGetter(self.getter,i),ItemSetter(self.getter,self.setter,i)) #proxy,'value')
self.form.insertRow(i,'%d. '%i,widget)
logging.debug('added item %d %s'%(i,str(widget)))
if len(currSeq)==0: self.form.insertRow(0,'<i>empty</i>',QLabel('<i>(right-click for menu)</i>'))
logging.debug('rebuilt, will refresh now')
self.refreshEvent(dontRebuild=True) # avoid infinite recursion it the length would change meanwhile
def refreshEvent(self,dontRebuild=False,forceIx=-1):
currSeq=self.getter()
if len(currSeq)!=self.form.count()/2: #rowCount():
if dontRebuild: return # length changed behind our back, just pretend nothing happened and update next time instead
self.rebuild()
currSeq=self.getter()
for i in range(len(currSeq)):
item=self.form.itemAt(i,QFormLayout.FieldRole)
logging.trace('got item #%d %s'%(i,str(item.widget())))
widget=item.widget()
if not widget.hot:
widget.refresh()
if forceIx>=0 and forceIx==i: widget.setFocus()
def refresh(self): pass # SerializableEditor API
| gpl-2.0 | -7,651,492,641,948,851,000 | 44.41601 | 360 | 0.727223 | false | 2.92437 | false | false | false |
donaldharvey/snappy | snappy/utils.py | 1 | 2623 | import urllib2
import urllib
import os
from mimetools import choose_boundary
from mimetypes import guess_type
import stat
class Singleton(type):
def __init__(self, name, bases, dict):
super(Singleton, self).__init__(name, bases, dict)
self.instance = None
def __call__(self, *args, **kw):
if self.instance is None:
self.instance = super(Singleton, self).__call__(*args, **kw)
return self.instance
class MultipartDataHandler(urllib2.BaseHandler):
"""
A urllib2-based multipart/form-data poster, adapted slightly from
http://odin.himinbi.org/MultipartPostHandler.py and
http://code.activestate.com/recipes/146306/.
"""
handler_order = urllib2.HTTPHandler.handler_order - 20
def http_request(self, request):
data = request.get_data()
        if data is not None and not isinstance(data, str):
fields, files = [], []
for key, value in data.items():
if type(value) == file:
files.append((key, value))
else:
fields.append((key, value))
if not len(files):
# no files, so go straight ahead and encode the data
data = urllib.urlencode(fields, True)
else:
content_type, data = self._encode_multipart_formdata(fields, files)
req_content_type = request.get_header('Content-Type', '')
if 'multipart/form-data' in req_content_type:
request.set_header('Content-Type', content_type)
else:
request.add_unredirected_header('Content-Type', content_type)
request.add_data(data)
return request
https_request = http_request
def _encode_multipart_formdata(self, fields, files):
"""
fields is a sequence of (name, value) elements for regular form fields.
        files is a sequence of (name, file object) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
"""
boundary = choose_boundary()
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + boundary)
L.append('Content-Disposition: form-data; name="%s"' % str(key))
L.append('')
L.append(str(value))
for (key, fd) in files:
L.append('--' + boundary)
filename = os.path.basename(fd.name)
filesize = os.fstat(fd.fileno())[stat.ST_SIZE]
            L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (str(key), str(filename)))
mimetype = guess_type(filename)[0] or 'application/octet-stream'
L.append('Content-Type: %s' % mimetype)
L.append('Content-Length: %s' % filesize)
L.append('')
fd.seek(0)
L.append(fd.read())
L.append('--' + boundary + '--')
L.append('')
body = CRLF.join(L)
contenttype = 'multipart/form-data; boundary=%s' % boundary
return contenttype, body
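# --- Usage sketch (illustrative, not part of the original module) ---
# MultipartDataHandler plugs into urllib2's opener chain; file objects in the
# POST data become multipart/form-data parts, other values plain form fields.
def _example_multipart_upload(url, fields):
    """Hypothetical helper showing how the handler is installed and used."""
    opener = urllib2.build_opener(MultipartDataHandler())
    return opener.open(url, fields)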
| gpl-3.0 | 9,019,982,423,787,834,000 | 32.202532 | 107 | 0.676706 | false | 3.156438 | false | false | false |
intel-hadoop/Big-Data-Benchmark-for-Big-Bench | engines/hive/queries/q08/q08_filter_sales_with_reviews_viewed_before.py | 1 | 3144 | #"INTEL CONFIDENTIAL"
#Copyright 2016 Intel Corporation All Rights Reserved.
#
#The source code contained or described herein and all documents related to the source code ("Material") are owned by Intel Corporation or its suppliers or licensors. Title to the Material remains with Intel Corporation or its suppliers and licensors. The Material contains trade secrets and proprietary and confidential information of Intel or its suppliers and licensors. The Material is protected by worldwide copyright and trade secret laws and treaty provisions. No part of the Material may be used, copied, reproduced, modified, published, uploaded, posted, transmitted, distributed, or disclosed in any way without Intel's prior express written permission.
#
#No license under any patent, copyright, trade secret or other intellectual property right is granted to or conferred upon you by disclosure or delivery of the Materials, either expressly, by implication, inducement, estoppel or otherwise. Any license under such intellectual property rights must be express and approved by Intel in writing.
import sys
import logging
import traceback
import os
import time
from time import strftime
web_page_type_filter=sys.argv[1]
seconds_before_sale_filter = long(sys.argv[2])
if __name__ == "__main__":
line = ''
try:
current_key = ''
last_review_date=-1
#sales_sk should be distinct
last_sales_sk = ''
#expects input to be partitioned by uid and sorted by date_sk (and timestamp) ascending
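        # an input line therefore looks like (illustrative values only):
        #   "1234\t1452553200\t98765\treview"  i.e. user, timestamp, sales_sk, page type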
for line in sys.stdin:
            # clustered by wcs_user_sk and sorted by wcs_user_sk, tstamp_inSec_str, wcs_sales_sk, wp_type ascending in this order => ensured by hive
wcs_user_sk, tstamp_inSec_str, wcs_sales_sk, wp_type = line.strip().split("\t")
#reset on partition change
if current_key != wcs_user_sk :
current_key = wcs_user_sk
last_review_date = -1
last_sales_sk = ''
tstamp_inSec = long(tstamp_inSec_str)
#found review before purchase, save last review date
if wp_type == web_page_type_filter:
last_review_date = tstamp_inSec
continue
            #if we encounter a sold item ( wcs_sales_sk.isdigit() => valid non null value) and a user looked at a review within 'seconds_before_sale_filter' => print found sales_sk back to hive
#if last_review_date > 0 and (tstamp_inSec - last_review_date) <= seconds_before_sale_filter and wcs_sales_sk.isdigit() : #version with duplicate sales_sk's
if last_review_date > 0 and (tstamp_inSec - last_review_date) <= seconds_before_sale_filter and wcs_sales_sk.isdigit() and last_sales_sk != wcs_sales_sk : #version reduced duplicate sales_sk's
last_sales_sk = wcs_sales_sk
print wcs_sales_sk
except:
## should only happen if input format is not correct, like 4 instead of 5 tab separated values
logging.basicConfig(level=logging.DEBUG, filename=strftime("/tmp/bigbench_q8_reducer_%Y%m%d-%H%M%S.log"))
logging.info('web_page_type_filter: ' + web_page_type_filter )
logging.info('seconds_before_sale_filter: ' + seconds_before_sale_filter )
logging.info("line from hive: \"" + line + "\"")
logging.exception("Oops:")
raise
sys.exit(1)
| apache-2.0 | -7,692,260,263,573,723,000 | 51.4 | 663 | 0.735687 | false | 3.409978 | false | false | false |
viswimmer1/PythonGenerator | data/python_files/34574373/cmss.py | 1 | 2623 | import win32pipe
import win32console
import win32process
import time
import win32con
import codecs
import ctypes
user32 = ctypes.windll.user32
CONQUE_WINDOWS_VK = {
'3' : win32con.VK_CANCEL,
'8' : win32con.VK_BACK,
'9' : win32con.VK_TAB,
'12' : win32con.VK_CLEAR,
'13' : win32con.VK_RETURN,
'17' : win32con.VK_CONTROL,
'20' : win32con.VK_CAPITAL,
'27' : win32con.VK_ESCAPE,
'28' : win32con.VK_CONVERT,
'35' : win32con.VK_END,
'36' : win32con.VK_HOME,
'37' : win32con.VK_LEFT,
'38' : win32con.VK_UP,
'39' : win32con.VK_RIGHT,
'40' : win32con.VK_DOWN,
'45' : win32con.VK_INSERT,
'46' : win32con.VK_DELETE,
'47' : win32con.VK_HELP
}
def make_input_key(c, control_key_state=None):
kc = win32console.PyINPUT_RECORDType (win32console.KEY_EVENT)
kc.KeyDown = True
kc.RepeatCount = 1
cnum = ord(c)
if cnum == 3:
pid_list = win32console.GetConsoleProcessList()
win32console.GenerateConsoleCtrlEvent(win32con.CTRL_C_EVENT, 0)
return
else:
kc.Char = unicode(c)
if str(cnum) in CONQUE_WINDOWS_VK:
kc.VirtualKeyCode = CONQUE_WINDOWS_VK[str(cnum)]
else:
kc.VirtualKeyCode = ctypes.windll.user32.VkKeyScanA(cnum)
#kc.VirtualKeyCode = ctypes.windll.user32.VkKeyScanA(cnum+96)
#kc.ControlKeyState = win32con.LEFT_CTRL_PRESSED
return kc
#win32console.AttachConsole()
coord = win32console.PyCOORDType
con_stdout = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)
con_stdin = win32console.GetStdHandle(win32console.STD_INPUT_HANDLE)
flags = win32process.NORMAL_PRIORITY_CLASS
si = win32process.STARTUPINFO()
si.dwFlags |= win32con.STARTF_USESHOWWINDOW
(handle1, handle2, i1, i2) = win32process.CreateProcess(None, "cmd.exe", None, None, 0, flags, None, '.', si)
time.sleep(1)
#size = con_stdout.GetConsoleScreenBufferInfo()['Window']
# with codecs.open("log.txt", "w", "utf8") as f:
# for i in xrange(0, size.Bottom):
# f.write(con_stdout.ReadConsoleOutputCharacter(size.Right+1, coord(0, i)))
# f.write("\n")
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
HOST = "127.0.0.1"
PORT = 5554
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, PORT))
s.listen(1)
(sc, scname) = s.accept()
while True:
msg = sc.recv(1)
if ord(msg) == 0:
break
keys = [make_input_key(msg)]
if keys:
con_stdin.WriteConsoleInput(keys)
win32process.TerminateProcess(handle1, 0) | gpl-2.0 | 1,044,929,001,205,104,300 | 26.846154 | 109 | 0.643157 | false | 2.766878 | false | false | false |
airanmehr/bio | Scripts/TimeSeriesPaper/Plot/topSNPs.py | 1 | 1589 | '''
Copyleft Oct 14, 2016 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: [email protected]
'''
import numpy as np;
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
pd.options.display.max_rows = 20;
pd.options.display.expand_frame_repr = False
import seaborn as sns
import matplotlib as mpl
import os;
home = os.path.expanduser('~') + '/'
import Utils.Util as utl
import Scripts.TimeSeriesPaper.RealData.Utils as rutl
a = rutl.loadAllScores().groupby(level='h', axis=1).apply(rutl.HstatisticAll)
df = pd.read_pickle(utl.outpath + 'real/scores.df')
i = df.lrd.sort_values().index[-1]
df.loc[i]
cd = pd.read_pickle(utl.outpath + 'real/CD.F59.df')
import Utils.Plots as pplt
import pylab as plt
names = rutl.loadSNPIDs()
sns.set_style("white", {"grid.color": "0.9", 'axes.linewidth': .5, "grid.linewidth": "9.99"})
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']});
mpl.rc('text', usetex=True)
reload(pplt)
f, ax = plt.subplots(1, 2, sharey=True, dpi=300, figsize=(4, 2))
i = a[0.5].sort_values().index[-1]
sns.set_context("notebook", font_scale=1, rc={"lines.linewidth": 1.2})
pplt.plotSiteReal(cd.loc[i], ax=ax[0], legend=True)
ax[0].set_title('{}:{:.0f} ({})'.format(i[0], i[1], names.loc[i]), fontsize=8)
i = df.lrdiff.sort_values().index[-1]
pplt.plotSiteReal(cd.loc[i], ax=ax[1])
sns.set_context("notebook", font_scale=1, rc={"lines.linewidth": 1.2})
ax[1].set_title('{}:{:.0f} ({})'.format(i[0], i[1], names.loc[i]), fontsize=8)
plt.gcf().subplots_adjust(bottom=0.2)
pplt.savefig('topSNPs', 300)
plt.show()
| mit | 2,218,888,666,753,329,700 | 32.104167 | 102 | 0.680302 | false | 2.486698 | false | true | false |
edx/edx-load-tests | util/generate_summary.py | 1 | 3450 | # -*- coding: utf-8 -*-
"""
Generate a summary of a previous loadtest run in this environment.
See for usage example in a jenkins job dsl:
https://github.com/edx/jenkins-job-dsl/blob/master/testeng/jobs/loadtestDriver.groovy
Prerequisites:
A logfile produced by util/run-loadtest.sh should be present in its
standard location.
Output:
Produces summary on standard output in YAML format. The structure is as
follows:
* monitoring_links:
* list of link text/url pairs pointing to monitoring dashboards.
* timeline:
* begin: ISO 8601 date for when the test began.
* end: ISO 8601 date for when the test ended.
"""
from datetime import timedelta
import yaml
import helpers.markers
from util.app_monitors_config import MONITORS
# Refer to util/run-loadtest.sh in case this file path changes.
STANDARD_LOGFILE_PATH = "results/log.txt"
def parse_logfile_events(logfile):
"""
Parse the logfile for events
Parameters:
logfile (file): the file containing locust logs for a single load test
Returns:
iterator of (datetime.datetime, str) tuples: the parsed events in the
order they are encountered.
"""
for line in logfile:
data = helpers.markers.parse_logfile_event_marker(line)
if data is not None:
yield (data['time'], data['event'])
def get_time_bounds(logfile):
"""
Determine when the load test started and stopped.
Parameters:
logfile (file): the file containing locust logs for a single load test
Returns:
two-tuple of datetime.datetime: the time bounds of the load test
"""
begin_time = end_time = None
relevant_events = ['locust_start_hatching', 'edx_heartbeat', 'quitting']
relevant_times = [
time
for time, event
in parse_logfile_events(logfile)
if event in relevant_events
]
begin_time, end_time = (min(relevant_times), max(relevant_times))
return (begin_time, end_time)
def main():
"""
Generate a summary of a previous load test run.
This script assumes "results/log.txt" is the logfile in question.
"""
with open(STANDARD_LOGFILE_PATH) as logfile:
loadtest_begin_time, loadtest_end_time = get_time_bounds(logfile)
monitoring_links = []
for monitor in MONITORS:
monitoring_links.append({
'url': monitor.url(
begin_time=loadtest_begin_time,
end_time=loadtest_end_time,
),
'text': u'{}: {} ({} — {})'.format(
monitor.monitoring_service_name,
monitor.app_name,
# We use naive datetimes (i.e. no attached tz) and just
# assume UTC all along. Tacking on the "Z" implies UTC.
loadtest_begin_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
loadtest_end_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
),
})
print(yaml.dump(
{
'timeline': {
'begin': loadtest_begin_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
'end': loadtest_end_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
},
'monitoring_links': monitoring_links
},
default_flow_style=False, # Represent objects using indented blocks
# rather than inline enclosures.
allow_unicode=True,
))
if __name__ == "__main__":
main()
| apache-2.0 | -6,008,860,902,320,476,000 | 30.345455 | 85 | 0.606729 | false | 3.913734 | true | false | false |
lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/prompt_toolkit/eventloop/coroutine.py | 1 | 3783 | from __future__ import unicode_literals
import types
from prompt_toolkit.eventloop.defaults import get_event_loop
from prompt_toolkit.eventloop.future import Future
__all__ = [
'From',
'Return',
'ensure_future',
]
def ensure_future(future_or_coroutine):
"""
Take a coroutine (generator) or a `Future` object, and make sure to return
a `Future`.
"""
if isinstance(future_or_coroutine, Future):
return future_or_coroutine
elif isinstance(future_or_coroutine, types.GeneratorType):
return _run_coroutine(future_or_coroutine)
else:
raise ValueError('Expecting coroutine or Future object. Got %r: %r' % (
type(future_or_coroutine), future_or_coroutine))
class Return(Exception):
"""
For backwards-compatibility with Python2: when "return" is not supported in
a generator/coroutine. (Like Trollius.)
Instead of ``return value``, in a coroutine do: ``raise Return(value)``.
"""
def __init__(self, value):
self.value = value
def __repr__(self):
return 'Return(%r)' % (self.value, )
def From(obj):
"""
Used to emulate 'yield from'.
(Like Trollius does.)
"""
return ensure_future(obj)
def _run_coroutine(coroutine):
"""
Takes a generator that can yield Future instances.
Example:
def gen():
yield From(...)
print('...')
yield From(...)
ensure_future(gen())
The values which are yielded by the given coroutine are supposed to be
`Future` objects.
"""
assert isinstance(coroutine, types.GeneratorType)
loop = get_event_loop()
result_f = loop.create_future()
# Wrap this future in a `_FutureRef`. We need this in order to be able to
# break all its references when we're done. This is important
# because in case of an exception, we want to be sure that
# `result_f.__del__` is triggered as soon as possible, so that we see the
# exception.
# (If `step_next` had a direct reference to `result_f` and there is a
# future that references `step_next`, then sometimes it won't be cleaned up
# immediately. - I'm not sure how exactly, but in that case it requires the
# garbage collector, because refcounting isn't sufficient.)
ref = _FutureRef(result_f)
# Loop through the generator.
def step_next(f=None):
" Execute next step of the coroutine."
try:
if f is None:
new_f = coroutine.send(None)
else:
exc = f.exception()
if exc:
new_f = coroutine.throw(exc)
else:
new_f = coroutine.send(f.result())
except StopIteration:
# Stop coroutine. Make sure that a result has been set in the future,
# this will call the callbacks. (Also, don't take any result from
# StopIteration, it has already been set using `raise Return()`.
if not ref.future.done():
ref.future.set_result(None)
ref.forget()
except Return as e:
ref.future.set_result(e.value)
ref.forget()
except BaseException as e:
ref.future.set_exception(e)
ref.forget()
else:
# Process yielded value from coroutine.
assert isinstance(new_f, Future), 'got %r' % (new_f, )
@new_f.add_done_callback
def continue_(_):
step_next(new_f)
# Start processing coroutine.
step_next()
return result_f
class _FutureRef(object):
def __init__(self, future):
self.future = future
def forget(self):
" Forget reference. "
self.future = None
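# --- Minimal usage sketch (illustrative, not part of prompt_toolkit) ---
# A coroutine yields Futures through From() and returns a value by raising
# Return(); ensure_future() drives it and gives back a Future for the result.
def _example_coroutine():
    f = get_event_loop().create_future()
    f.set_result(41)  # pretend some asynchronous work has completed
    value = yield From(f)
    raise Return(value + 1)
# fut = ensure_future(_example_coroutine())  # fut eventually holds 42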
| mit | 911,875,866,142,147,300 | 29.02381 | 81 | 0.591594 | false | 4.116431 | false | false | false |
bhdouglass/remindor-common | tests/test_time_validation.py | 1 | 1362 | import remindor_common.datetimeutil as d
valid_singular = [
"now",
"1:00pm",
"1:00 pm",
"13:00",
"13",
"1300",
"1pm"
]
valid_repeating = [
"every hour",
"every hour from 1 to 1:00pm",
"every minute",
"every minute from 2:00pm to 1500",
"every 3 minutes",
"every 3 minutes from 3:30pm to 3:45 pm",
"every 2 hours",
"every 2 hours from 8 to 10"
]
invalid = [
"every minute from",
"asdf",
"every minutes to 3",
"2500",
"25",
"-1",
"every -2 minutes",
"every minute from 5 to 1",
"every minute from 5 to 5",
"8/12/13",
"October 12",
"7-21-2013"
]
print "testing valid singular times"
for row in valid_singular:
print "?" + row + "?"
value = d.str_time_simplify(row)
print "!" + str(value) + "!"
if value == None:
print "value should not be None!"
exit()
print ""
print "testing valid repeating times"
for row in valid_repeating:
print "?" + row + "?"
value = d.str_time_simplify(row)
print "!" + str(value) + "!"
if value == None:
print "value should not be None!"
exit()
print ""
print "testing invalid times"
for row in invalid:
print row
value = d.str_time_simplify(row)
print value
if value != None:
print "value should be None!"
exit()
| gpl-3.0 | 4,687,957,397,284,547,000 | 18.73913 | 45 | 0.550661 | false | 3.274038 | false | false | false |
jokajak/itweb | data/env/lib/python2.6/site-packages/repoze.what-1.0.9-py2.6.egg/repoze/what/release.py | 1 | 1208 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008-2009, Gustavo Narea <[email protected]>
# All Rights Reserved.
#
# This software is subject to the provisions of the BSD-like license at
# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany
# this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL
# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND
# FITNESS FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
repoze.what release information.
The version number is loaded to help the Quickstart plugin configure
repoze.what correctly, depending on the version available -- although it may
be useful on other packages.
"""
import os
_here = os.path.abspath(os.path.dirname(__file__))
_root = os.path.dirname(os.path.dirname(_here))
version = open(os.path.join(_root, 'VERSION.txt')).readline().rstrip()
# The major version: If version=='3.0.2rc4', the major version is int(3).
major_version = int(version.split('.')[0])
| gpl-3.0 | -1,875,028,804,024,402,000 | 35.606061 | 78 | 0.639901 | false | 3.947712 | false | false | false |
picleslivre/schemaprobe | schemaprobe.py | 1 | 2343 | from __future__ import unicode_literals
import sys
import functools
import json
try:
import jsonschema
except ImportError:
jsonschema = None
try:
import requests
except ImportError:
requests = None
__version__ = '1.0.0.dev1'
__all__ = ['ensure', 'JsonProbe']
# --------------
# Py2 compat
# --------------
PY2 = sys.version_info[0] == 2
if PY2:
string_types = (str, unicode)
else:
string_types = (str,)
# --------------
class JsonProbe(object):
"""
An instance that knows how to perform validations against json-schema.
"""
_jsonschema = jsonschema
def __init__(self, schema):
"""
:param schema: json-schema as json-encoded text or python datastructures.
"""
if self._jsonschema is None:
raise TypeError('Missing dependency `jsonschema`.')
self.schema = self._normalize_input(schema)
def validate(self, input):
"""
Validate `input` agains the given schema.
:param input: json-encoded text or python datastructures.
:returns: boolean
"""
data = self._normalize_input(input)
try:
jsonschema.validate(data, self.schema)
except self._jsonschema.ValidationError:
return False
else:
return True
def _normalize_input(self, input):
"""
Always return python datastructures.
:param input: json-encoded text or python datastructures.
"""
if isinstance(input, string_types):
return json.loads(input)
else:
return input
def ensure(probe):
"""
Decorator that asserts the returned value is valid against `probe`.
"""
def ensure_decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
result = f(*args, **kwargs)
if probe.validate(result):
return result
else:
raise TypeError('Returned data does not conform with the given schema.')
return wrapper
return ensure_decorator
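# Usage sketch (illustrative, not part of the original API): wrap a function so
# its return value is validated against a JSON schema before being handed back.
def _example_usage():
    probe = JsonProbe('{"type": "object"}')
    @ensure(probe)
    def fetch():
        return {"name": "spam"}
    return fetch()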
class TestCaseMixin(object):
def assertSchemaIsValid(self, probe, resource_url, msg=None):
api_sample = requests.get(resource_url)
if not probe.validate(api_sample.json()):
raise self.failureException(msg or 'Schema is invalid.')
| bsd-2-clause | 1,664,159,037,023,619,000 | 23.154639 | 88 | 0.593683 | false | 4.314917 | false | false | false |
sighingnow/sighingnow.github.io | resource/k_nearest_neighbors/dating.py | 1 | 3622 | #! /usr/bin/env python
# -*- coding: utf-8
'''
Name: dating.py(KNN algorithm)
Training and test dataset: dating.txt
Created on Feb 8, 2015
@author: Tao He
'''
__author__ = 'Tao He'
from numpy import array as nmarray
from matplotlib import pyplot as plt
LABEL_MAP = {
'didntLike': 1,
'smallDoses': 2,
'largeDoses': 3,
}
ATTR_MAP = {
1: 'Number of frequent flyer miles earned per year',
2: 'Percentage of time spent playing video games',
3: 'Liters of ice cream consumed per week',
}
def create_dataset(filename=None):
''' Return data group and labels.
Get the data from file.
    If the filename is not specified, return None.
dataformat: flyerMiles, gameTime, icecream, label.
'''
def normalize_data(data=None):
''' Normalized dataset.
Normalize all data to range 0-1.
'''
if data is None:
return None
for column in range(data[0].__len__()):
max_val, min_val = max(data[:, column]), min(data[:, column])
for row in range(data.__len__()):
data[row][column] = (data[row][column]-min_val)/(max_val-min_val)
return data
if filename == None:
return (None, None)
group = []
labels = []
with open(filename, mode='r') as fp_data:
for line in fp_data:
group.append([float(num) for num in line[:-1].split('\t')[0:3]])
labels.append(LABEL_MAP[line[:-1].split('\t')[3]])
return normalize_data(nmarray(group)), labels
def draw_pic(group=None, labels=None, x=0, y=0):
''' Draw a subplot from data group.
'''
if group is None or labels is None:
return None
name = 'knn-dating'
figure = plt.figure(num=name, dpi=100)
ax_main = figure.add_subplot(1, 1, 1, xlabel=ATTR_MAP[x+1], ylabel=ATTR_MAP[y+1], title=name)
ax_main.scatter(group[:, x], group[:, y],
s=15*nmarray(labels),
c=[[i/LABEL_MAP.__len__()] for i in labels])
plt.show()
## plt.savefig('%s.png'%name, format='png', dpi=100)
def knn_classify(group, labels, attrs, ratio=0.5, item=0, k=3):
''' Return the type of item.
knn classify function.
'''
def get_dist(i, j):
''' Return the distence of group[i] and group[j].
'''
dist = 0.0
for attr in attrs:
dist += (group[i][attr]-group[j][attr])*(group[i][attr]-group[j][attr])
return dist
length = group.__len__()
distence = []
for i in range(int(length*ratio), length):
distence.append((i, get_dist(item, i)))
cnt = {}
distence.sort(key=lambda item: item[1])
for i in range(k):
label = labels[distence[i][0]]
if label in cnt:
cnt[label] += 1
else:
cnt[label] = 1
return sorted(cnt.items(), key=lambda item: item[1], reverse=True)[0][0]
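# Illustrative call (hypothetical numbers): with ratio=0.5 the second half of the
# rows acts as the reference pool, and
#   knn_classify(group, labels, [0, 1, 2], 0.5, item=7, k=3)
# returns the majority label among the 3 nearest reference rows to row 7.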
def knn():
''' KNN classify algorithm.
'''
data, labels = create_dataset('dating.txt')
ratio, attr = 0.5, [0, 1, 2]
cnt, cnt_correct = 0, 0
length = data.__len__()
for i in range(0, int(length*ratio)):
cnt += 1
knn_type = knn_classify(data, labels, attr, ratio, i, 3)
# print('case[%d]: real: %d, knn: %d'%(i, labels[i], knn_type))
if knn_type == labels[i]:
cnt_correct += 1
    print('total: %d, correct: %d, correct ratio: %f'%(cnt, cnt_correct, float(cnt_correct)/cnt))
if __name__ == '__main__':
knn()
# vim: set sw=4, ts=4, fileencoding=utf-8
| mit | 8,461,827,833,393,829,000 | 27.933884 | 97 | 0.543622 | false | 3.301732 | false | false | false |
AYJAYY/KenoDB | keno.py | 1 | 4245 | # Keno Data Logging - QuickKeno
# KDL v1.5.2 - Python 3 Conversion
# Last Edit Date: 1/9/2021
from urllib.request import urlopen
import json
import time
def write_file(file_name, write_mode, file_text):
text_file = open(file_name, write_mode)
text_file.write(file_text)
text_file.close()
#get the keno json file
ma_keno_json = urlopen("http://www.masslottery.com/data/json/search/dailygames/todays/15.json")
#read from the json file
json_string = ma_keno_json.read()
#parse the json file so we can work with it
parsed_json = json.loads(json_string)
#get the min and max game and subtract them...
#...so we can get total number of games to iterate over
min_game = int(parsed_json['min'])
max_game = int(parsed_json['max'])
games = max_game - min_game
#script loop
while games > 0:
#get info from "draws" section in json file + create error log
orgOrder = parsed_json['draws'][games]['winning_num_org']
sortedOrder = parsed_json['draws'][games]['winning_num']
multiplier = parsed_json['draws'][games]['bonus']
multi_int = parsed_json['draws'][games]['bonus_value']
draw = parsed_json['draws'][games]['draw_id']
#split on dashes 19 times to split up the 20 numbers
orgOrder_split = orgOrder.split('-', 19)
    #join the 20 numbers with commas to accommodate the csv
orgOrder_join = ",".join(orgOrder_split)
orgOrder_column = "\n".join(orgOrder_split)
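    # e.g. (illustrative): "01-05-...-77" splits into ['01', '05', ..., '77'], which is
    # joined with commas for the CSV row and with newlines for the single-column file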
#a way to string together the data using my "write file" function, this
#also turns everything into a string format so I can concatenate them.
long_text = str(orgOrder_join + "," + orgOrder + "," + sortedOrder + "," + multiplier + "," + multi_int + "," + draw) + "\n"
#also put the numbers in a single row for alternate file
single_row = str(orgOrder_column + "\n")
#write out to the files individually
try:
#format today's date for the filename and set it
date = time.strftime("%Y-%m-%d")
kenodbfile = "KenoFiles/Daily/kenodb" + str(date) + ".csv"
#write a new daily file
write_file(kenodbfile, "a+", long_text)
#append to the master file
write_file("KenoFiles/kenodbfull.csv", "a+", long_text)
#append to the single column file
write_file("KenoFiles/kenodbfull-1column.csv", "a+", single_row)
#in case the user is running on demand, give success messages & log them
print("Succesfully logged game #" + draw)
vlog_string = "<font size='1px'><strong>Succesfully logged game:</strong> " + draw + " <strong>|</strong> </font>" + "\n"
sys_log = "KenoFiles/SYSLOG.html"
write_file(sys_log,"a+",vlog_string)
except Exception as eW:
error_date_eW = time.strftime("%Y-%m-%d-%I:%M %p")
error_text_eW = str(eW) + " | " + "File Write Error" + " | " + error_date_eW + "<br \>" + "\n"
sys_log = "KenoFiles/SYSLOG.html"
log_html = "KenoFiles/LOG.html"
html_text = """<button type="button" class="btn btn-danger">An error has occured while writing to one of the files. Check the log in /KenoFiles</button><br \>""" + "\n"
write_file(sys_log,"a+",error_text_eW)
write_file(log_html,"a+",html_text)
print("An error has occured while writing to one of the files. Check the logs in /KenoFiles")
break
games = games - 1
#success - write to logs and print out in case this is an on demand run
games = max_game - min_game
success_date = time.strftime("%Y-%m-%d-%I:%M %p")
log_html = "KenoFiles/LOG.html"
sys_log = "KenoFiles/SYSLOG.html"
success_html = "<center><div class='bg-success' style='border:1px solid green;'><strong><font color='green'> KenoDB completed successfully" + " | " + success_date + " | Min Game: " + str(min_game) + " | Max Game: " + str(max_game) + " | Total Games: " + str(games) + "</font></strong></div></center><br \>" + "\n"
sys_success_html = """<button type="button" class="btn btn-success">KenoDB completed successfully""" + " | Date: " + success_date + " | Min Game: " + str(min_game) + " | Max Game: " + str(max_game) + " | Number Of Games: " + str(games) + "</button><br \>" + "\n"
write_file(log_html,"a+",sys_success_html)
write_file(sys_log,"a+",success_html)
print("KenoDB completed successfully")
| gpl-3.0 | 8,259,616,565,052,512,000 | 47.793103 | 313 | 0.643816 | false | 3.151448 | false | false | false |
nephila/djangocms-blog | djangocms_blog/liveblog/migrations/0001_initial.py | 1 | 2058 | import django.db.models.deletion
import filer.fields.image
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("cms", "0013_urlconfrevision"),
("filer", "0003_thumbnailoption"),
]
operations = [
migrations.CreateModel(
name="Liveblog",
fields=[
(
"cmsplugin_ptr",
models.OneToOneField(
parent_link=True,
auto_created=True,
primary_key=True,
serialize=False,
to="cms.CMSPlugin",
on_delete=django.db.models.deletion.CASCADE,
),
),
("body", models.TextField(verbose_name="body")),
("publish", models.BooleanField(default=False, verbose_name="publish liveblog entry")),
(
"image",
filer.fields.image.FilerImageField(
related_name="djangocms_blog_liveblog_image",
on_delete=django.db.models.deletion.SET_NULL,
verbose_name="image",
blank=True,
to="filer.Image",
null=True,
),
),
(
"thumbnail",
models.ForeignKey(
related_name="djangocms_blog_liveblog_thumbnail",
on_delete=django.db.models.deletion.SET_NULL,
verbose_name="thumbnail size",
blank=True,
to="filer.ThumbnailOption",
null=True,
),
),
],
options={
"verbose_name": "liveblog entry",
"verbose_name_plural": "liveblog entries",
},
bases=("cms.cmsplugin",),
),
]
| bsd-3-clause | 3,167,827,842,490,844,700 | 33.881356 | 103 | 0.420797 | false | 5.415789 | false | false | false |
Mirantis/swift-encrypt | swift/common/ring/utils.py | 1 | 2880 | from collections import defaultdict
def tiers_for_dev(dev):
"""
Returns a tuple of tiers for a given device in ascending order by
length.
:returns: tuple of tiers
"""
t1 = dev['zone']
t2 = "{ip}:{port}".format(ip=dev.get('ip'), port=dev.get('port'))
t3 = dev['id']
return ((t1,),
(t1, t2),
(t1, t2, t3))
def build_tier_tree(devices):
"""
Construct the tier tree from the zone layout.
The tier tree is a dictionary that maps tiers to their child tiers.
A synthetic root node of () is generated so that there's one tree,
not a forest.
Example:
zone 1 -+---- 192.168.1.1:6000 -+---- device id 0
| |
| +---- device id 1
| |
| +---- device id 2
|
+---- 192.168.1.2:6000 -+---- device id 3
|
+---- device id 4
|
+---- device id 5
zone 2 -+---- 192.168.2.1:6000 -+---- device id 6
| |
| +---- device id 7
| |
| +---- device id 8
|
+---- 192.168.2.2:6000 -+---- device id 9
|
+---- device id 10
|
+---- device id 11
The tier tree would look like:
{
(): [(1,), (2,)],
(1,): [(1, 192.168.1.1:6000),
(1, 192.168.1.2:6000)],
(2,): [(2, 192.168.2.1:6000),
(2, 192.168.2.2:6000)],
(1, 192.168.1.1:6000): [(1, 192.168.1.1:6000, 0),
(1, 192.168.1.1:6000, 1),
(1, 192.168.1.1:6000, 2)],
(1, 192.168.1.2:6000): [(1, 192.168.1.2:6000, 3),
(1, 192.168.1.2:6000, 4),
(1, 192.168.1.2:6000, 5)],
(2, 192.168.2.1:6000): [(2, 192.168.2.1:6000, 6),
(2, 192.168.2.1:6000, 7),
(2, 192.168.2.1:6000, 8)],
(2, 192.168.2.2:6000): [(2, 192.168.2.2:6000, 9),
(2, 192.168.2.2:6000, 10),
(2, 192.168.2.2:6000, 11)],
}
:devices: device dicts from which to generate the tree
:returns: tier tree
"""
tier2children = defaultdict(set)
for dev in devices:
for tier in tiers_for_dev(dev):
if len(tier) > 1:
tier2children[tier[0:-1]].add(tier)
else:
tier2children[()].add(tier)
return tier2children
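# --- Illustrative sketch (not part of swift) ---
def _example_tier_tree():
    """Build the tier tree for two devices sharing one zone and one ip:port."""
    devs = [{'zone': 1, 'ip': '10.0.0.1', 'port': 6000, 'id': 0},
            {'zone': 1, 'ip': '10.0.0.1', 'port': 6000, 'id': 1}]
    # keys: (), (1,), (1, '10.0.0.1:6000'); values: sets of child tiers
    return build_tier_tree(devs)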
| apache-2.0 | -1,470,004,698,661,708,500 | 31.359551 | 71 | 0.365625 | false | 3.794466 | false | false | false |
murdej/h2pws | h2pws.py | 1 | 2618 | import time
import BaseHTTPServer
from urlparse import urlparse, parse_qs
import subprocess
import base64
import qrcode
import qrcode.image.svg
import cStringIO
#1630-1800
HOST_NAME = 'localhost' # !!!REMEMBER TO CHANGE THIS!!!
PORT_NUMBER = 8000 # Maybe set this to 9000.
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_HEAD(s):
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
def do_GET(s):
"""Respond to a GET request."""
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write("<html><head><title></title></head>")
s.wfile.write("<body><p>Send html source by POST.</p>")
# s.wfile.write("<p>You accessed path: %s</p>" % s.path)
s.wfile.write("</body></html>")
def do_POST(s):
"""Respond to a POST request."""
s.send_response(200)
s.send_header("Content-type", "application/x-pdf")
s.end_headers()
# params
url_params = parse_qs(urlparse(s.path).query)
args = ["wkhtmltopdf"]
for n in [ 'orientation', 'page-size', 'margin-bottom', 'margin-left', 'margin-right', 'margin-top' ]:
if n in url_params:
args += [ '--' + n, url_params[n][0] ]
args += ["-", "-"]
print args
html = s.rfile.read(int(s.headers.getheader('content-length')))
        # Replace "qr::xxxxxxxxxxxxxxxxx" markers with an SVG QR code
if "qr-to-svg" in url_params :
new_html = ''
pos = 0
while True:
begin_str = '"qr::'
pos_a = html.find(begin_str, pos)
if pos_a == -1: break
# copy text before
new_html += html[pos:pos_a]
# extract src of QR code
pos_a += len(begin_str)
pos_b = html.find('"', pos_a + 1)
qr_src = html[pos_a:pos_b]
print "qr:src='" + qr_src + "'"
# new_html += '[[' + qr_src + ']]'
factory = qrcode.image.svg.SvgPathImage
img = qrcode.make(qr_src, image_factory=factory)
output = cStringIO.StringIO()
img.save(output)
svgb = 'data:image/svg+xml;base64,' + base64.b64encode(output.getvalue())
output.close()
new_html += svgb
pos = pos_b
new_html += html[pos:]
html = new_html
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
p.stdin.write(html)
p.stdin.close()
s.wfile.write(p.stdout.read())
p.wait()
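# Example request (illustrative): POST raw HTML and read back the rendered PDF, e.g.
#   curl --data-binary @page.html "http://localhost:8000/?page-size=A4&orientation=Landscape" > page.pdf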
if __name__ == '__main__':
server_class = BaseHTTPServer.HTTPServer
httpd = server_class((HOST_NAME, PORT_NUMBER), MyHandler)
print time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER) | gpl-2.0 | 4,676,818,734,918,425,000 | 26 | 104 | 0.637128 | false | 2.8 | false | false | false |
texib/bitcoin-zoo | member/views.py | 1 | 3349 | from django.shortcuts import render
from django.contrib.auth.models import User, Group
from django.contrib.auth import login
from django.contrib.auth import logout
from django.contrib.auth import authenticate
from django.http import HttpResponseRedirect
from rest_framework import viewsets
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from serializer import UserSerializer, GroupSerializer
from rest_framework import status
from rest_framework import parsers
from rest_framework import renderers
from rest_framework_jwt import utils
from rest_framework_jwt.authentication import JSONWebTokenAuthentication as jwt_auth
from rest_framework_jwt.serializers import JSONWebTokenSerializer
# userena
from userena import views
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
authentication_classes = (BasicAuthentication, )
queryset = User.objects.all()
serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
def signup(request):
'''
    a simple override of the userena signup view
'''
return views.signup(request, success_url='/home/')
def signout(request):
'''
'''
return views.signout(request, template_name='home.html')
def signin(request):
'''
'''
# this is a little trick to hack the userena signin function
return views.signin(request, redirect_signin_function=lambda *arg: '/home/')
class testSignin(APIView):
'''
API View that receives a POST with a user's username and password.
Returns a JSON Web Token that can be used for authenticated requests.
'''
throttle_classes = ()
permission_classes = ()
authentication_classes = ()
parser_classes = (parsers.FormParser, parsers.JSONParser,)
renderer_classes = (renderers.JSONRenderer,)
serializer_class = JSONWebTokenSerializer
jwt = jwt_auth()
def post(self, request):
'''
        A known issue: a segmentation fault happens if you log in, then
        log out and log in again.
'''
serializer = testSignin.serializer_class(data=request.DATA)
if serializer.is_valid():
payload = utils.jwt_decode_handler(serializer.object['token'])
user = self.jwt.authenticate_credentials(payload)
            # below is a trick for authentication:
            # django's authenticate() needs a username and password, but the
            # decoded JWT payload doesn't contain the password.
user.backend = 'django.contrib.auth.backends.ModelBackend'
# user = authenticate(username=user, nopass=True)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect('/home/')
else:
raise Exception('user not active')
else:
raise Exception('not valid user')
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| mit | -4,012,741,038,384,615,400 | 30.299065 | 84 | 0.693341 | false | 4.550272 | false | false | false |
dnowatsc/Varial | varial/operations.py | 1 | 23334 | """
Operations on wrappers
"""
import array
import __builtin__
import ctypes
import collections
import functools
from ROOT import THStack, TGraphAsymmErrors
import history
import wrappers
class OperationError(Exception): pass
class TooFewWrpsError(OperationError): pass
class TooManyWrpsError(OperationError): pass
class WrongInputError(OperationError): pass
class NoLumiMatchError(OperationError): pass
def iterableize(obj):
if isinstance(obj, collections.Iterable):
return obj
else:
return [obj]
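# e.g. iterableize(5) -> [5], while an already-iterable argument is returned unchanged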
def add_wrp_kws(func):
"""Pops 'wrp_kws' from given keywords and updates returned wrapper."""
@functools.wraps(func)
def catch_wrp_kws(*args, **kws):
wrp_kws = kws.pop('wrp_kws', {})
ret = func(*args, **kws)
ret.__dict__.update(wrp_kws)
return ret
return catch_wrp_kws
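# Illustrative use: any operation decorated with add_wrp_kws accepts e.g.
#   stack(wrps, wrp_kws={'legend': 'background'})
# and the returned wrapper then carries .legend == 'background'.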
@add_wrp_kws
@history.track_history
def stack(wrps):
"""
Applies only to HistoWrappers. Returns StackWrapper.
Checks lumi to be equal among all wrappers.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 4.5)
>>> h1.Fill(1,4)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2.)
>>> h2 = TH1I("h2", "", 2, .5, 4.5)
>>> h2.Fill(1,3)
1
>>> h2.Fill(3,6)
2
>>> w2 = wrappers.HistoWrapper(h2, lumi=2.)
>>> w3 = stack([w1, w2])
>>> w3.histo.Integral()
13.0
>>> w3.lumi
2.0
"""
wrps = iterableize(wrps)
stk_wrp = None
lumi = 0.
info = None
sample = ""
for wrp in wrps:
if not isinstance(wrp, wrappers.HistoWrapper): # histo check
raise WrongInputError(
"stack accepts only HistoWrappers. wrp: "
+ str(wrp)
)
if not stk_wrp: # stack init
stk_wrp = THStack(wrp.name, wrp.title)
lumi = wrp.lumi
info = wrp.all_info()
sample = wrp.sample
elif lumi != wrp.lumi: # lumi check
raise NoLumiMatchError(
"stack needs lumis to match. (%f != %f)" % (lumi, wrp.lumi)
)
if sample != wrp.sample: # add to stack
sample = ""
stk_wrp.Add(wrp.histo)
if not info:
raise TooFewWrpsError(
"At least one Wrapper must be provided."
)
if not sample:
del info["sample"]
return wrappers.StackWrapper(stk_wrp, **info)
@add_wrp_kws
@history.track_history
def sum(wrps):
"""
Applies only to HistoWrappers. Returns HistoWrapper. Adds lumi up.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 4.5)
>>> h1.Fill(1)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2.)
>>> h2 = TH1I("h2", "", 2, .5, 4.5)
>>> h2.Fill(1)
1
>>> h2.Fill(3)
2
>>> w2 = wrappers.HistoWrapper(h2, lumi=3.)
>>> w3 = sum([w1, w2])
>>> w3.histo.Integral()
3.0
>>> w3.lumi
5.0
"""
wrps = iterableize(wrps)
histo = None
lumi = 0.
info = None
for wrp in wrps:
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"sum accepts only HistoWrappers. wrp: "
+ str(wrp)
)
if histo:
histo.Add(wrp.histo)
else:
histo = wrp.histo.Clone()
info = wrp.all_info()
lumi += wrp.lumi
if not info:
raise TooFewWrpsError(
"At least one Wrapper must be provided."
)
info["lumi"] = lumi
return wrappers.HistoWrapper(histo, **info)
@add_wrp_kws
@history.track_history
def diff(wrps):
"""
Applies only to HistoWrappers. Returns HistoWrapper. Takes lumi from first.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 4.5)
>>> h1.Fill(1, 2)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2.)
>>> h2 = TH1I("h2", "", 2, .5, 4.5)
>>> h2.Fill(1)
1
>>> w2 = wrappers.HistoWrapper(h2, lumi=3.)
>>> w3 = diff([w1, w2])
>>> w3.histo.Integral()
1.0
>>> w3.lumi
2.0
"""
wrps = iterableize(wrps)
histo = None
lumi = 0.
info = None
for wrp in wrps:
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"sum accepts only HistoWrappers. wrp: "
+ str(wrp)
)
if histo:
histo.Add(wrp.histo, -1.)
else:
histo = wrp.histo.Clone()
info = wrp.all_info()
lumi = wrp.lumi
if not info:
raise TooFewWrpsError(
"At least one Wrapper must be provided."
)
info["lumi"] = lumi
return wrappers.HistoWrapper(histo, **info)
@add_wrp_kws
@history.track_history
def merge(wrps):
"""
Applies only to HistoWrapper. Returns HistoWrapper. Normalizes histos to lumi.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 2.5)
>>> h1.Fill(1,4)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2.)
>>> h2 = TH1I("h2", "", 2, .5, 2.5)
>>> h2.Fill(1,3)
1
>>> h2.Fill(2,6)
2
>>> w2 = wrappers.HistoWrapper(h2, lumi=3.)
>>> w3 = merge([w1, w2])
>>> w3.histo.Integral()
5.0
>>> w3.lumi
1.0
"""
wrps = iterableize(wrps)
histo = None
info = None
for wrp in wrps:
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"merge accepts only HistoWrappers. wrp: "
+ str(wrp)
)
if histo:
histo.Add(wrp.histo, 1. / wrp.lumi)
else:
histo = wrp.histo.Clone()
histo.Scale(1. / wrp.lumi)
info = wrp.all_info()
if not info:
raise TooFewWrpsError(
"At least one Wrapper must be provided."
)
info["lumi"] = 1.
return wrappers.HistoWrapper(histo, **info)
@add_wrp_kws
@history.track_history
def prod(wrps):
"""
Applies to HistoWrapper and FloatWrapper. Returns HistoWrapper. Takes lumi from first.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 2.5)
>>> h1.Fill(1)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2, history="w1")
>>> h2 = TH1I("h2", "", 2, .5, 2.5)
>>> h2.Fill(1)
1
>>> h2.Fill(2)
2
>>> w2 = wrappers.HistoWrapper(h2, lumi=3)
>>> w3 = prod([w1, w2])
>>> w3.histo.Integral()
1.0
>>> w3.lumi
1.0
>>> w4 = wrappers.FloatWrapper(2.)
>>> w5 = prod([w1, w4])
>>> w5.histo.Integral()
2.0
"""
wrps = iterableize(wrps)
histo = None
info = None
lumi = 1.
for wrp in wrps:
if histo:
if isinstance(wrp, wrappers.HistoWrapper):
histo.Multiply(wrp.histo)
lumi = 1.
elif not isinstance(wrp, wrappers.FloatWrapper):
raise WrongInputError(
"prod accepts only HistoWrappers and FloatWrappers. wrp: "
+ str(wrp)
)
else:
histo.Scale(wrp.float)
lumi *= wrp.float
else:
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"prod expects first argument to be of type HistoWrapper. wrp: "
+ str(wrp)
)
histo = wrp.histo.Clone()
info = wrp.all_info()
lumi = wrp.lumi
if not info:
raise TooFewWrpsError(
"At least one Wrapper must be provided."
)
info["lumi"] = lumi
return wrappers.HistoWrapper(histo, **info)
@add_wrp_kws
@history.track_history
def div(wrps):
"""
Applies to HistoWrapper and FloatWrapper. Returns HistoWrapper. Takes lumi from first.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 2.5)
>>> h1.Fill(1,4)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2)
>>> h2 = TH1I("h2", "", 2, .5, 2.5)
>>> h2.Fill(1,2)
1
>>> w2 = wrappers.HistoWrapper(h2, lumi=3)
>>> w3 = div([w1, w2])
>>> w3.histo.Integral()
2.0
>>> w4 = wrappers.FloatWrapper(2., history="w4")
>>> w5 = div([w1, w4])
>>> w5.histo.Integral()
2.0
"""
wrps = iterableize(wrps)
wrps = iter(wrps)
try:
nominator = next(wrps)
denominator = next(wrps)
except StopIteration:
raise TooFewWrpsError("div needs exactly two Wrappers.")
try:
wrps.next()
raise TooManyWrpsError("div needs exactly two Wrappers.")
except StopIteration:
pass
if not isinstance(nominator, wrappers.HistoWrapper):
raise WrongInputError(
"div needs nominator to be of type HistoWrapper. nominator: "
+ str(nominator)
)
if not (isinstance(denominator, wrappers.HistoWrapper) or
isinstance(denominator, wrappers.FloatWrapper)):
raise WrongInputError(
"div needs denominator to be of type HistoWrapper or FloatWrapper. denominator: "
+ str(denominator)
)
histo = nominator.histo.Clone()
lumi = nominator.lumi
if isinstance(denominator, wrappers.HistoWrapper):
histo.Divide(denominator.histo)
lumi = 1.
else:
histo.Scale(1. / denominator.float)
lumi /= denominator.float
info = nominator.all_info()
info["lumi"] = lumi
return wrappers.HistoWrapper(histo, **info)
@add_wrp_kws
@history.track_history
def lumi(wrp):
"""
Applies to HistoWrapper. Returns FloatWrapper.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 2.5)
>>> h1.Fill(1)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2.)
>>> w2 = lumi(w1)
>>> w2.float
2.0
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"lumi needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
info = wrp.all_info()
return wrappers.FloatWrapper(wrp.lumi, **info)
@add_wrp_kws
@history.track_history
def norm_to_lumi(wrp):
"""
Applies to HistoWrapper. Returns HistoWrapper.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 2.5)
>>> h1.Fill(1, 4)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2.)
>>> w1.histo.Integral()
4.0
>>> w2 = norm_to_lumi(w1)
>>> w2.histo.Integral()
2.0
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"norm_to_lumi needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
histo = wrp.histo.Clone()
histo.Scale(1. / wrp.lumi)
info = wrp.all_info()
info["lumi"] = 1.
return wrappers.HistoWrapper(histo, **info)
@add_wrp_kws
@history.track_history
def norm_to_integral(wrp, use_bin_width=False):
"""
Applies to HistoWrapper. Returns HistoWrapper.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 2.5)
>>> h1.Fill(1, 4)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2.)
>>> w1.histo.Integral()
4.0
>>> w2 = norm_to_integral(w1)
>>> w2.histo.Integral()
1.0
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"norm_to_integral needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
histo = wrp.histo.Clone()
option = "width" if use_bin_width else ""
integr = wrp.histo.Integral(option) or 1.
histo.Scale(1. / integr)
info = wrp.all_info()
info["lumi"] /= integr
return wrappers.HistoWrapper(histo, **info)
@add_wrp_kws
@history.track_history
def copy(wrp):
"""
Applies to HistoWrapper. Returns HistoWrapper.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 2.5)
>>> h1.Fill(1, 4)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2.)
>>> w2=copy(w1)
>>> w2.histo.GetName()
'h1'
>>> w1.name == w2.name
True
>>> w1.histo.Integral() == w2.histo.Integral()
True
>>> w1.histo != w2.histo
True
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"copy needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
histo = wrp.histo.Clone()
info = wrp.all_info()
return wrappers.HistoWrapper(histo, **info)
@add_wrp_kws
@history.track_history
def rebin(wrp, bin_bounds, norm_by_bin_width=False):
"""
Applies to HistoWrapper. Returns Histowrapper.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 4, .5, 4.5)
>>> h1.Fill(1)
1
>>> h1.Fill(2)
2
>>> w1 = wrappers.HistoWrapper(h1, lumi=2.)
>>> w2=rebin(w1, [.5, 2.5, 4.5])
>>> w1.histo.GetNbinsX()
4
>>> w2.histo.GetNbinsX()
2
>>> w2.histo.GetBinContent(1)
2.0
>>> w2.histo.GetBinContent(2)
0.0
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"rebin needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
if len(bin_bounds) < 2:
raise OperationError(
"Number of bins < 2, must include at least one bin!"
)
bin_bounds = array.array("d", bin_bounds)
orig_bin_width = wrp.histo.GetBinWidth(1)
histo = wrp.histo.Rebin(
len(bin_bounds) - 1,
wrp.name,
bin_bounds
)
if norm_by_bin_width:
for i in xrange(histo.GetNbinsX()+1):
factor = histo.GetBinWidth(i) / orig_bin_width
histo.SetBinContent(i, histo.GetBinContent(i) / factor)
histo.SetBinError(i, histo.GetBinError(i) / factor)
info = wrp.all_info()
return wrappers.HistoWrapper(histo, **info)
@add_wrp_kws
@history.track_history
def trim(wrp, left=True, right=True):
"""
Applies to HistoWrapper. Returns Histowrapper.
If left / right are set to values, these are applied. Otherwise empty bins
are cut off.
>>> from ROOT import TH1I
>>> w1 = wrappers.HistoWrapper(TH1I("h1", "", 10, .5, 10.5))
>>> w1.histo.Fill(5)
5
>>> w2 = trim(w1)
>>> w2.histo.GetNbinsX()
1
>>> w2.histo.GetXaxis().GetXmin()
4.5
>>> w2.histo.GetXaxis().GetXmax()
5.5
>>> w2 = trim(w1, 3.5, 7.5)
>>> w2.histo.GetNbinsX()
4
>>> w2.histo.GetXaxis().GetXmin()
3.5
>>> w2.histo.GetXaxis().GetXmax()
7.5
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"trim needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
# find left / right values if not given
histo = wrp.histo
axis = histo.GetXaxis()
n_bins = histo.GetNbinsX()
if type(left) == bool:
if left:
for i in xrange(n_bins+1):
if histo.GetBinContent(i):
left = axis.GetBinLowEdge(i)
break
else:
left = axis.GetXmin()
if type(right) == bool:
if right:
for i in xrange(n_bins+1, 0, -1):
if histo.GetBinContent(i):
right = axis.GetBinUpEdge(i)
break
else:
right = axis.GetXmax()
if left > right:
raise OperationError("bounds: left > right")
# create new bin_bounds
index = 0
while axis.GetBinLowEdge(index) < left:
index += 1
bin_bounds = [axis.GetBinLowEdge(index)]
while axis.GetBinUpEdge(index) <= right:
bin_bounds.append(axis.GetBinUpEdge(index))
index += 1
return rebin(wrp, bin_bounds)
@add_wrp_kws
@history.track_history
def mv_in(wrp, overflow=True, underflow=True):
"""
Moves under- and/or overflow bin into first/last bin.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 4.5)
>>> h1.Fill(0)
-1
>>> h1.Fill(5,3)
-1
>>> w1 = wrappers.HistoWrapper(h1)
>>> w1.histo.Integral()
0.0
>>> w2 = mv_in(w1, False, False)
>>> w2.histo.Integral()
0.0
>>> w3 = mv_in(w1, True, False)
>>> w3.histo.Integral()
3.0
>>> w4 = mv_in(w1, False, True)
>>> w4.histo.Integral()
1.0
>>> w5 = mv_in(w1, True, True)
>>> w5.histo.Integral()
4.0
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"mv_bin needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
histo = wrp.histo.Clone()
nbins = histo.GetNbinsX()
if underflow:
firstbin = histo.GetBinContent(0)
firstbin += histo.GetBinContent(1)
histo.SetBinContent(1, firstbin)
histo.SetBinContent(0, 0.)
if overflow:
lastbin = histo.GetBinContent(nbins + 1)
lastbin += histo.GetBinContent(nbins)
histo.SetBinContent(nbins, lastbin)
histo.SetBinContent(histo.GetNbinsX() + 1, 0.)
return wrappers.HistoWrapper(histo, **wrp.all_info())
@add_wrp_kws
@history.track_history
def integral(wrp, use_bin_width=False):
"""
Integral. Applies to HistoWrapper. Returns FloatWrapper.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 4.5)
>>> h1.Fill(1)
1
>>> h1.Fill(3,3)
2
>>> w1 = wrappers.HistoWrapper(h1)
>>> w2 = integral(w1)
>>> w2.float
4.0
>>> w3 = integral(w1, True)
>>> w3.float
8.0
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"int needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
option = "width" if use_bin_width else ""
info = wrp.all_info()
return wrappers.FloatWrapper(wrp.histo.Integral(option), **info)
@add_wrp_kws
@history.track_history
def int_l(wrp, use_bin_width=False):
"""
Left-sided integral. Applies to HistoWrapper. Returns HistoWrapper.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 4.5)
>>> h1.Fill(1)
1
>>> h1.Fill(3,2)
2
>>> w1 = wrappers.HistoWrapper(h1)
>>> w2 = int_l(w1)
>>> w2.histo.GetBinContent(1)
1.0
>>> w2.histo.GetBinContent(2)
3.0
>>> w2 = int_l(w1, True)
>>> w2.histo.GetBinContent(1)
2.0
>>> w2.histo.GetBinContent(2)
6.0
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"int_l needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
int_histo = wrp.histo.Clone()
option = "width" if use_bin_width else ""
for i in xrange(int_histo.GetNbinsX(), 0, -1):
error = ctypes.c_double()
value = int_histo.IntegralAndError(1, i, error, option)
int_histo.SetBinContent(i, value)
int_histo.SetBinError(i, error.value)
info = wrp.all_info()
return wrappers.HistoWrapper(int_histo, **info)
@add_wrp_kws
@history.track_history
def int_r(wrp, use_bin_width=False):
"""
    Right-sided integral. Applies to HistoWrapper. Returns HistoWrapper.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 4.5)
>>> h1.Fill(1)
1
>>> h1.Fill(3,2)
2
>>> w1 = wrappers.HistoWrapper(h1)
>>> w2 = int_r(w1)
>>> w2.histo.GetBinContent(1)
3.0
>>> w2.histo.GetBinContent(2)
2.0
>>> w2 = int_r(w1, True)
>>> w2.histo.GetBinContent(1)
6.0
>>> w2.histo.GetBinContent(2)
4.0
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"int_r needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
int_histo = wrp.histo.Clone()
option = "width" if use_bin_width else ""
n_bins = int_histo.GetNbinsX()
for i in xrange(1, 1 + n_bins):
error = ctypes.c_double()
value = int_histo.IntegralAndError(i, n_bins, error, option)
int_histo.SetBinContent(i, value)
int_histo.SetBinError(i, error.value)
info = wrp.all_info()
return wrappers.HistoWrapper(int_histo, **info)
@add_wrp_kws
@history.track_history
def chi2(wrps, x_min=0, x_max=0):
"""
    Expects two HistoWrappers. Returns FloatWrapper.
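    A minimal doctest sketch (added for illustration; only unit-weight fills
    are used, so the default ROOT bin errors are simply sqrt(N)):
    >>> from ROOT import TH1I
    >>> h1 = TH1I("h1", "", 2, .5, 2.5)
    >>> h1.Fill(1)
    1
    >>> h2 = TH1I("h2", "", 2, .5, 2.5)
    >>> for i in range(4): n = h2.Fill(1)
    >>> w1, w2 = wrappers.HistoWrapper(h1), wrappers.HistoWrapper(h2)
    >>> w3 = chi2([w1, w2])
    >>> round(w3.float, 3)
    1.8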
"""
wrps = iterableize(wrps)
wrps = iter(wrps)
try:
first, second = next(wrps), next(wrps)
except StopIteration:
raise TooFewWrpsError("chi2 needs exactly two HistoWrappers.")
try:
        next(wrps)
raise TooManyWrpsError("chi2 needs exactly two HistoWrappers.")
except StopIteration:
pass
for w in (first, second):
if not isinstance(w, wrappers.HistoWrapper):
raise WrongInputError(
"chi2 needs type HistoWrapper. w: "
+ str(w)
)
if not first.histo.GetNbinsX() == second.histo.GetNbinsX():
raise WrongInputError(
"chi2 needs histos with same number of bins."
)
if not x_max:
x_max = int(first.histo.GetNbinsX() - 1)
def get_weight_for_bin(i):
val = (first.histo.GetBinContent(i+1)
- second.histo.GetBinContent(i+1))**2
err1 = first.histo.GetBinError(i+1)
err2 = second.histo.GetBinError(i+1)
if err1 and err2:
return val / (err1**2 + err2**2)
else:
return 0.
chi2_val = __builtin__.sum(
get_weight_for_bin(i)
for i in xrange(x_min, x_max)
)
info = second.all_info()
info.update(first.all_info())
return wrappers.FloatWrapper(
chi2_val,
**info
)
@add_wrp_kws
@history.track_history
def eff(wrps, option=''):
"""
Applies to HistoWrappers only. Returns GraphWrapper. Takes lumi from first.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 2.5)
>>> h1.Fill(1)
1
>>> h1.Fill(1)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2)
>>> h2 = TH1I("h2", "", 2, .5, 2.5)
>>> h2.Sumw2()
>>> h2.Fill(1)
1
>>> h2.Fill(1)
1
>>> h2.Fill(1)
1
>>> h2.Fill(2)
2
>>> w2 = wrappers.HistoWrapper(h2, lumi=3)
>>> w3 = eff([w1, w2])
>>> w3.graph.GetN()
2
>>> hi = w3.graph.GetErrorYhigh(0)
>>> lo = w3.graph.GetErrorYlow(0)
>>> abs(hi - 0.277375360987) < 1e-10
True
>>> abs(lo - 0.414534706284) < 1e-10
True
"""
wrps = iterableize(wrps)
wrps = iter(wrps)
try:
        numerator = next(wrps)
denominator = next(wrps)
except StopIteration:
raise TooFewWrpsError("eff needs exactly two Wrappers.")
try:
        next(wrps)
raise TooManyWrpsError("eff needs exactly two Wrappers.")
except StopIteration:
pass
    if not isinstance(numerator, wrappers.HistoWrapper):
        raise WrongInputError(
            "eff needs numerator to be of type HistoWrapper. numerator: "
            + str(numerator)
)
if not (isinstance(denominator, wrappers.HistoWrapper)):
raise WrongInputError(
"eff needs denominator to be of type HistoWrapper. denominator: "
+ str(denominator)
)
    graph = TGraphAsymmErrors(numerator.histo, denominator.histo, option)
    graph.GetXaxis().SetTitle(numerator.histo.GetXaxis().GetTitle())
    info = numerator.all_info()
return wrappers.GraphWrapper(graph, **info)
if __name__ == "__main__":
import ROOT
ROOT.TH1.AddDirectory(False)
import doctest
doctest.testmod()
| gpl-3.0 | 8,743,581,249,339,879,000 | 25.882488 | 93 | 0.552413 | false | 3.053389 | false | false | false |
KhronosGroup/COLLADA-CTS | StandardDataSets/1_5/collada/asset/coverage/geographic_location/absolute/absolute.py | 1 | 4333 |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incurs an unnecessary performance hint.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = [['asset', 'coverage', 'geographic_location', 'longitude'],
['asset', 'coverage', 'geographic_location', 'latitude'],
['asset', 'coverage', 'geographic_location', 'altitude']]
attrName = 'mode'
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def JudgeBaseline(self, context):
# No step should not crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
# To pass intermediate you need to pass basic, this object could also include additional
# tests that were specific to the intermediate badge.
def JudgeSuperior(self, context):
self.status_superior = self.status_baseline
return self.status_superior
# To pass advanced you need to pass intermediate, this object could also include additional
# tests that were specific to the advanced badge
def JudgeExemplary(self, context):
# if superior fails, no point in further checking
if (self.status_superior == False):
self.status_exemplary = self.status_superior
return self.status_exemplary
# Compare the rendered images
self.__assistant.CompareRenderedImages(context)
self.__assistant.ElementDataPreserved(context, self.tagList[0], "float")
self.__assistant.ElementDataPreserved(context, self.tagList[1], "float")
self.__assistant.ElementDataPreserved(context, self.tagList[2], "float")
self.__assistant.AttributePreserved(context, self.tagList[2], self.attrName)
self.status_exemplary = self.__assistant.DeferJudgement(context)
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
| mit | 4,687,221,361,627,715,000 | 52.225 | 466 | 0.708054 | false | 4.182432 | false | false | false |
ministryofjustice/manchester_traffic_offences_pleas | apps/plea/tests/test_accessibility_switcher.py | 1 | 2202 | from django.test import TestCase
from django.test.client import Client
from django.conf import settings
from importlib import import_module
from waffle.models import Switch
class TestAccessibilitySwitcher(TestCase):
def setUp(self):
self.client = Client()
# http://code.djangoproject.com/ticket/10899
settings.SESSION_ENGINE = 'django.contrib.sessions.backends.file'
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
self.session = store
self.client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
def test_a11y_testing_waffle_switch_off(self):
response = self.client.get("/set-a11y-testing/")
self.assertEqual(response.status_code, 404)
def test_a11y_testing_mode_tota11y(self):
Switch.objects.create(name="enable_a11y_testing", active=True)
response = self.client.get("/set-a11y-testing/?mode=tota11y")
response = self.client.get("/")
self.assertContains(response, "/static/javascripts/vendor/tota11y.min.js")
def test_a11y_testing_mode_google(self):
Switch.objects.create(name="enable_a11y_testing", active=True)
response = self.client.get("/set-a11y-testing/?mode=google")
response = self.client.get("/")
self.assertContains(response, "/static/javascripts/vendor/axs_testing.js")
def test_a11y_testing_mode_off(self):
Switch.objects.create(name="enable_a11y_testing", active=True)
response = self.client.get("/set-a11y-testing/?mode=off")
response = self.client.get("/")
self.assertNotContains(response, "/static/javascripts/vendor/tota11y.min.js")
self.assertNotContains(response, "/static/javascripts/vendor/axs_testing.js")
def test_a11y_testing_mode_wrong(self):
Switch.objects.create(name="enable_a11y_testing", active=True)
response = self.client.get("/set-a11y-testing/?mode=gfhdjaks")
response = self.client.get("/")
self.assertNotContains(response, "/static/javascripts/vendor/tota11y.min.js")
self.assertNotContains(response, "/static/javascripts/vendor/axs_testing.js")
| mit | -3,648,315,149,922,748,000 | 36.965517 | 85 | 0.690736 | false | 3.580488 | true | false | false |
opennode/nodeconductor-assembly-waldur | src/waldur_mastermind/packages/executors.py | 1 | 4271 | from waldur_core.core import executors as core_executors
from waldur_core.core import tasks as core_tasks
from waldur_core.core import utils as core_utils
from waldur_core.structure import executors as structure_executors
from waldur_mastermind.packages.serializers import _get_template_quotas
from waldur_openstack.openstack import executors as openstack_executors
from . import tasks
class OpenStackPackageCreateExecutor(core_executors.BaseExecutor):
@classmethod
def get_task_signature(cls, package, serialized_package, **kwargs):
tenant = package.tenant
serialized_tenant = core_utils.serialize_instance(tenant)
service_settings = package.service_settings
serialized_service_settings = core_utils.serialize_instance(service_settings)
create_tenant = openstack_executors.TenantCreateExecutor.get_task_signature(
tenant, serialized_tenant, **kwargs
)
set_tenant_ok = openstack_executors.TenantCreateExecutor.get_success_signature(
tenant, serialized_tenant
)
populate_service_settings = tasks.OpenStackPackageSettingsPopulationTask().si(
serialized_package
)
create_service_settings = structure_executors.ServiceSettingsCreateExecutor.get_task_signature(
service_settings, serialized_service_settings
)
return (
create_tenant
| set_tenant_ok
| populate_service_settings
| create_service_settings
)
@classmethod
def get_success_signature(cls, package, serialized_package, **kwargs):
""" Get Celery signature of task that should be applied on successful execution. """
service_settings = package.service_settings
serialized_service_settings = core_utils.serialize_instance(service_settings)
return core_tasks.StateTransitionTask().si(
serialized_service_settings, state_transition='set_ok'
)
@classmethod
def get_failure_signature(cls, package, serialized_package, **kwargs):
return tasks.OpenStackPackageErrorTask().s(serialized_package)
class OpenStackPackageChangeExecutor(core_executors.BaseExecutor):
@classmethod
def get_success_signature(
cls,
tenant,
serialized_tenant,
new_template,
old_package,
service_settings,
**kwargs
):
service_settings = core_utils.serialize_instance(service_settings)
return tasks.LogOpenStackPackageChange().si(
serialized_tenant,
event='succeeded',
new_package=new_template.name,
old_package=old_package.template.name,
service_settings=service_settings,
)
@classmethod
def get_failure_signature(
cls,
tenant,
serialized_tenant,
new_template,
old_package,
service_settings,
**kwargs
):
service_settings = core_utils.serialize_instance(service_settings)
return tasks.LogOpenStackPackageChange().si(
serialized_tenant,
event='failed',
new_package=new_template.name,
old_package=old_package.template.name,
service_settings=service_settings,
)
@classmethod
def get_task_signature(
cls,
tenant,
serialized_tenant,
new_template,
old_package,
service_settings,
**kwargs
):
quotas = {
quota_field.name: value
for quota_field, value in _get_template_quotas(new_template).items()
}
push_quotas = openstack_executors.TenantPushQuotasExecutor.as_signature(
tenant, quotas=quotas
)
serialized_new_template = core_utils.serialize_instance(new_template)
serialized_old_package = core_utils.serialize_instance(old_package)
serialized_service_settings = core_utils.serialize_instance(service_settings)
success_package_change = tasks.OpenStackPackageSuccessTask().si(
serialized_tenant,
serialized_new_template,
serialized_old_package,
serialized_service_settings,
)
return push_quotas | success_package_change
| mit | -6,142,464,563,913,863,000 | 34.008197 | 103 | 0.656989 | false | 4.53397 | false | false | false |
EmreAtes/spack | var/spack/repos/builtin/packages/texlive/package.py | 1 | 3446 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Texlive(Package):
"""TeX Live is a free software distribution for the TeX typesetting
    system. Heads up, it is not a reproducible installation."""
homepage = "http://www.tug.org/texlive"
# Install from specific site because the texlive mirrors do not
# all update in synchrony.
#
# BEWARE: TexLive updates their installs frequently (probably why
# they call it *Live*...). There is no good way to provide a
# repeatable install of the package.
#
# We're now pulling the installation bits from tug.org's repo of
# historic bits. This means that the checksum for the installer
# itself is stable. Don't let that fool you though, it's still
# installing TeX **LIVE** from e.g. ctan.math.... below, which is
# not reproducible.
version('live', '8f8fc301514c08a89a2e97197369c648',
url='ftp://tug.org/historic/systems/texlive/2017/install-tl-unx.tar.gz')
# There does not seem to be a complete list of schemes.
# Examples include:
# full scheme (everything)
# medium scheme (small + more packages and languages)
# small scheme (basic + xetex, metapost, a few languages)
# basic scheme (plain and latex)
# minimal scheme (plain only)
# See:
# https://www.tug.org/texlive/doc/texlive-en/texlive-en.html#x1-25025r6
variant(
'scheme',
default='small',
values=('minimal', 'basic', 'small', 'medium', 'full'),
description='Package subset to install'
)
depends_on('perl', type='build')
def install(self, spec, prefix):
# Using texlive's mirror system leads to mysterious problems,
# in lieu of being able to specify a repository as a variant, hardwire
# a particular (slow, but central) one for now.
_repository = 'http://ctan.math.washington.edu/tex-archive/systems/texlive/tlnet/'
env = os.environ
env['TEXLIVE_INSTALL_PREFIX'] = prefix
perl = which('perl')
scheme = spec.variants['scheme'].value
perl('./install-tl', '-scheme', scheme,
'-repository', _repository,
'-portable', '-profile', '/dev/null')
| lgpl-2.1 | 428,417,736,615,517,630 | 42.620253 | 90 | 0.653511 | false | 3.889391 | false | false | false |
junzis/py-adsb-decoder | pyModeS/extra/aero.py | 1 | 5201 | """
Functions for aeronautics in this module
- physical quantities always in SI units
- lat,lon,course and heading in degrees
International Standard Atmosphere
::
p,rho,T = atmos(H) # atmos as function of geopotential altitude H [m]
a = vsound(H) # speed of sound [m/s] as function of H[m]
    p = pressure(H)       # calls atmos but returns only pressure [Pa]
    T = temperature(H)    # calculates temperature [K]
    rho = density(H)      # calls atmos but returns only density [kg/m3]
Speed conversion at altitude H[m] in ISA
::
Mach = tas2mach(Vtas,H) # true airspeed (Vtas) to mach number conversion
    Vtas = mach2tas(Mach,H)     # mach number to true airspeed conversion
Vtas = eas2tas(Veas,H) # equivalent airspeed to true airspeed, H in [m]
    Veas = tas2eas(Vtas,H)      # true airspeed to equivalent airspeed, H in [m]
Vtas = cas2tas(Vcas,H) # Vcas to Vtas conversion both m/s, H in [m]
Vcas = tas2cas(Vtas,H) # Vtas to Vcas conversion both m/s, H in [m]
Vcas = mach2cas(Mach,H) # Mach to Vcas conversion Vcas in m/s, H in [m]
    Mach = cas2mach(Vcas,H)     # Vcas to mach conversion, Vcas in m/s, H in [m]
"""
import numpy as np
"""Aero and geo Constants """
kts = 0.514444 # knot -> m/s
ft = 0.3048 # ft -> m
fpm = 0.00508 # ft/min -> m/s
inch = 0.0254 # inch -> m
sqft = 0.09290304 # 1 square foot
nm = 1852. # nautical mile -> m
lbs = 0.453592 # pound -> kg
g0 = 9.80665 # m/s2, Sea level gravity constant
R = 287.05287 # m2/(s2 x K), gas constant, sea level ISA
p0 = 101325. # Pa, air pressure, sea level ISA
rho0 = 1.225 # kg/m3, air density, sea level ISA
T0 = 288.15 # K, temperature, sea level ISA
gamma = 1.40 # cp/cv for air
gamma1 = 0.2 # (gamma-1)/2 for air
gamma2 = 3.5 # gamma/(gamma-1) for air
beta = -0.0065 # [K/m] ISA temp gradient below tropopause
r_earth = 6371000. # m, average earth radius
a0 = 340.293988 # m/s, sea level speed of sound ISA, sqrt(gamma*R*T0)
def atmos(H):
# H in metres
T = np.maximum(288.15 - 0.0065 * H, 216.65)
rhotrop = 1.225 * (T / 288.15)**4.256848030018761
dhstrat = np.maximum(0., H - 11000.0)
rho = rhotrop * np.exp(-dhstrat / 6341.552161)
p = rho * R * T
return p, rho, T
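# Illustrative check (not part of the original module): at sea level the ISA
# reference values are recovered, e.g. atmos(0) ~= (101325.0, 1.225, 288.15).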
def temperature(H):
p, r, T = atmos(H)
return T
def pressure(H):
p, r, T = atmos(H)
return p
def density(H):
p, r, T = atmos(H)
return r
def vsound(H):
"""Speed of sound"""
T = temperature(H)
a = np.sqrt(gamma * R * T)
return a
def distance(lat1, lon1, lat2, lon2, H=0):
"""
Compute spherical distance from spherical coordinates.
For two locations in spherical coordinates
(1, theta, phi) and (1, theta', phi')
cosine( arc length ) =
sin phi sin phi' cos(theta-theta') + cos phi cos phi'
distance = rho * arc length
"""
# phi = 90 - latitude
phi1 = np.radians(90.0 - lat1)
phi2 = np.radians(90.0 - lat2)
# theta = longitude
theta1 = np.radians(lon1)
theta2 = np.radians(lon2)
cos = np.sin(phi1) * np.sin(phi2) * np.cos(theta1 - theta2) + np.cos(phi1) * np.cos(phi2)
cos = np.where(cos>1, 1, cos)
arc = np.arccos(cos)
dist = arc * (r_earth + H) # meters, radius of earth
return dist
def bearing(lat1, lon1, lat2, lon2):
lat1 = np.radians(lat1)
lon1 = np.radians(lon1)
lat2 = np.radians(lat2)
lon2 = np.radians(lon2)
x = np.sin(lon2-lon1) * np.cos(lat2)
y = np.cos(lat1) * np.sin(lat2) \
- np.sin(lat1) * np.cos(lat2) * np.cos(lon2-lon1)
initial_bearing = np.arctan2(x, y)
initial_bearing = np.degrees(initial_bearing)
bearing = (initial_bearing + 360) % 360
return bearing
# -----------------------------------------------------
# Speed conversions, altitude H all in meters
# -----------------------------------------------------
def tas2mach(Vtas, H):
"""True Airspeed to Mach number"""
a = vsound(H)
Mach = Vtas/a
return Mach
def mach2tas(Mach, H):
"""Mach number to True Airspeed"""
a = vsound(H)
Vtas = Mach*a
return Vtas
def eas2tas(Veas, H):
"""Equivalent Airspeed to True Airspeed"""
rho = density(H)
Vtas = Veas * np.sqrt(rho0/rho)
return Vtas
def tas2eas(Vtas, H):
"""True Airspeed to Equivalent Airspeed"""
rho = density(H)
Veas = Vtas * np.sqrt(rho/rho0)
return Veas
def cas2tas(Vcas, H):
"""Calibrated Airspeed to True Airspeed"""
p, rho, T = atmos(H)
qdyn = p0*((1.+rho0*Vcas*Vcas/(7.*p0))**3.5-1.)
Vtas = np.sqrt(7.*p/rho*((1.+qdyn/p)**(2./7.)-1.))
return Vtas
def tas2cas(Vtas, H):
"""True Airspeed to Calibrated Airspeed"""
p, rho, T = atmos(H)
qdyn = p*((1.+rho*Vtas*Vtas/(7.*p))**3.5-1.)
Vcas = np.sqrt(7.*p0/rho0*((qdyn/p0+1.)**(2./7.)-1.))
return Vcas
def mach2cas(Mach, H):
"""Mach number to Calibrated Airspeed"""
Vtas = mach2tas(Mach, H)
Vcas = tas2cas(Vtas, H)
return Vcas
def cas2mach(Vcas, H):
"""Calibrated Airspeed to Mach number"""
Vtas = cas2tas(Vcas, H)
Mach = tas2mach(Vtas, H)
return Mach
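# ----------------------------------------------------------------------
# Minimal usage sketch (an illustrative addition, not part of the original
# module; the altitude and speeds below are made-up example values).
# ----------------------------------------------------------------------
if __name__ == "__main__":
    h = 35000 * ft               # 35000 ft in metres
    vcas = 250 * kts             # 250 kt calibrated airspeed in m/s
    vtas = cas2tas(vcas, h)      # true airspeed at that altitude
    mach = tas2mach(vtas, h)     # corresponding Mach number
    print("TAS = %.1f m/s, Mach = %.3f" % (vtas, mach))
    # great-circle distance between two example coordinates, in km
    print("distance = %.1f km" % (distance(52.3, 4.8, 49.0, 2.5) / 1000.0))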
| mit | 6,628,668,726,650,856,000 | 27.266304 | 93 | 0.584503 | false | 2.725891 | false | false | false |
saltastro/saltefficiency | dataquality/upload_throughput.py | 1 | 4898 | import os
import argparse
import glob
import traceback
import mysql
import dataquality as dq
def upload_throughput(sdb, infile, force=False):
"""Upload throughput measurements to the Science Database
Parameters
----------
sdb: ~mysql.sdb
Connection to the Science Database
infile: str
Path to file to upload to the database
force: bool
If True, it will update the database even if an entry
already exists
"""
# parse the name of the file
tab_name, obsdate = dq.parse_filename(infile)
# check if it is already in the table
sel_cmd = "{}_Id, Throughput_Id".format(tab_name)
tab_cmd = "{} join Throughput using (Throughput_Id) join NightInfo using (NightInfo_id) ".format(tab_name)
log_cmd = " Date = '{}-{}-{}'".format(obsdate[0:4], obsdate[4:6], obsdate[6:8])
record = sdb.select(sel_cmd, tab_cmd, log_cmd)
if len(record) > 0 and not force: return
if os.path.basename(infile).startswith('Rss'):
instr='Rss'
elif os.path.basename(infile).startswith('Salticam'):
instr='Salticam'
else:
raise ValueError("File name not recognized")
# parse the file and update or insert into the database
lines = open(infile).readlines()
if len(lines) < 3 :
raise ValueError("Insufficient number of lines in {}".format(infile))
stars = lines[0].strip()
comment = lines[1].strip().strip('\'')
nid = sdb.select('NightInfo_Id', 'NightInfo', log_cmd)[0][0]
#create throughput
try:
tid = sdb.select('Throughput_Id','Throughput', 'NightInfo_Id={}'.format(nid))[0][0]
except:
ins_cmd = "NightInfo_Id = {} , StarsUsed = '{}', Comments = '{}'".format(nid, stars, comment)
sdb.insert(ins_cmd, 'Throughput')
tid = sdb.select('Throughput_Id','Throughput', 'NightInfo_Id={}'.format(nid))[0][0]
if force:
upd_cmd = "StarsUsed = '{}', Comments = '{}'".format(stars, comment)
sdb.update(upd_cmd, 'Throughput', 'Throughput_Id={}'.format(tid))
# upload each of the filters
for l in lines[2:]:
if not l.strip(): return
l = l.split()
if instr == 'Rss':
l[0] = l[0].strip(',')
try:
fid = sdb.select('RssFilter_Id', 'RssFilter', 'Barcode="{}"'.format(l[0]))[0][0]
except IndexError:
raise ValueError('{} is not an RSS Filter'.format(l[0]))
ins_cmd = 'RssFilter_Id={}, RssThroughputMeasurement={}'.format(fid, l[1])
up_cmd = 'RssFilter_Id={} and Throughput_Id={}'.format(fid, tid)
elif instr == 'Salticam':
l[0] = l[0].strip(',')
try:
fid = sdb.select('SalticamFilter_Id', 'SalticamFilter', 'SalticamFilter_Name="{}"'.format(l[0]))[0][0]
except IndexError:
raise ValueError('{} is not an Salticam Filter'.format(l[0]))
ins_cmd = '{}Filter_Id={}, Throughput_Id={}, {}={}'.format(instr, fid, tid, tab_name, l[1])
if len(record)==0:
sdb.insert(ins_cmd, tab_name)
elif force:
up_cmd = '{}Filter_Id={} and Throughput_Id={}'.format(instr, fid, tid)
uid = sdb.select('{}_Id'.format(tab_name), tab_name, up_cmd)[0][0]
sdb.update(ins_cmd, tab_name, '{}_Id={}'.format(tab_name, uid))
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Upload throughput measurents ot the SDB')
parser.add_argument('-dir', dest='throughput_dir', action='store',
default='/salt/logs/dataquality/throughput/',
help='Directory with throughput files')
parser.add_argument('-f', dest='force', action='store_const',
const=True, default=False,
help='Force the updates')
parser.add_argument('-e', dest='email', action='store_const',
const=True, default=False,
help='Email error results')
args = parser.parse_args()
    user = os.environ['SDBUSER']
    password = os.environ['SDBPASS']
    # sdbhost and sdbname are not defined in this excerpt; assume they are
    # supplied via the environment like the credentials above (the variable
    # names SDBHOST / SDBNAME are an assumption, not from the original file).
    sdbhost = os.environ['SDBHOST']
    sdbname = os.environ['SDBNAME']
    sdb = mysql.mysql(sdbhost, sdbname, user, password, port=3306)
#get the file names
error_msg = ''
for infile in glob.glob(args.throughput_dir+'*.txt'):
try:
upload_throughput(sdb, infile, force=args.force)
except ValueError, e:
error_msg += infile + '\n' + traceback.format_exc() + str(e) + '\n\n'
except IOError, e:
error_msg += infile + '\n' + traceback.format_exc() + str(e) + '\n\n'
if error_msg: print(error_msg)
    if args.email and error_msg:
mailuser = os.environ['MAILUSER']
mailpass = os.environ['MAILPASS']
dq.send_email(error_msg, 'UPLOAD_TRHOUGHPUT Error', username=mailuser,
password=mailpass, to=os.environ['TPUTLIST'], sender = os.environ['MAILSENDER'])
| bsd-3-clause | 3,220,284,524,995,481,600 | 37.265625 | 117 | 0.57942 | false | 3.518678 | false | false | false |
yashchandak/GNN | Sample_Run/Seq_Dynamic/blogDWdata.py | 1 | 6976 | from __future__ import generators, print_function
import numpy as np
from copy import deepcopy
from random import shuffle
from scipy.io import loadmat
class DataSet(object):
def __init__(self, cfg):
"""Construct a DataSet.
"""
self.cfg = cfg
self.all_walks, self.node_seq = self.get_walks(cfg.walks_dir)
#self.node_seq = self.all_walks[:, -1] # index by ending node
self.all_labels = self.get_labels(cfg.label_dir)
self.all_features= self.get_fetaures(cfg.features_dir)
#Increment the positions by 1 and mark the 0th one as False
self.train_nodes = np.concatenate(([False], np.load(cfg.label_fold_dir + 'train_ids.npy')))
self.val_nodes = np.concatenate(([False], np.load(cfg.label_fold_dir + 'val_ids.npy')))
self.test_nodes = np.concatenate(([False], np.load(cfg.label_fold_dir + 'test_ids.npy')))
# [!!!IMP!!]Assert no overlap between test/val/train nodes
self.label_cache, self.update_cache = {0:list(self.all_labels[0])}, {}
def get_walks(self, path):
walks = np.fliplr(np.loadtxt(path, dtype=np.int)) # reverse the sequence
seq = deepcopy(walks[:,-1])
#rotate around the sequences, such that ends are padded with zeros
for i in range(np.shape(walks)[0]):
non_zeros = np.sum(walks[i] > 0)
walks[i] = np.roll(walks[i], non_zeros)
return walks, seq
def get_fetaures(self, path):
# Serves 2 purpose:
# a) add feature for dummy node 0 a.k.a <EOS> and <unlabeled>
# b) increments index of all features by 1, thus aligning it with indices in walks
all_features = np.load(path)
all_features = all_features.astype(np.float32, copy=False) # Required conversion for Python3
all_features = np.concatenate(([np.zeros(all_features.shape[1])], all_features), 0)
return all_features
def get_labels(self, path):
# Labels start with node '0'; Walks_data with node '1'
# To get corresponding mapping, increment the label node number by 1
# add label for dummy node 0 a.k.a <EOS> and <unlabeled>
all_labels = np.load(path)
all_labels = np.concatenate(([np.zeros(all_labels.shape[1])], all_labels), 0)
return all_labels
def accumulate_label_cache(self, labels, nodes):
#Aggregates all the labels for the corresponding nodes
#and tracks the count of updates made
default = (self.all_labels[0], 0) #Initial estimate -> all_zeros
labels = labels[0]
if self.cfg.data_sets.binary_label_updates:
#Convert to binary and keep only the maximum value as 1
amax = np.argmax(labels, axis = 1)
labels = np.zeros(labels.shape)
for idx, pos in enumerate(amax):
labels[idx,pos] = 1
for idx, node in enumerate(nodes):
prv_label, prv_count = self.update_cache.get(node, default)
new_label = prv_label + labels[idx]
new_count = prv_count + 1
self.update_cache[node] = (new_label, new_count)
def update_label_cache(self):
#Average all the predictions made for the corresponding nodes and reset cache
for k, v in self.update_cache.items():
self.label_cache[k] = list(v[0]/v[1])
self.update_cache = {}
def get_nodes(self, dataset):
nodes = []
if dataset == 'train':
nodes = self.train_nodes
elif dataset == 'val':
nodes = self.val_nodes
elif dataset == 'test':
nodes = self.test_nodes
elif dataset == 'all':
# Get all the nodes except the 0th node
nodes = [True]*len(self.train_nodes)
nodes[0] = False
else:
raise ValueError
return nodes
def next_batch(self, dataset, batch_size, shuffle=True):
nodes = self.get_nodes(dataset)
label_len = np.shape(self.all_labels)[1]
max_len = self.all_walks.shape[1]
# Get position of all walks ending with desired set of nodes
pos = []
seq = []
for node in np.where(nodes)[0]:
temp = np.where(self.node_seq == node)[0]
pos.extend(temp)
seq.extend([node]*len(temp))
pos = np.array(pos)
seq = np.array(seq)
if shuffle:
indices = np.random.permutation(len(pos))
pos = pos[indices]
seq = seq[indices]
if batch_size == -1:
batch_size = len(pos)
tot = len(pos)//batch_size
for i in range(0, len(pos), batch_size):
x = self.all_walks[pos[i: i + batch_size]]
            temp = np.array(x) > 0  # boolean mask of non-zero (valid) entries
lengths = max_len - np.sum(temp, axis=1)
x = np.swapaxes(x, 0, 1) # convert from (batch x step) to (step x batch)
# get labels for valid data points, for others: select the 0th label
x2 = [[self.label_cache.get(item, self.label_cache[0]) for item in row] for row in x]
y = [list(self.all_labels[item]) for item in seq[i: i+batch_size]]
# get features for all data points
x = [[self.all_features[item] for item in row] for row in x]
#seq = self.node_seq[pos[i: i + batch_size]]
yield (x, x2, seq, y, tot, lengths)
def next_batch_same(self, dataset, node_count=1):
nodes = self.get_nodes(dataset)
pos = []
counts = []
seq = []
for node in np.where(nodes)[0]:
temp = np.where(self.node_seq == node)[0]
counts.append(len(temp))
seq.append(node)
pos.extend(temp)
pos = np.array(pos)
start = 0
max_len = self.all_walks.shape[1]
# Get a batch of all walks for 'node_count' number of node
for idx in range(0, len(counts), node_count):
#print(idx)
stop = start + np.sum(counts[idx:idx+node_count]) #start + total number of walks to be consiudered this time
x = self.all_walks[pos[start:stop]] #get the walks corresponding to respective positions
            temp = np.array(x) > 0  # boolean mask of non-zero (valid) entries
lengths = max_len - np.sum(temp, axis=1)
x = np.swapaxes(x, 0, 1) # convert from (batch x step) to (step x batch)
# get labels for valid data points, for others: select the 0th label
x2 = [[self.label_cache.get(item, self.label_cache[0]) for item in row] for row in x]
            y = [list(self.all_labels[item]) for item in x[-1,:]] # Not useful, only present for sake of placeholder
# get features for all data points
x = [[self.all_features[item] for item in row] for row in x]
start = stop
yield (x, x2, seq[idx:idx+node_count], counts[idx:idx+node_count], y, lengths)
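# Illustrative usage sketch (not part of the original module; the cfg object,
# batch size and training step are assumptions based on the signatures above):
#
#   data = DataSet(cfg)
#   for x, x2, seq, y, tot, lengths in data.next_batch('train', batch_size=64):
#       predictions = ...                             # run the model on the batch
#       data.accumulate_label_cache(predictions, seq)
#   data.update_label_cache()                         # average predictions per node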
| mit | -3,829,700,722,819,435,500 | 38.862857 | 120 | 0.578842 | false | 3.559184 | false | false | false |
DOV-Vlaanderen/pydov | setup.py | 1 | 2043 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import find_packages, setup
with open('README.md') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
with open('requirements_dev.txt') as f:
# ignore the general requirements
requirements_dev = f.read().splitlines()[1:]
with open('requirements_doc.txt') as f:
requirements_doc = f.read().splitlines()
with open('requirements_vectorfile.txt') as f:
requirements_vectorfile = f.read().splitlines()
setup(
name='pydov',
version='2.1.0',
description=("A Python package to download data from Databank Ondergrond "
"Vlaanderen (DOV)."),
long_description=readme,
long_description_content_type='text/markdown',
author="DOV-Vlaanderen",
author_email='[email protected]',
url='https://github.com/DOV-Vlaanderen/pydov',
packages=find_packages(
include=['pydov']),
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='pydov',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Natural Language :: Dutch',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering',
],
test_suite='tests',
tests_require=requirements_dev,
extras_require={
'docs': requirements_doc,
'devs': requirements_dev,
'vectorfile': requirements_vectorfile
}
)
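# Usage note (illustrative addition, not part of the original file): the
# optional dependency groups declared in extras_require above can be
# installed with e.g. `pip install pydov[vectorfile]` or
# `pip install pydov[devs,docs]`.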
| mit | 6,766,960,254,937,599,000 | 31.951613 | 78 | 0.619677 | false | 3.847458 | false | true | false |
ales-erjavec/orange-canvas | orangecanvas/scheme/tests/__init__.py | 1 | 2700 | """
Scheme tests
"""
from AnyQt.QtCore import QObject, QEventLoop, QTimer, QCoreApplication, QEvent
from typing import List
class EventSpy(QObject):
"""
A testing utility class (similar to QSignalSpy) to record events
delivered to a QObject instance.
Note
----
Only event types can be recorded (as QEvent instances are deleted
on delivery).
Note
----
Can only be used with a QCoreApplication running.
Parameters
----------
object : QObject
An object whose events need to be recorded.
    etype : Union[QEvent.Type, Sequence[QEvent.Type]]
        An event type (or types) that should be recorded
"""
def __init__(self, object: QObject, etype, **kwargs):
super().__init__(**kwargs)
if not isinstance(object, QObject):
raise TypeError
self.__object = object
try:
len(etype)
except TypeError:
etypes = {etype}
else:
etypes = set(etype)
self.__etypes = etypes
self.__record = []
self.__loop = QEventLoop()
self.__timer = QTimer(self, singleShot=True)
self.__timer.timeout.connect(self.__loop.quit)
self.__object.installEventFilter(self)
def wait(self, timeout=5000):
"""
        Start an event loop that runs until a spied event or a timeout occurs.
Parameters
----------
timeout : int
Timeout in milliseconds.
Returns
-------
res : bool
True if the event occurred and False otherwise.
Example
-------
>>> app = QCoreApplication.instance() or QCoreApplication([])
>>> obj = QObject()
>>> spy = EventSpy(obj, QEvent.User)
>>> app.postEvent(obj, QEvent(QEvent.User))
>>> spy.wait()
True
>>> print(spy.events())
[1000]
"""
count = len(self.__record)
self.__timer.stop()
self.__timer.setInterval(timeout)
self.__timer.start()
self.__loop.exec_()
self.__timer.stop()
return len(self.__record) != count
    def eventFilter(self, receiver: QObject, event: QEvent) -> bool:
        if receiver is self.__object and event.type() in self.__etypes:
            self.__record.append(event.type())
            if self.__loop.isRunning():
                self.__loop.quit()
        return super().eventFilter(receiver, event)
def events(self) -> List[QEvent.Type]:
"""
Return a list of all (listened to) event types that occurred.
Returns
-------
events : List[QEvent.Type]
"""
return list(self.__record)
| gpl-3.0 | 5,709,660,334,887,075,000 | 26.835052 | 80 | 0.554815 | false | 4.292528 | false | false | false |
AlexMathew/csipy-exercises | solution/words.py | 1 | 1277 | import sys
def setup(words):
new_words = []
for word in words:
new_words.append(word.lower())
words = new_words
    # This could have been done more easily with a list comprehension.
# words = [word.lower() for word in words]
wordset = set()
wordcount = dict()
for word in words:
prev_size = len(wordset)
wordset.add(word)
new_size = len(wordset)
if new_size > prev_size:
wordcount[word] = words.count(word)
return wordset, wordcount
def main():
if len(sys.argv) == 1 or len(sys.argv) > 2:
print 'FORMAT : python words.py --count|--set'
sys.exit(0)
# This could have been done by using exception handlers for IndexError.
option = sys.argv[1]
if option not in ['--count', '--set']:
print 'FORMAT : python words.py --count|--set'
sys.exit(0)
try:
with open('input.txt', 'r') as f:
text = f.read()
    except Exception:
        print 'Rename one of the two files there as input.txt'
        sys.exit(0)
words = text.split()
wordset, wordcount = setup(words)
if option == '--set':
content = " ".join(sorted(list(wordset)))
with open('output.txt', 'w') as f:
f.write(content)
elif option == '--count':
content = " ".join(sorted(wordcount, key=wordcount.get, reverse=True))
with open('output.txt', 'w') as f:
f.write(content)
if __name__ == '__main__':
main() | mit | 7,769,703,426,424,290,000 | 26.782609 | 73 | 0.651527 | false | 2.831486 | false | false | false |
inf0-warri0r/music_cat | classifier/classifier.py | 1 | 4355 | #!/usr/bin/env python
"""
Author : tharindra galahena (inf0_warri0r)
Project: classifying music using neural network
Blog : http://www.inf0warri0r.blogspot.com
Date : 23/05/2013
License:
Copyright 2013 Tharindra Galahena
This is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version. This is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
* You should have received a copy of the GNU General Public License along with
this. If not, see http://www.gnu.org/licenses/.
"""
from PySide import QtCore, QtGui
from classify import Ui_classifier
import os
import sys
import file_read
import histograme
import thread
import neural_net
import plot
class MyWidget(QtGui.QMainWindow, Ui_classifier):
def __init__(self, parent=None):
super(MyWidget, self).__init__(parent)
self.setupUi(self)
self.file_name = ""
self.hist_u = list()
self.hist_n = list()
self.net = neural_net.neural(10, 1, 3, 15, 0.001, 0.0)
self.net.init()
self.net.put_weights(self.load())
self.img = ""
self.convert.clicked.connect(self.convert_file)
self.classify.clicked.connect(self.classify_func)
self.browse.clicked.connect(self.browse_func)
self.hist_lable.setScaledContents(True)
self.run = True
self.timer = QtCore.QTimer()
self.timer.setInterval(1000)
self.timer.timeout.connect(self.re_draw)
self.timer.start()
def browse_func(self):
fname, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open file')
self.music_file.setText(str(fname))
def re_draw(self):
if not self.run:
QtGui.QMessageBox.about(self, "Done", "Done !!!")
self.run = True
return 0
def convert_file(self):
r, w = os.pipe()
self.file_name = self.music_file.text()
if self.file_name == "":
QtGui.QMessageBox.about(self, "ERROR", "invaild file")
return 0
pid = os.fork()
if pid:
os.waitpid(pid, 0)
else:
os.execlp("ffmpeg", "ffmpeg", "-i",
self.file_name, "-y", "out.aif")
exit(0)
try:
thread.start_new_thread(self.thread_func, ())
except Exception:
QtGui.QMessageBox.about(self, "ERROR", "thread error")
def thread_func(self):
self.run = True
f = file_read.file_read(("out.aif", "out.aif"))
f.convert()
f.save("./")
self.image = f.image
h = histograme.histograme(f.image)
h.create_histograme()
self.hist_u = h.unnormaliced_histograme()
self.hist_n = h.normalice_histograme()
print "done"
self.run = False
def classify_func(self):
p = plot.plot(self.hist_u, 600, 400, (256, 125, 0), (256, 256, 256))
p.set_scales()
p.set_plot()
p.draw("hist.jpg")
qimage = QtGui.QImage("out.aif.jpg")
pix = QtGui.QPixmap.fromImage(qimage)
self.label.setPixmap(pix)
qimage = QtGui.QImage("hist.jpg")
pix = QtGui.QPixmap.fromImage(qimage)
self.hist_lable.setPixmap(pix)
try:
thread.start_new_thread(self.thread_func2, ())
except Exception:
QtGui.QMessageBox.about(self, "ERROR", "thread error")
def thread_func2(self):
print self.hist_n
out = self.net.update(self.hist_n)
print out
self.gener.setText("")
if out[0] < 0.5:
self.type = "Rock"
else:
self.type = "Classic"
self.gener.setText(self.type)
def load(self):
f = open('weights', 'r')
cat = f.read()
f.close()
weights = list()
lst = cat.splitlines()
for i in range(0, len(lst)):
weights.append(float(lst[i]))
return weights
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
window = MyWidget()
window.show()
sys.exit(app.exec_())
| agpl-3.0 | 809,784,179,446,759,300 | 27.279221 | 78 | 0.595867 | false | 3.500804 | false | false | false |
dials/dials | tests/algorithms/indexing/test_non_primitive_basis.py | 1 | 1945 | import pytest
import scitbx.matrix
from cctbx import sgtbx
from cctbx.sgtbx import bravais_types
from dxtbx.model import Crystal, Experiment, ExperimentList
from dials.algorithms.indexing import assign_indices, non_primitive_basis
from dials.array_family import flex
@pytest.mark.parametrize("space_group_symbol", bravais_types.acentric)
def test_detect(space_group_symbol):
sgi = sgtbx.space_group_info(space_group_symbol)
cs = sgi.any_compatible_crystal_symmetry(volume=1000)
ms = cs.build_miller_set(anomalous_flag=True, d_min=1).expand_to_p1()
result = non_primitive_basis.detect(ms.indices())
if sgi.group().conventional_centring_type_symbol() != "P":
assert result is not None
assert isinstance(result, scitbx.matrix.sqr)
assert result.n == (3, 3)
else:
assert result is None
@pytest.mark.parametrize("space_group_symbol", bravais_types.acentric)
def test_correct(space_group_symbol):
sgi = sgtbx.space_group_info(space_group_symbol)
cs = sgi.any_compatible_crystal_symmetry(volume=1000)
ms = cs.build_miller_set(anomalous_flag=True, d_min=1).expand_to_p1()
# the reciprocal matrix
B = scitbx.matrix.sqr(cs.unit_cell().fractionalization_matrix()).transpose()
crystal = Crystal(B, sgtbx.space_group())
expts = ExperimentList([Experiment(crystal=crystal)])
refl = flex.reflection_table()
refl["miller_index"] = ms.indices()
refl["rlp"] = B.elems * ms.indices().as_vec3_double()
refl["imageset_id"] = flex.int(len(refl))
refl["xyzobs.mm.value"] = flex.vec3_double(len(refl))
non_primitive_basis.correct(expts, refl, assign_indices.AssignIndicesGlobal())
cs_corrected = expts.crystals()[0].get_crystal_symmetry()
assert cs_corrected.change_of_basis_op_to_primitive_setting().is_identity_op()
assert (
cs.change_of_basis_op_to_primitive_setting().apply(ms.indices())
== refl["miller_index"]
)
| bsd-3-clause | 7,598,806,288,202,229,000 | 37.137255 | 82 | 0.705398 | false | 3.020186 | false | false | false |
usc-isi/horizon-old | horizon/horizon/dashboards/settings/user/urls.py | 1 | 1066 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import patterns, url
from django.views.generic import TemplateView
urlpatterns = patterns('',
url(r'^$', TemplateView.as_view(
template_name='settings/user/settings.html'),
name='index'))
| apache-2.0 | -3,605,575,973,021,152,000 | 37.071429 | 78 | 0.722326 | false | 4.084291 | false | false | false |
codelucas/facebook-context | backend-flask/backend/example.py | 1 | 15518 | #!/usr/bin/env python
# Copyright 2013 AlchemyAPI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from alchemyapi import AlchemyAPI
import json
demo_text = 'Yesterday dumb Bob destroyed my fancy iPhone in beautiful Denver, Colorado. I guess I will have to head over to the Apple Store and buy a new one.'
demo_url = 'http://www.npr.org/2013/11/26/247336038/dont-stuff-the-turkey-and-other-tips-from-americas-test-kitchen'
demo_html = '<html><head><title>Python Demo | AlchemyAPI</title></head><body><h1>Did you know that AlchemyAPI works on HTML?</h1><p>Well, you do now.</p></body></html>'
print('')
print('')
print(' , ')
print(' .I7777~ ')
print(' .I7777777 ')
print(' +. 77777777 ')
print(' =???, I7777777= ')
print('=?????? 7777777? ,:::===? ')
print('=???????. 777777777777777777~ .77: ?? :7 =$, :$$$$$$+ =$? ')
print(' ????????: .777777777777777777 II77 ?? :7 $$7 :$? 7$7 =$? ')
print(' .???????= +7777777777777777 .7 =7: ?? :7777+ :7:I777? ?777I= 77~777? ,777I I7 77 +$?$: :$? $$ =$? ')
print(' ???????+ ~777???+===::: :7+ ~7 ?? .77 +7 :7?. II 7~ ,I7 77+ I77 ~7 ?7 =7: .$, =$ :$? ,$$? =$? ')
print(' ,???????~ 77 7: ?? ?I. 7 :7 :7 ~7 7 77 =7: 7 7 7~ 7$ $= :$$$$$$~ =$? ')
print(' .??????? ,???I77777777777~ :77777777~ ?? 7: :7 :7 777777777:77 =7 7 +7 ~7 $$$$$$$$I :$? =$? ')
print(' .??????? ,7777777777777777 7= 77 ?? I+ 7 :7 :7 ?? 7,77 =7 7 7~ 7, =$7 $$, :$? =$? ')
print(' .???????. I77777777777777777 +7 ,7??? 77 I7 :7 :7 7~ .?7 77 =7 7 ,77I $+ 7$ :$? =$? ')
print(' ,???????= :77777777777777777~ 7= ~7?? ~I77777 :7 :7 ,777777. 77 =7 7 77, +$ .$::$? =$? ')
print(',??????? :7777777 77 ')
print(' =????? ,7777777 77= ')
print(' +?+ 7777777? ')
print(' + ~7777777 ')
print(' I777777 ')
print(' :~ ')
#Create the AlchemyAPI Object
alchemyapi = AlchemyAPI()
print('')
print('')
print('############################################')
print('# Entity Extraction Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.entities('text',demo_text, { 'sentiment':1 })
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Entities ##')
for entity in response['entities']:
print('text: ', entity['text'].encode('utf-8'))
print('type: ', entity['type'])
print('relevance: ', entity['relevance'])
print('sentiment: ', entity['sentiment']['type'])
if 'score' in entity['sentiment']:
print('sentiment score: ' + entity['sentiment']['score'])
print('')
else:
print('Error in entity extraction call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Keyword Extraction Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.keywords('text',demo_text, { 'sentiment':1 })
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Keywords ##')
for keyword in response['keywords']:
print('text: ', keyword['text'].encode('utf-8'))
print('relevance: ', keyword['relevance'])
print('sentiment: ', keyword['sentiment']['type'])
if 'score' in keyword['sentiment']:
print('sentiment score: ' + keyword['sentiment']['score'])
print('')
else:
    print('Error in keyword extraction call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Concept Tagging Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.concepts('text',demo_text)
if response['status'] == 'OK':
print('## Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Concepts ##')
for concept in response['concepts']:
print('text: ', concept['text'])
print('relevance: ', concept['relevance'])
print('')
else:
print('Error in concept tagging call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Sentiment Analysis Example #')
print('############################################')
print('')
print('')
print('Processing html: ', demo_html)
print('')
response = alchemyapi.sentiment('html',demo_html)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Document Sentiment ##')
print('type: ', response['docSentiment']['type'])
if 'score' in response['docSentiment']:
print('score: ', response['docSentiment']['score'])
else:
print('Error in sentiment analysis call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Targeted Sentiment Analysis Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.sentiment_targeted('text',demo_text, 'Denver')
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Targeted Sentiment ##')
print('type: ', response['docSentiment']['type'])
if 'score' in response['docSentiment']:
print('score: ', response['docSentiment']['score'])
else:
print('Error in targeted sentiment analysis call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Text Extraction Example #')
print('############################################')
print('')
print('')
print('Processing url: ', demo_url)
print('')
response = alchemyapi.text('url',demo_url)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Text ##')
print('text: ', response['text'].encode('utf-8'))
print('')
else:
print('Error in text extraction call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Author Extraction Example #')
print('############################################')
print('')
print('')
print('Processing url: ', demo_url)
print('')
response = alchemyapi.author('url',demo_url)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Author ##')
print('author: ', response['author'].encode('utf-8'))
print('')
else:
print('Error in author extraction call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Language Detection Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.language('text',demo_text)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Language ##')
print('language: ', response['language'])
print('iso-639-1: ', response['iso-639-1'])
print('native speakers: ', response['native-speakers'])
print('')
else:
print('Error in language detection call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Title Extraction Example #')
print('############################################')
print('')
print('')
print('Processing url: ', demo_url)
print('')
response = alchemyapi.title('url',demo_url)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Title ##')
print('title: ', response['title'].encode('utf-8'))
print('')
else:
print('Error in title extraction call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Relation Extraction Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.relations('text',demo_text)
if response['status'] == 'OK':
print('## Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Relations ##')
for relation in response['relations']:
if 'subject' in relation:
print('Subject: ', relation['subject']['text'].encode('utf-8'))
if 'action' in relation:
print('Action: ', relation['action']['text'].encode('utf-8'))
if 'object' in relation:
print('Object: ', relation['object']['text'].encode('utf-8'))
print('')
else:
print('Error in relation extraction call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Text Categorization Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.category('text',demo_text)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Category ##')
print('text: ', response['category'])
print('score: ', response['score'])
print('')
else:
print('Error in text categorization call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Feed Detection Example #')
print('############################################')
print('')
print('')
print('Processing url: ', demo_url)
print('')
response = alchemyapi.feeds('url',demo_url)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Feeds ##')
for feed in response['feeds']:
print('feed: ', feed['feed'])
else:
print('Error in feed detection call: ', response['statusInfo'])
print('')
print('')
print('')
print('')
print('')
print('############################################')
print('# Microformats Parsing Example #')
print('############################################')
print('')
print('')
print('Processing url: ', demo_url)
print('')
response = alchemyapi.microformats('url',demo_url)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Microformats ##')
for microformat in response['microformats']:
print('Field: ', microformat['field'].encode('utf-8'))
print('Data: ', microformat['data'])
print('')
else:
print('Error in microformats parsing call: ', response['statusInfo'])
print('')
print('')
print('')
print('')
print('')
print('############################################')
print('# Image Extraction Example #')
print('############################################')
print('')
print('')
print('Processing url: ', demo_url)
print('')
response = alchemyapi.imageExtraction('url',demo_url)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Image ##')
print('Image: ', response['image'])
print('')
else:
print('Error in image extraction call: ', response['statusInfo'])
print('')
print('')
print('')
print('')
print('')
print('############################################')
print('# Taxonomy Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.taxonomy('text',demo_text)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Categories ##')
for category in response['taxonomy']:
print(category['label'], ' : ', category['score'])
print('')
else:
print('Error in taxonomy call: ', response['statusInfo'])
print('')
print('')
print('')
print('')
print('')
print('############################################')
print('# Combined Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
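# The combined call bundles several analyses into one request; this demo
# prints the keywords, concepts and entities from the response.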
response = alchemyapi.combined('text',demo_text)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Keywords ##')
for keyword in response['keywords']:
print(keyword['text'], ' : ', keyword['relevance'])
print('')
print('## Concepts ##')
for concept in response['concepts']:
print(concept['text'], ' : ', concept['relevance'])
print('')
print('## Entities ##')
for entity in response['entities']:
print(entity['type'], ' : ', entity['text'], ', ', entity['relevance'])
print(' ')
else:
print('Error in combined call: ', response['statusInfo'])
print('')
print('')
| apache-2.0 | -368,163,000,386,899,300 | 27.163339 | 168 | 0.47345 | false | 3.758295 | false | false | false |
oblique-labs/pyVM | rpython/rlib/unicodedata/test/test_unicodedata.py | 1 | 6594 | # encoding: utf-8
import random
import unicodedata
import py
from rpython.rlib.unicodedata import (
unicodedb_3_2_0, unicodedb_5_2_0, unicodedb_6_0_0, unicodedb_6_2_0,
unicodedb_8_0_0)
class TestUnicodeData(object):
def setup_class(cls):
if unicodedata.unidata_version != '5.2.0':
py.test.skip('Needs python with unicode 5.2.0 database.')
seed = random.getrandbits(32)
print "random seed: ", seed
random.seed(seed)
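# Sample the Basic Multilingual Plane until we have at least 1000 code points
# that carry a Unicode name (charlist) and 1000 that do not (nocharlist).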
cls.charlist = charlist = []
cls.nocharlist = nocharlist = []
while len(charlist) < 1000 or len(nocharlist) < 1000:
chr = unichr(random.randrange(65536))
try:
charlist.append((chr, unicodedata.name(chr)))
except ValueError:
nocharlist.append(chr)
def test_random_charnames(self):
for chr, name in self.charlist:
assert unicodedb_5_2_0.name(ord(chr)) == name
assert unicodedb_5_2_0.lookup(name) == ord(chr)
def test_random_missing_chars(self):
for chr in self.nocharlist:
py.test.raises(KeyError, unicodedb_5_2_0.name, ord(chr))
def test_isprintable(self):
assert unicodedb_5_2_0.isprintable(ord(' '))
assert unicodedb_5_2_0.isprintable(ord('a'))
assert not unicodedb_5_2_0.isprintable(127)
assert unicodedb_5_2_0.isprintable(0x00010346) # GOTHIC LETTER FAIHU
assert unicodedb_5_2_0.isprintable(0xfffd) # REPLACEMENT CHARACTER
assert unicodedb_5_2_0.isprintable(0xfffd) # REPLACEMENT CHARACTER
assert not unicodedb_5_2_0.isprintable(0xd800) # SURROGATE
assert not unicodedb_5_2_0.isprintable(0xE0020) # TAG SPACE
def test_identifier(self):
assert unicodedb_5_2_0.isxidstart(ord('A'))
assert not unicodedb_5_2_0.isxidstart(ord('_'))
assert not unicodedb_5_2_0.isxidstart(ord('0'))
assert not unicodedb_5_2_0.isxidstart(ord('('))
assert unicodedb_5_2_0.isxidcontinue(ord('A'))
assert unicodedb_5_2_0.isxidcontinue(ord('_'))
assert unicodedb_5_2_0.isxidcontinue(ord('0'))
assert not unicodedb_5_2_0.isxidcontinue(ord('('))
oc = ord(u'日')
assert unicodedb_5_2_0.isxidstart(oc)
def test_compare_functions(self):
def getX(fun, code):
try:
return getattr(unicodedb_5_2_0, fun)(code)
except KeyError:
return -1
for code in range(0x10000):
char = unichr(code)
assert unicodedata.digit(char, -1) == getX('digit', code)
assert unicodedata.numeric(char, -1) == getX('numeric', code)
assert unicodedata.decimal(char, -1) == getX('decimal', code)
assert unicodedata.category(char) == unicodedb_5_2_0.category(code)
assert unicodedata.bidirectional(char) == unicodedb_5_2_0.bidirectional(code)
assert unicodedata.decomposition(char) == unicodedb_5_2_0.decomposition(code)
assert unicodedata.mirrored(char) == unicodedb_5_2_0.mirrored(code)
assert unicodedata.combining(char) == unicodedb_5_2_0.combining(code)
def test_compare_methods(self):
for code in range(0x10000):
char = unichr(code)
assert char.isalnum() == unicodedb_5_2_0.isalnum(code)
assert char.isalpha() == unicodedb_5_2_0.isalpha(code)
assert char.isdecimal() == unicodedb_5_2_0.isdecimal(code)
assert char.isdigit() == unicodedb_5_2_0.isdigit(code)
assert char.islower() == unicodedb_5_2_0.islower(code)
assert char.isnumeric() == unicodedb_5_2_0.isnumeric(code)
assert char.isspace() == unicodedb_5_2_0.isspace(code), hex(code)
assert char.istitle() == (unicodedb_5_2_0.isupper(code) or unicodedb_5_2_0.istitle(code)), code
assert char.isupper() == unicodedb_5_2_0.isupper(code)
assert char.lower() == unichr(unicodedb_5_2_0.tolower(code))
assert char.upper() == unichr(unicodedb_5_2_0.toupper(code))
assert char.title() == unichr(unicodedb_5_2_0.totitle(code)), hex(code)
def test_hangul_difference_520(self):
assert unicodedb_5_2_0.name(40874) == 'CJK UNIFIED IDEOGRAPH-9FAA'
def test_differences(self):
assert unicodedb_5_2_0.name(9187) == 'BENZENE RING WITH CIRCLE'
assert unicodedb_5_2_0.lookup('BENZENE RING WITH CIRCLE') == 9187
py.test.raises(KeyError, unicodedb_3_2_0.lookup, 'BENZENE RING WITH CIRCLE')
py.test.raises(KeyError, unicodedb_3_2_0.name, 9187)
def test_casefolding(self):
assert unicodedb_6_2_0.casefold_lookup(223) == [115, 115]
assert unicodedb_6_2_0.casefold_lookup(976) == [946]
assert unicodedb_5_2_0.casefold_lookup(42592) == None
# 1010 has been remove between 3.2.0 and 5.2.0
assert unicodedb_3_2_0.casefold_lookup(1010) == [963]
assert unicodedb_5_2_0.casefold_lookup(1010) == None
# 7838 has been added in 5.2.0
assert unicodedb_3_2_0.casefold_lookup(7838) == None
assert unicodedb_5_2_0.casefold_lookup(7838) == [115, 115]
# Only lookup who cannot be resolved by `lower` are stored in database
assert unicodedb_3_2_0.casefold_lookup(ord('E')) == None
class TestUnicodeData600(object):
def test_some_additions(self):
additions = {
ord(u"\u20B9"): 'INDIAN RUPEE SIGN',
# u'\U0001F37A'
127866: 'BEER MUG',
# u'\U0001F37B'
127867: 'CLINKING BEER MUGS',
# u"\U0001F0AD"
127149: 'PLAYING CARD QUEEN OF SPADES',
# u"\U0002B740"
177984: "CJK UNIFIED IDEOGRAPH-2B740",
}
for un, name in additions.iteritems():
assert unicodedb_6_0_0.name(un) == name
assert unicodedb_6_0_0.isprintable(un)
def test_special_casing(self):
assert unicodedb_6_0_0.tolower_full(ord('A')) == [ord('a')]
# The German es-zed is special--the normal mapping is to SS.
assert unicodedb_6_0_0.tolower_full(ord(u'\xdf')) == [0xdf]
assert unicodedb_6_0_0.toupper_full(ord(u'\xdf')) == map(ord, 'SS')
assert unicodedb_6_0_0.totitle_full(ord(u'\xdf')) == map(ord, 'Ss')
def test_islower(self):
assert unicodedb_6_2_0.islower(0x2177)
class TestUnicodeData800(object):
def test_changed_in_version_8(self):
assert unicodedb_6_2_0.toupper_full(0x025C) == [0x025C]
assert unicodedb_8_0_0.toupper_full(0x025C) == [0xA7AB]
| mit | -4,423,305,646,221,714,000 | 42.946667 | 107 | 0.614381 | false | 3.130104 | true | false | false |
dereneaton/ipyrad | ipyrad/core/paramsinfo.py | 1 | 21970 | #!/usr/bin/env python
""" Return explanation and options for each parameter.
ip.get_params_info(1) or ip.get_params_info("project_dir")
return the same result. If no argument is given, a summary of the available
parameters and their numbered references is returned.
Parameter info is stored as a dict of tuples. Each tuple consists of a
short and a long description for each parameter. By default if you ask
for a parameter you'll get the long description.
"""
from __future__ import print_function
from collections import OrderedDict
pinfo = OrderedDict([
("0", ("""
(0) assembly_name ----------------------------------------------------
This is the name of your assembly. It will be the prefix for all
directories inside the project directory. An easy default for this
parameter is the name of your project directory. For example if your
project directory is ./white-crowns, then your assembly name could be
white-crowns. Assembly name is variable because you might want to
fork assemblies within a project to try different runs with different
minimum coverage values, different levels of indels allowed, etc.
Examples:
----------------------------------------------------------------------
data.set_params('assembly_name', "white-crowns") ## verbose
----------------------------------------------------------------------
""", "Assembly name. Used to name output directories for assembly steps")
),
("1", ("""
(1) project_dir ------------------------------------------------------
Project name / path for working directory where all data files will be
saved. This parameter affects all steps of assembly (1-7).
Examples:
----------------------------------------------------------------------
data.set_params('project_dir', "./") ## verbose
----------------------------------------------------------------------
""", "Project dir (made in curdir if not present)")
),
("2", ("""
(2) raw_fastq_path ---------------------------------------------------
The directory or files (selected with * wildcard selector) in which
FASTQ data files reside. Files can be gzipped. This parameter affects
only step 1 of assembly. Examples:
----------------------------------------------------------------------
data.set_params("raw_fastq_path", "raw/*.fastq.gz") ## verbose
----------------------------------------------------------------------
""", "Location of raw non-demultiplexed fastq files")
),
("3", ("""
(3) barcodes_path ----------------------------------------------------
Path to the barcodes file used in step 1 of assembly for
demultiplexing. If data are already demultiplexed this can be left
blank. This parameter affects only step 1 of assembly. NB: iPyrad
can only handle one barcodes file at a time, so if you have multiple
barcodes files and multiple raw files then you'll need to run each
separately. Examples:
----------------------------------------------------------------------
data.set_params("barcodes_path", "./barcodes.txt") ## verbose
----------------------------------------------------------------------
""", "Location of barcodes file")
),
("4", ("""
(4) sorted_fastq_path ------------------------------------------------
Path to demultiplexed fastq data. If left blank, this is assigned
automatically to <data.name>_fastq/ within the working directory. If your
data are already demultiplexed then you must enter the location of your
data here. Wildcard selectors can be used to select a subsample of files
within a directory, else all files are selected in the directory.
This parameter affects only step 2 of assembly.
Examples:
----------------------------------------------------------------------
data.set_params("sorted_fastq_path", "data/*.gz") ##
----------------------------------------------------------------------
""", "Location of demultiplexed/sorted fastq files")
),
("5", ("""
(5) assembly_method --------------------------------------------------
A string specifying the desired assembly method. There are four
available options for assembly method:
denovo - Denovo assembly is the classic pyrad method, and
it is the <default> unless otherwise specified.
Denovo will cluster and align all reads from scratch
reference - Reference assembly will map and align reads to the
provided reference sequence, which must be specified
in parameter 28 (reference_sequence). Strict refer-
ence assembly will throw out all unmapped reads,
which could be a significant proportion depending
on the distance between your reference and study
species.
----------------------------------------------------------------------
data.set_params("assembly_method", "denovo") ## verbose
----------------------------------------------------------------------
""", "Assembly method (denovo, reference)")
),
("6", ("""
(6) reference_sequence -----------------------------------------------
The path to the reference sequence you desire to map your reads to.
The reference may be either fasta or gzipped fasta. It should be a
complete reference sequence, including all chromosomes, scaffolds, and
contigs in one huge file (most reference sequences available will be
in this format, especially non-model references). The first time you
attempt to use this sequence it will be indexed (we are using bwa
for reference mapping). This is a time intensive process so expect the
first run to take some time, certainly more than ten minutes, but less
than an hour. If you desire to index the reference yourself you can do
this, but best not to unless you really care about bwa indexing
settings. We chose conservative defaults that have worked well for us
on other projects.
A word on the format of the path (this is important). The path may
either be a full path (desirable) or a path relative to the directory
you are running ipyrad from (supported but be careful of the path).
----------------------------------------------------------------------
data.set_params(6) = /home/wat/data/reference.fa ## set a full path
data.set_params(6) = ./data/reference.fa.gz ## set a relative path
data.set_params("reference_sequence") = ./data/reference.fa ## verbose
----------------------------------------------------------------------
""", "Location of reference sequence file")
),
("7", ("""
(7) datatype ---------------------------------------------------------
Options: rad, gbs, 2brad, ddrad, pairddrad, pairgbs, pair3rad,
This parameter affects all steps of assembly (1-7).
Examples:
----------------------------------------------------------------------
data.set_params(7) = 'rad' ## rad data type
data.set_params(7) = 'gbs' ## gbs data type
data.set_params(7) = 'pairddrad' ## gbs data type
data.set_params("datatype") = 'ddrad' ## verbose
----------------------------------------------------------------------
""", "Datatype (see docs): rad, gbs, ddrad, etc.")
),
("8", ("""
(8) restriction_overhang ---------------------------------------------
A tuple containing one or two restriction overhangs. Single digest
RADseq with sonication requires only one overhang; all other data
types should have two. The first is used for detecting barcodes, the
second is not required, but is used in filtering, and is needed for
removal from short DNA fragments. This parameter affects steps 1,2,4,5,
and 7 of assembly.
Examples:
----------------------------------------------------------------------
data.set_params(8) = ("TGCAG", "") ## default rad (PstI)
data.set_params(8) = ("CWGC", "CWGC") ## gbs or pairgbs (ApeKI)
data.set_params(8) = ("CAGT", "AATT") ## ddrad (ApeKI, MSI)
data.set_params(8) = ("CAGT", "AATT") ## pairddrad (ApeKI, MSI)
data.set_params("restriction_overhang") = ("CAGT", "AATT") ## verbose
----------------------------------------------------------------------
""", "Restriction overhang (cut1,) or (cut1, cut2)")
),
("9", ("""
(9) max_low_qual_bases -----------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params(9) = 10
data.set_params("max_low_qual_bases") = 6
----------------------------------------------------------------------
""", "Max low quality base calls (Q<20) in a read")
),
("10", ("""
(10) phred_Qscore_offset ---------------------------------------------
The threshold at which a base call is considered low quality during
step 2 filtering is determined by the phred_Qscore_offset. The default
offset is 33, which is equivalent to a minimum qscore of 20 (99% call
confidence). Some older data use a qscore offset of 64. You can toggle
the offset number to change the threshold for low qual bases. For
example, reducing the offset to 26 is equivalent to a minimum qscore
of 13, which is approximately 95% probability of a correct base call.
Examples:
----------------------------------------------------------------------
data.set_params(10) = 33
data.set_params("phred_Qscore_offset") = 26 ## 95% confidence
data.set_params("phred_Qscore_offset") = 43 ## 99.9% confidence
data.set_params("phred_Qscore_offset") = 33
----------------------------------------------------------------------
""", "phred Q score offset (33 is default and very standard)")
),
("11", ("""
(11) mindepth_statistical --------------------------------------------
An integer value indicating the mindepth for statistical base calls
based a binomial probability with H and E estimated from the data.
Base calls are made at >= the value entered. For most reasonable
estimates of E and H, statistical base calls cannot be made below 5
or 6, and will instead be called N.
The parameter affects steps 5 and 7 of assembly.
Examples:
----------------------------------------------------------------------
data.set_params(11) = (6, 6) ## only stat base calls down to depth=6
data.set_params(11) = (10, 5) ## stat calls above 9, majrule from 9-5.
data.set_params(11) = (10, 1) ## stat calls above 9, majrule from 9-1.
data.set_params(mindepth_statistical) = 6 ## verbose
----------------------------------------------------------------------
""", "Min depth for statistical base calling")
),
("12", ("""
(12) mindepth_majrule ------------------------------------------------
An integer value indicating the mindepth for majority-rule base calls.
Base calls are made at >= the value entered. It may often be advant-
ageous to use a low value for majrule calls to preserve most data during
assembly within-samples, so that more data is clustered between samples.
Low depth data can be filtered out later from the final data set if needed.
The parameter affects steps 5 and 7 of assembly.
Examples:
----------------------------------------------------------------------
data.set_params(12) = (6, 6) ## only stat base calls down to depth=6
data.set_params(12) = (10, 5) ## stat calls above 9, majrule from 9-5.
data.set_params(12) = (10, 1) ## stat calls above 9, majrule from 9-1.
data.set_params(mindepth_majrule) = 6 ## verbose
----------------------------------------------------------------------
""", "Min depth for majority-rule base calling")
),
("13", ("""
(13) maxdepth --------------------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params(13) = 33
data.set_params("maxdepth") = 33
----------------------------------------------------------------------
""", "Max cluster depth within samples")
),
("14", ("""
(14) clust_threshold -------------------------------------------------
Clustering threshold.
Examples:
----------------------------------------------------------------------
data.set_params(14) = .85 ## clustering similarity threshold
data.set_params(14) = .90 ## clustering similarity threshold
data.set_params(14) = .95 ## very high values not recommended
data.set_params("clust_threshold") = .83 ## verbose
----------------------------------------------------------------------
""", "Clustering threshold for de novo assembly")
),
("15", ("""
(15) max_barcode_mismatch --------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params(15) = 1
data.set_params("max_barcode_mismatch") = 1
----------------------------------------------------------------------
""", "Max number of allowable mismatches in barcodes")
),
("16", ("""
(16) filter_adapters ----------------------------------------------
Examples:
-------------------------------------------------------------------
data.set_params(16) = 1
data.set_params("filter_adapters") = 1
-------------------------------------------------------------------
""", "Filter for adapters/primers (1 or 2=stricter)")
),
("17", ("""
(17) filter_min_trim_len ---------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params(17) = 1
data.set_params("filter_min_trim_len") = 1
----------------------------------------------------------------------
""", "Min length of reads after adapter trim")
),
("18", ("""
(18) max_alleles_consens ---------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params(18) = 1
data.set_params("max_alleles_consens") = 1
----------------------------------------------------------------------
""", "Max alleles per site in consensus sequences")
),
("19", ("""
(19) max_Ns_consens --------------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params(19) = 1
data.set_params("max_Ns_consens") = 1
----------------------------------------------------------------------
""", "Max N's (uncalled bases) in consensus")
),
("20", ("""
(20) max_Hs_consens --------------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params(20) = 1
data.set_params("max_Hs_consens") = 1
----------------------------------------------------------------------
""", "Max Hs (heterozygotes) in consensus")
),
("21", ("""
(21) min_samples_locus -----------------------------------------------
Minimum number of samples a locus must be shared across to be included
in the exported data set following filtering for sequencing depth,
paralogs, ...
Examples
----------------------------------------------------------------------
data.set_params(21) = 4 ## min 4; most inclusive phylo data
data.set_params(21) = 20 ## min 20; less data, less missing
data.set_params(21) = 1 ## min 1; most data, most missing
data.set_params("min_samples_locus") = 4 ## verbose
----------------------------------------------------------------------
""", "Min # samples per locus for output")
),
("22", ("""
(22) max_SNPs_locus --------------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params(22) = 1
data.set_params("max_SNPs_locus") = 1
----------------------------------------------------------------------
""", "Max # SNPs per locus")
),
("23", ("""
(23) max_Indels_locus ------------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params(23) = 1
data.set_params("max_Indels_locus") = 1
----------------------------------------------------------------------
""", "Max # of indels per locus")
),
("24", ("""
(24) max_shared_Hs_locus ---------------------------------------------
...
----------------------------------------------------------------------
data.set_params(24) = .25 ## set as proportion of samples
data.set_params(24) = 4 ## set as number of samples
data.set_params(24) = 9999 ## set arbitrarily high
data.set_params("max_shared_Hs_locus") = 4 ## verbose
----------------------------------------------------------------------
""", "Max # heterozygous sites per locus")
),
("25", ("""
(25) trim_reads -- ---------------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params("trim_reads") = (0, -5, 0, 0) ## trims last 5 from R1
data.set_params("trim_reads") = (5, 85, 0, 0) ## trims R1 from 5-85
data.set_params("trim_reads") = (5, 85, 5, 85) ## trims both pairs 5-85
----------------------------------------------------------------------
""", "Trim raw read edges (R1>, <R1, R2>, <R2) (see docs)")
),
("26", ("""
(26) trim_loci -------------------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params("trim_loci") = (0, 5, 5, 0)
----------------------------------------------------------------------
""", "Trim locus edges (see docs) (R1>, <R1, R2>, <R2)")
),
("27", ("""
(27) output_formats --------------------------------------------------
Examples:
----------------------------------------------------------------------
* ## [27] output_formats: * means all formats
vcf, phy, nex ## [27] list subset of formats if you want
----------------------------------------------------------------------
""", "Output formats (see docs)")
),
("28", ("""
(28) pop_assign_file -------------------------------------------------
Examples:
----------------------------------------------------------------------
./popfile.txt ## [28] pop_assign_file
/home/users/Documents/popfile.txt ## [28] pop_assign_file
----------------------------------------------------------------------
""", "Path to population assignment file")
),
("29", ("""
(29) reference_as_filter ---------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params("reference_as_filter") = ./data/reference.fa ## verbose
----------------------------------------------------------------------
""", "Reads mapped to this reference are removed in step 3")
),
])
def paramname(param=""):
""" Get the param name from the dict index value.
"""
try:
name = pinfo[str(param)][0].strip().split(" ")[1]
except (KeyError, ValueError) as err:
## TODO: paramsinfo get description by param string not working.
## It would be cool to have an assembly object bcz then you could
## just do this:
##
## print(pinfo[data.paramsinfo.keys().index(param)])
print("\tKey name/number not recognized - ".format(param), err)
raise
return name
def paraminfo(param="", short=False):
""" Returns detailed information for the numbered parameter.
Further information is available in the tutorial.
Unlike params() this function doesn't deal well with *
It only takes one parameter at a time and returns the desc
"""
## If the short flag is set return the short description, otherwise
## return the long.
if short:
desc = 1
else:
desc = 0
try:
description = pinfo[str(param)][desc]
except (KeyError, ValueError) as err:
## TODO: paramsinfo get description by param string not working.
## It would be cool to have an assembly object bcz then you could
## just do this:
##
## print(pinfo[data.paramsinfo.keys().index(param)])
print("\tKey name/number not recognized - ".format(param), err)
raise
return description
def paramsinfo(param="", short=False):
""" This is the human readable version of the paramsinfo() function.
You give it a param and it prints to stdout.
"""
if short:
desc = 1
else:
desc = 0
if param == "*":
for key in pinfo:
print(pinfo[str(key)][desc])
elif param:
try:
print(pinfo[str(param)][desc])
except (KeyError, ValueError) as err:
## TODO: paramsinfo get description by param string not working.
## It would be cool to have an assembly object bcz then you could
## just do this:
##
## print(pinfo[data.paramsinfo.keys().index(param)])
print("\tKey name/number not recognized", err)
raise
else:
print("Enter a name or number for explanation of the parameter\n")
for key in pinfo:
print(pinfo[str(key)][desc].split("\n")[1][2:-10])
if __name__ == "__main__":
pass
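# Example usage (numeric keys only -- lookup by parameter name is not
# implemented yet, see the TODOs above):
#     paramsinfo("*")    # long description of every parameter
#     paramsinfo(14)     # long description of parameter 14 (clust_threshold)
#     paramname(14)      # returns "clust_threshold"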
| gpl-3.0 | 1,552,804,021,922,965,200 | 44.298969 | 82 | 0.454802 | false | 4.884393 | false | false | false |
pshchelo/heat | heat/engine/resources/stack_resource.py | 1 | 20765 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import hashlib
import json
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.common import identifier
from heat.common import template_format
from heat.engine import attributes
from heat.engine import environment
from heat.engine import resource
from heat.engine import scheduler
from heat.engine import stack as parser
from heat.engine import template
from heat.rpc import api as rpc_api
LOG = logging.getLogger(__name__)
class StackResource(resource.Resource):
'''
An abstract Resource subclass that allows the management of an entire Stack
as a resource in a parent stack.
'''
# Assume True as this is evaluated before the stack is created
# so there is no way to know for sure without subclass-specific
# template parsing.
requires_deferred_auth = True
def __init__(self, name, json_snippet, stack):
super(StackResource, self).__init__(name, json_snippet, stack)
self._nested = None
self.resource_info = None
def validate(self):
super(StackResource, self).validate()
self.validate_nested_stack()
def validate_nested_stack(self):
try:
name = "%s-%s" % (self.stack.name, self.name)
nested_stack = self._parse_nested_stack(
name,
self.child_template(),
self.child_params())
nested_stack.strict_validate = False
nested_stack.validate()
except AssertionError:
raise
except Exception as ex:
raise exception.StackValidationFailed(
error=_("Failed to validate"),
path=[self.stack.t.get_section_name('resources'), self.name],
message=six.text_type(ex))
def _outputs_to_attribs(self, json_snippet):
outputs = json_snippet.get('Outputs')
if not self.attributes and outputs:
self.attributes_schema = (
attributes.Attributes.schema_from_outputs(outputs))
self.attributes = attributes.Attributes(self.name,
self.attributes_schema,
self._resolve_attribute)
def _needs_update(self, after, before, after_props, before_props,
prev_resource):
# Always issue an update to the nested stack and let the individual
# resources in it decide if they need updating.
return True
@scheduler.wrappertask
def update(self, after, before=None, prev_resource=None):
try:
yield super(StackResource, self).update(after, before,
prev_resource)
except StopIteration:
with excutils.save_and_reraise_exception():
stack_identity = identifier.HeatIdentifier(
self.context.tenant_id,
self.physical_resource_name(),
self.resource_id)
self.rpc_client().stack_cancel_update(self.context,
stack_identity)
def nested(self, force_reload=False, show_deleted=False):
'''Return a Stack object representing the nested (child) stack.
:param force_reload: Forces reloading from the DB instead of returning
the locally cached Stack object
:param show_deleted: Returns the stack even if it's been deleted
'''
if force_reload:
self._nested = None
if self._nested is None and self.resource_id is not None:
self._nested = parser.Stack.load(self.context,
self.resource_id,
show_deleted=show_deleted,
force_reload=force_reload)
if self._nested is None:
raise exception.NotFound(_("Nested stack not found in DB"))
return self._nested
def child_template(self):
'''
Default implementation to get the child template.
Resources that inherit from StackResource should override this method
with specific details about the template used by them.
'''
raise NotImplementedError()
def child_params(self):
'''
Default implementation to get the child params.
Resources that inherit from StackResource should override this method
with specific details about the parameters used by them.
'''
raise NotImplementedError()
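# Illustrative sketch only (the property names below are hypothetical, not
# part of heat): a subclass typically overrides the two hooks above, e.g.
#     def child_template(self):
#         return template_format.parse(self.properties['template_body'])
#     def child_params(self):
#         return self.properties['parameters']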
def preview(self):
'''
Preview a StackResource as resources within a Stack.
This method overrides the original Resource.preview to return a preview
of all the resources contained in this Stack. For this to be possible,
the specific resources need to override both ``child_template`` and
``child_params`` with specific information to allow the stack to be
parsed correctly. If any of these methods is missing, the entire
StackResource will be returned as if it were a regular Resource.
'''
try:
child_template = self.child_template()
params = self.child_params()
except NotImplementedError:
LOG.warn(_LW("Preview of '%s' not yet implemented"),
self.__class__.__name__)
return self
name = "%s-%s" % (self.stack.name, self.name)
self._nested = self._parse_nested_stack(name, child_template, params)
return self.nested().preview_resources()
def _parse_child_template(self, child_template, child_env):
parsed_child_template = child_template
if isinstance(parsed_child_template, template.Template):
parsed_child_template = parsed_child_template.t
return template.Template(parsed_child_template,
files=self.stack.t.files, env=child_env)
def _parse_nested_stack(self, stack_name, child_template,
child_params, timeout_mins=None,
adopt_data=None):
if timeout_mins is None:
timeout_mins = self.stack.timeout_mins
stack_user_project_id = self.stack.stack_user_project_id
new_nested_depth = self._child_nested_depth()
child_env = environment.get_child_environment(
self.stack.env, child_params,
child_resource_name=self.name,
item_to_remove=self.resource_info)
parsed_template = self._child_parsed_template(child_template,
child_env)
# Note we disable rollback for nested stacks, since they
# should be rolled back by the parent stack on failure
nested = parser.Stack(self.context,
stack_name,
parsed_template,
timeout_mins=timeout_mins,
disable_rollback=True,
parent_resource=self.name,
owner_id=self.stack.id,
user_creds_id=self.stack.user_creds_id,
stack_user_project_id=stack_user_project_id,
adopt_stack_data=adopt_data,
nested_depth=new_nested_depth)
return nested
def _child_nested_depth(self):
if self.stack.nested_depth >= cfg.CONF.max_nested_stack_depth:
msg = _("Recursion depth exceeds %d."
) % cfg.CONF.max_nested_stack_depth
raise exception.RequestLimitExceeded(message=msg)
return self.stack.nested_depth + 1
def _child_parsed_template(self, child_template, child_env):
parsed_template = self._parse_child_template(child_template, child_env)
self._validate_nested_resources(parsed_template)
# Don't overwrite the attributes_schema for subclasses that
# define their own attributes_schema.
if not hasattr(type(self), 'attributes_schema'):
self.attributes = None
self._outputs_to_attribs(parsed_template)
return parsed_template
def _validate_nested_resources(self, templ):
total_resources = (len(templ[templ.RESOURCES]) +
self.stack.root_stack.total_resources())
if self.nested():
# It's an update and these resources will be deleted
total_resources -= len(self.nested().resources)
if (total_resources > cfg.CONF.max_resources_per_stack):
message = exception.StackResourceLimitExceeded.msg_fmt
raise exception.RequestLimitExceeded(message=message)
def create_with_template(self, child_template, user_params=None,
timeout_mins=None, adopt_data=None):
"""Create the nested stack with the given template."""
name = self.physical_resource_name()
if timeout_mins is None:
timeout_mins = self.stack.timeout_mins
stack_user_project_id = self.stack.stack_user_project_id
if user_params is None:
user_params = self.child_params()
child_env = environment.get_child_environment(
self.stack.env,
user_params,
child_resource_name=self.name,
item_to_remove=self.resource_info)
new_nested_depth = self._child_nested_depth()
parsed_template = self._child_parsed_template(child_template,
child_env)
adopt_data_str = None
if adopt_data is not None:
if 'environment' not in adopt_data:
adopt_data['environment'] = child_env.user_env_as_dict()
if 'template' not in adopt_data:
adopt_data['template'] = child_template
adopt_data_str = json.dumps(adopt_data)
args = {rpc_api.PARAM_TIMEOUT: timeout_mins,
rpc_api.PARAM_DISABLE_ROLLBACK: True,
rpc_api.PARAM_ADOPT_STACK_DATA: adopt_data_str}
try:
result = self.rpc_client()._create_stack(
self.context,
name,
parsed_template.t,
child_env.user_env_as_dict(),
parsed_template.files,
args,
owner_id=self.stack.id,
user_creds_id=self.stack.user_creds_id,
stack_user_project_id=stack_user_project_id,
nested_depth=new_nested_depth,
parent_resource_name=self.name)
except Exception as ex:
self.raise_local_exception(ex)
self.resource_id_set(result['stack_id'])
def raise_local_exception(self, ex):
ex_type = ex.__class__.__name__
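# Exceptions that crossed the RPC boundary arrive with a '_Remote' suffix on
# the class name; strip it so the name maps back onto the local heat exception.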
is_remote = ex_type.endswith('_Remote')
if is_remote:
ex_type = ex_type[:-len('_Remote')]
full_message = six.text_type(ex)
if full_message.find('\n') > -1 and is_remote:
message, msg_trace = full_message.split('\n', 1)
else:
message = full_message
if (isinstance(ex, exception.ActionInProgress) and
self.stack.action == self.stack.ROLLBACK):
# The update was interrupted and the rollback is already in
# progress, so just ignore the error and wait for the rollback to
# finish
return
if isinstance(ex, exception.HeatException):
message = ex.message
local_ex = copy.copy(getattr(exception, ex_type))
local_ex.msg_fmt = "%(message)s"
raise local_ex(message=message)
def check_create_complete(self, cookie=None):
return self._check_status_complete(resource.Resource.CREATE)
def _check_status_complete(self, action, show_deleted=False,
cookie=None):
try:
nested = self.nested(force_reload=True, show_deleted=show_deleted)
except exception.NotFound:
if action == resource.Resource.DELETE:
return True
# It's possible the engine handling the create hasn't persisted
# the stack to the DB when we first start polling for state
return False
if nested is None:
return True
if nested.action != action:
return False
# Has the action really started?
#
# The rpc call to update does not guarantee that the stack will be
# placed into IN_PROGRESS by the time it returns (it runs stack.update
# in a thread) so you could also have a situation where we get into
# this method and the update hasn't even started.
#
# So we are using a mixture of state (action+status) and updated_at
# to see if the action has actually progressed.
# - very fast updates (like something with one RandomString) we will
# probably miss the state change, but we should catch the updated_at.
# - very slow updates we won't see the updated_at for quite a while,
# but should see the state change.
if cookie is not None:
prev_state = cookie['previous']['state']
prev_updated_at = cookie['previous']['updated_at']
if (prev_updated_at == nested.updated_time and
prev_state == nested.state):
return False
if nested.status == resource.Resource.IN_PROGRESS:
return False
elif nested.status == resource.Resource.COMPLETE:
return True
elif nested.status == resource.Resource.FAILED:
raise resource.ResourceUnknownStatus(
resource_status=nested.status,
status_reason=nested.status_reason)
else:
raise resource.ResourceUnknownStatus(
resource_status=nested.status,
result=_('Stack unknown status'))
def check_adopt_complete(self, cookie=None):
return self._check_status_complete(resource.Resource.ADOPT)
def update_with_template(self, child_template, user_params=None,
timeout_mins=None):
"""Update the nested stack with the new template."""
if self.id is None:
self._store()
nested_stack = self.nested()
if nested_stack is None:
# if the create failed for some reason and the nested
# stack was not created, we need to create an empty stack
# here so that the update will work.
def _check_for_completion(creator_fn):
while not self.check_create_complete(creator_fn):
yield
empty_temp = template_format.parse(
"heat_template_version: '2013-05-23'")
stack_creator = self.create_with_template(empty_temp, {})
checker = scheduler.TaskRunner(_check_for_completion,
stack_creator)
checker(timeout=self.stack.timeout_secs())
if stack_creator is not None:
stack_creator.run_to_completion()
nested_stack = self.nested()
if timeout_mins is None:
timeout_mins = self.stack.timeout_mins
if user_params is None:
user_params = self.child_params()
child_env = environment.get_child_environment(
self.stack.env,
user_params,
child_resource_name=self.name,
item_to_remove=self.resource_info)
parsed_template = self._child_parsed_template(child_template,
child_env)
cookie = {'previous': {
'updated_at': nested_stack.updated_time,
'state': nested_stack.state}}
args = {rpc_api.PARAM_TIMEOUT: timeout_mins}
try:
self.rpc_client().update_stack(
self.context,
nested_stack.identifier(),
parsed_template.t,
child_env.user_env_as_dict(),
parsed_template.files,
args)
except Exception as ex:
LOG.exception('update_stack')
self.raise_local_exception(ex)
return cookie
def check_update_complete(self, cookie=None):
return self._check_status_complete(resource.Resource.UPDATE,
cookie=cookie)
def delete_nested(self):
'''
Delete the nested stack.
'''
stack_identity = identifier.HeatIdentifier(
self.context.tenant_id,
self.physical_resource_name(),
self.resource_id)
try:
self.rpc_client().delete_stack(self.context, stack_identity)
except Exception as ex:
self.rpc_client().ignore_error_named(ex, 'NotFound')
def check_delete_complete(self, cookie=None):
return self._check_status_complete(resource.Resource.DELETE,
show_deleted=True)
def handle_suspend(self):
stack = self.nested()
if stack is None:
raise exception.Error(_('Cannot suspend %s, stack not created')
% self.name)
stack_identity = identifier.HeatIdentifier(
self.context.tenant_id,
self.physical_resource_name(),
self.resource_id)
self.rpc_client().stack_suspend(self.context, stack_identity)
def check_suspend_complete(self, cookie=None):
return self._check_status_complete(resource.Resource.SUSPEND)
def handle_resume(self):
stack = self.nested()
if stack is None:
raise exception.Error(_('Cannot resume %s, stack not created')
% self.name)
stack_identity = identifier.HeatIdentifier(
self.context.tenant_id,
self.physical_resource_name(),
self.resource_id)
self.rpc_client().stack_resume(self.context, stack_identity)
def check_resume_complete(self, cookie=None):
return self._check_status_complete(resource.Resource.RESUME)
def handle_check(self):
stack = self.nested()
if stack is None:
raise exception.Error(_('Cannot check %s, stack not created')
% self.name)
stack_identity = identifier.HeatIdentifier(
self.context.tenant_id,
self.physical_resource_name(),
self.resource_id)
self.rpc_client().stack_check(self.context, stack_identity)
def check_check_complete(self, cookie=None):
return self._check_status_complete(resource.Resource.CHECK)
def prepare_abandon(self):
return self.nested().prepare_abandon()
def get_output(self, op):
'''
Return the specified Output value from the nested stack.
If the output key does not exist, raise an InvalidTemplateAttribute
exception.
'''
stack = self.nested()
if stack is None:
return None
if op not in stack.outputs:
raise exception.InvalidTemplateAttribute(resource=self.name,
key=op)
return stack.output(op)
def _resolve_attribute(self, name):
return self.get_output(name)
def implementation_signature(self):
schema_names = ([prop for prop in self.properties_schema] +
[at for at in self.attributes_schema])
schema_hash = hashlib.sha256(';'.join(schema_names))
definition = {'template': self.child_template(),
'files': self.stack.t.files}
definition_hash = hashlib.sha256(jsonutils.dumps(definition))
return (schema_hash.hexdigest(), definition_hash.hexdigest())
| apache-2.0 | -4,581,617,663,593,579,000 | 38.932692 | 79 | 0.584589 | false | 4.578831 | false | false | false |
hds-lab/coding-ml | msgvis/apps/enhance/migrations/0002_auto_20160222_0230.py | 1 | 1306 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('enhance', '0001_initial'),
(b'auth', b'__first__'), # This line and the next line are for fixing the "Lookup failed auth.User" error
(b'contenttypes', b'__first__'),
]
operations = [
migrations.AddField(
model_name='feature',
name='created_at',
field=models.DateTimeField(default=None, auto_now_add=True),
preserve_default=True,
),
migrations.AddField(
model_name='feature',
name='last_updated',
field=models.DateTimeField(default=None, auto_now=True, auto_now_add=True),
preserve_default=True,
),
migrations.AddField(
model_name='feature',
name='valid',
field=models.BooleanField(default=True),
preserve_default=True,
),
migrations.AlterField(
model_name='feature',
name='source',
field=models.ForeignKey(related_name='features', default=None, to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
| mit | -7,770,648,743,024,572,000 | 30.853659 | 119 | 0.570444 | false | 4.367893 | false | false | false |
jj1bdx/wspr | WsprMod/iq.py | 1 | 4726 | #------------------------------------------------------------------ iq
from Tkinter import *
import Pmw
import g
import w
import time
import tkMessageBox
import pickle
def done():
root.withdraw()
root=Toplevel()
root.withdraw()
root.protocol('WM_DELETE_WINDOW',done)
if g.Win32: root.iconbitmap("wsjt.ico")
root.title("I-Q Mode")
def iq2(t):
root.geometry(t)
root.deiconify()
root.focus_set()
j=ib.get()
lab0.configure(text=str(mb[j])+' m')
iqmode=IntVar()
iqrx=IntVar()
iqtx=IntVar()
fiq=IntVar()
iqrxapp=IntVar()
iqrxadj=IntVar()
isc2=IntVar()
isc2.set(0)
isc2a=IntVar()
isc2a.set(0)
isc3=IntVar()
isc3.set(0)
isc3a=IntVar()
isc3a.set(0)
ib=IntVar()
gain=DoubleVar()
phdeg=DoubleVar()
mb=[0,600,160,80,60,40,30,20,17,15,12,10,6,4,2,0]
tbal=[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
tpha=[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
rbal=[1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0]
rpha=[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
allbands=0
def saveband(event=NONE):
global allbands,tbal,tpha,rbal,rpha
if allbands:
for j in range(1,15):
tbal[j]=isc2.get() + 0.02*isc2a.get()
tpha[j]=isc3.get() + 0.02*isc3a.get()
rbal[j]=w.acom1.gain
rpha[j]=57.2957795*w.acom1.phase
else:
j=ib.get()
tbal[j]=isc2.get() + 0.02*isc2a.get()
tpha[j]=isc3.get() + 0.02*isc3a.get()
rbal[j]=w.acom1.gain
rpha[j]=57.2957795*w.acom1.phase
f=open(g.appdir+'/iqpickle',mode='w')
pickle.dump(tbal,f)
pickle.dump(tpha,f)
pickle.dump(rbal,f)
pickle.dump(rpha,f)
f.close()
def saveall(event=NONE):
global allbands
allbands=1
saveband()
allbands=0
def restore():
global tbal,tpha,rbal,rpha
try:
f=open(g.appdir+'/iqpickle',mode='r')
tbal=pickle.load(f)
tpha=pickle.load(f)
rbal=pickle.load(f)
rpha=pickle.load(f)
f.close()
except:
pass
newband()
def newband():
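# Load the saved settings for the selected band: Rx gain/phase go straight to
# acom1, while the Tx balance/phase are split into a coarse slider (whole
# units) and a fine slider (0.02-unit steps).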
j=ib.get()
lab0.configure(text=str(mb[j])+' m')
w.acom1.gain=rbal[j]
w.acom1.phase=rpha[j]/57.2957795
isc2.set(int(tbal[j]))
isc2a.set(int((tbal[j]-isc2.get())/0.02))
isc3.set(int(tpha[j]))
isc3a.set(int((tpha[j]-isc3.get())/0.02))
#-------------------------------------------------------- Create GUI widgets
g1=Pmw.Group(root,tag_pyclass=None)
lab0=Label(g1.interior(),text='160 m',bg='yellow',pady=5)
lab0.place(x=180,y=40, anchor='e')
#lab0.pack(anchor=W,padx=5,pady=4)
biqmode=Checkbutton(g1.interior(),text='Enable I/Q mode',variable=iqmode)
biqmode.pack(anchor=W,padx=5,pady=2)
biqtx=Checkbutton(g1.interior(),text='Reverse Tx I,Q',variable=iqtx)
biqtx.pack(anchor=W,padx=5,pady=2)
biqrx=Checkbutton(g1.interior(),text='Reverse Rx I,Q',variable=iqrx)
biqrx.pack(anchor=W,padx=5,pady=2)
biqrxapp=Checkbutton(g1.interior(),text='Apply Rx phasing corrections', \
variable=iqrxapp)
biqrxapp.pack(anchor=W,padx=5,pady=2)
biqrxadj=Checkbutton(g1.interior(),text='Adjust Rx phasing', \
variable=iqrxadj)
biqrxadj.pack(anchor=W,padx=5,pady=2)
lab1=Label(g1.interior(),text='',justify=LEFT)
lab1.pack(anchor=W,padx=5,pady=4)
fiq_entry=Pmw.EntryField(g1.interior(),labelpos=W,label_text='Fiq (Hz): ',
value='12000',entry_textvariable=fiq,entry_width=10,
validate={'validator':'integer','min':-24000,'max':24000,
'minstrict':0,'maxstrict':0})
fiq_entry.pack(fill=X,padx=2,pady=4)
sc2=Scale(g1.interior(),orient=HORIZONTAL,length=200,from_=-30, \
to=30,variable=isc2,label='Tx I/Q Balance (0.1 dB)', \
relief=SOLID,bg='#EEDD82')
sc2.pack(side=TOP,padx=4,pady=2)
sc2a=Scale(g1.interior(),orient=HORIZONTAL,length=200,from_=-50, \
to=50,variable=isc2a,label='Tx I/Q Balance (0.002 dB)', \
relief=SOLID,bg='#EEDD82')
sc2a.pack(side=TOP,padx=4,pady=2)
sc3=Scale(g1.interior(),orient=HORIZONTAL,length=200,from_=-20, \
to=20,variable=isc3,label='Tx Phase (deg)', \
relief=SOLID,bg='#AFeeee')
sc3.pack(side=TOP,padx=4,pady=2)
sc3a=Scale(g1.interior(),orient=HORIZONTAL,length=200,from_=-50, \
to=50,variable=isc3a,label='Tx Phase (0.02 deg)', \
relief=SOLID,bg='#AFeeee')
sc3a.pack(side=TOP,padx=4,pady=2)
bsave=Button(g1.interior(), text='Save for this band',command=saveband,
width=32,padx=1,pady=2)
bsave.pack(padx=2,pady=4)
bsaveall=Button(g1.interior(), text='Save for all bands',command=saveall,
width=32,padx=1,pady=2)
bsaveall.pack(padx=2,pady=4)
f1=Frame(g1.interior(),width=100,height=1)
f1.pack()
g1.pack(side=LEFT,fill=BOTH,expand=1,padx=4,pady=4)
| gpl-2.0 | -423,385,164,766,437,600 | 27.46988 | 82 | 0.621244 | false | 2.233459 | false | false | false |
zestyr/lbry | lbrynet/core/server/ServerRequestHandler.py | 1 | 5866 | import json
import logging
from twisted.internet import interfaces, defer
from zope.interface import implements
from lbrynet.interfaces import IRequestHandler
log = logging.getLogger(__name__)
class ServerRequestHandler(object):
"""This class handles requests from clients. It can upload blobs and
return request for information about more blobs that are
associated with streams.
"""
implements(interfaces.IPushProducer, interfaces.IConsumer, IRequestHandler)
def __init__(self, consumer):
self.consumer = consumer
self.production_paused = False
self.request_buff = ''
self.response_buff = ''
self.producer = None
self.request_received = False
self.CHUNK_SIZE = 2**14
self.query_handlers = {} # {IQueryHandler: [query_identifiers]}
self.blob_sender = None
self.consumer.registerProducer(self, True)
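# Register with the transport as a streaming (push) producer so it can
# throttle us through pauseProducing/resumeProducing.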
#IPushProducer stuff
def pauseProducing(self):
self.production_paused = True
def stopProducing(self):
if self.producer is not None:
self.producer.stopProducing()
self.producer = None
self.production_paused = True
self.consumer.unregisterProducer()
def resumeProducing(self):
from twisted.internet import reactor
self.production_paused = False
self._produce_more()
if self.producer is not None:
reactor.callLater(0, self.producer.resumeProducing)
def _produce_more(self):
from twisted.internet import reactor
if self.production_paused:
return
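# Drain the response buffer to the client in CHUNK_SIZE slices, rescheduling
# on the reactor until the buffer is empty or production is paused.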
chunk = self.response_buff[:self.CHUNK_SIZE]
self.response_buff = self.response_buff[self.CHUNK_SIZE:]
if chunk == '':
return
log.trace("writing %s bytes to the client", len(chunk))
self.consumer.write(chunk)
reactor.callLater(0, self._produce_more)
#IConsumer stuff
def registerProducer(self, producer, streaming):
self.producer = producer
assert streaming is False
producer.resumeProducing()
def unregisterProducer(self):
self.producer = None
def write(self, data):
from twisted.internet import reactor
self.response_buff = self.response_buff + data
self._produce_more()
def get_more_data():
if self.producer is not None:
log.trace("Requesting more data from the producer")
self.producer.resumeProducing()
reactor.callLater(0, get_more_data)
#From Protocol
def data_received(self, data):
log.debug("Received data")
log.debug("%s", str(data))
if self.request_received is False:
return self._parse_data_and_maybe_send_blob(data)
else:
log.warning(
"The client sent data when we were uploading a file. This should not happen")
def _parse_data_and_maybe_send_blob(self, data):
self.request_buff = self.request_buff + data
msg = self.try_to_parse_request(self.request_buff)
if msg:
self.request_buff = ''
self._process_msg(msg)
else:
log.debug("Request buff not a valid json message")
log.debug("Request buff: %s", self.request_buff)
def _process_msg(self, msg):
d = self.handle_request(msg)
if self.blob_sender:
d.addCallback(lambda _: self.blob_sender.send_blob_if_requested(self))
d.addCallbacks(lambda _: self.finished_response(), self.request_failure_handler)
######### IRequestHandler #########
def register_query_handler(self, query_handler, query_identifiers):
self.query_handlers[query_handler] = query_identifiers
def register_blob_sender(self, blob_sender):
self.blob_sender = blob_sender
#response handling
def request_failure_handler(self, err):
log.warning("An error occurred handling a request. Error: %s", err.getErrorMessage())
self.stopProducing()
return err
def finished_response(self):
self.request_received = False
self._produce_more()
def send_response(self, msg):
m = json.dumps(msg)
log.debug("Sending a response of length %s", str(len(m)))
log.debug("Response: %s", str(m))
self.response_buff = self.response_buff + m
self._produce_more()
return True
def handle_request(self, msg):
log.debug("Handling a request")
log.debug(str(msg))
def create_response_message(results):
response = {}
for success, result in results:
if success is True:
response.update(result)
else:
# result is a Failure
return result
log.debug("Finished making the response message. Response: %s", str(response))
return response
def log_errors(err):
log.warning(
"An error occurred handling a client request. Error message: %s",
err.getErrorMessage())
return err
def send_response(response):
self.send_response(response)
return True
ds = []
for query_handler, query_identifiers in self.query_handlers.iteritems():
queries = {q_i: msg[q_i] for q_i in query_identifiers if q_i in msg}
d = query_handler.handle_queries(queries)
d.addErrback(log_errors)
ds.append(d)
dl = defer.DeferredList(ds)
dl.addCallback(create_response_message)
dl.addCallback(send_response)
return dl
def try_to_parse_request(self, request_buff):
try:
msg = json.loads(request_buff)
return msg
except ValueError:
return None
| mit | 8,375,703,638,704,539,000 | 30.537634 | 93 | 0.608421 | false | 4.335551 | false | false | false |
diegodelemos/reana-job-controller | tests/test_job_manager.py | 1 | 4870 | # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2019 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA-Job-Controller Job Manager tests."""
import json
import os
import uuid
import mock
import pytest
from reana_db.models import Job, JobStatus
from reana_job_controller.job_manager import JobManager
from reana_job_controller.kubernetes_job_manager import KubernetesJobManager
def test_execute_kubernetes_job(
app,
session,
sample_serial_workflow_in_db,
sample_workflow_workspace,
default_user,
empty_user_secrets,
corev1_api_client_with_user_secrets,
monkeypatch,
):
"""Test execution of Kubernetes job."""
workflow_uuid = sample_serial_workflow_in_db.id_
workflow_workspace = next(sample_workflow_workspace(str(workflow_uuid)))
env_var_key = "key"
env_var_value = "value"
expected_env_var = {env_var_key: env_var_value}
expected_image = "busybox"
expected_command = ["ls"]
monkeypatch.setenv("REANA_USER_ID", str(default_user.id_))
job_manager = KubernetesJobManager(
docker_img=expected_image,
cmd=expected_command,
env_vars=expected_env_var,
workflow_uuid=workflow_uuid,
workflow_workspace=workflow_workspace,
)
with mock.patch(
"reana_job_controller.kubernetes_job_manager." "current_k8s_batchv1_api_client"
) as kubernetes_client:
with mock.patch(
"reana_commons.k8s.secrets." "current_k8s_corev1_api_client",
corev1_api_client_with_user_secrets(empty_user_secrets),
):
kubernetes_job_id = job_manager.execute()
created_job = (
session.query(Job)
.filter_by(backend_job_id=kubernetes_job_id)
.one_or_none()
)
assert created_job
assert created_job.docker_img == expected_image
assert created_job.cmd == json.dumps(expected_command)
assert json.dumps(expected_env_var) in created_job.env_vars
assert created_job.status == JobStatus.created
kubernetes_client.create_namespaced_job.assert_called_once()
body = kubernetes_client.create_namespaced_job.call_args[1]["body"]
env_vars = body["spec"]["template"]["spec"]["containers"][0]["env"]
image = body["spec"]["template"]["spec"]["containers"][0]["image"]
command = body["spec"]["template"]["spec"]["containers"][0]["command"]
assert len(env_vars) == 3
assert {"name": env_var_key, "value": env_var_value} in env_vars
assert image == expected_image
assert command == expected_command
def test_stop_kubernetes_job(
app,
session,
sample_serial_workflow_in_db,
sample_workflow_workspace,
empty_user_secrets,
default_user,
corev1_api_client_with_user_secrets,
monkeypatch,
):
"""Test stop of Kubernetes job."""
workflow_uuid = sample_serial_workflow_in_db.id_
workflow_workspace = next(sample_workflow_workspace(str(workflow_uuid)))
expected_env_var_name = "env_var"
expected_env_var_value = "value"
expected_image = "busybox"
expected_command = ["ls"]
monkeypatch.setenv("REANA_USER_ID", str(default_user.id_))
job_manager = KubernetesJobManager(
docker_img=expected_image,
cmd=expected_command,
env_vars={expected_env_var_name: expected_env_var_value},
workflow_uuid=workflow_uuid,
workflow_workspace=workflow_workspace,
)
with mock.patch(
"reana_job_controller.kubernetes_job_manager." "current_k8s_batchv1_api_client"
) as kubernetes_client:
with mock.patch(
"reana_commons.k8s.secrets." "current_k8s_corev1_api_client",
corev1_api_client_with_user_secrets(empty_user_secrets),
):
kubernetes_job_id = job_manager.execute()
kubernetes_client.create_namespaced_job.assert_called_once()
job_manager.stop(kubernetes_job_id)
kubernetes_client.delete_namespaced_job.assert_called_once()
def test_execution_hooks():
"""Test hook execution order."""
class TestJobManger(JobManager):
@JobManager.execution_hook
def execute(self):
self.order_list.append(2)
job_id = str(uuid.uuid4())
return job_id
def before_execution(self):
self.order_list = []
self.order_list.append(1)
def create_job_in_db(self, job_id):
self.order_list.append(3)
def cache_job(self):
self.order_list.append(4)
job_manager = TestJobManger("busybox", "ls", {})
job_manager.execute()
assert job_manager.order_list == [1, 2, 3, 4]
| mit | -5,248,791,011,333,741,000 | 34.289855 | 87 | 0.634702 | false | 3.637043 | true | false | false |
kencochrane/docker-django-demo | dockerdemo/voting/migrations/0001_initial.py | 1 | 1782 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-16 20:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='date published')),
('last_update', models.DateTimeField(auto_now=True, verbose_name='last updated')),
],
),
migrations.CreateModel(
name='Vote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vote_date', models.DateTimeField(auto_now=True, verbose_name='date voted')),
('ip_address', models.GenericIPAddressField()),
('selection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='voting.Choice')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='voting.Question'),
),
]
| mit | -4,280,010,848,897,716,000 | 36.914894 | 114 | 0.574635 | false | 4.304348 | false | false | false |
todddeluca/diabric | diabric/files.py | 1 | 11396 |
'''
Fabric utilities for working with files.
'''
import StringIO
import contextlib
import os
import shutil
import subprocess
import uuid
from fabric.api import abort, sudo, run, settings, hide, put, local
from fabric.contrib.files import exists
##################
# HELPER FUNCTIONS
# These functions are reusable snippets meant to improve the consistency
# and modularity of files.py code
def set_mode(path, mode, remote=True, use_sudo=False):
'''
To improve code consistency and composition, this function
changes the mode of `path` to `mode`.
path: the path to the file or directory whose mode is being set.
remote: indicates that filename is a located on a remote host and `run`
or `sudo` should be used to set the mode.
use_sudo: only applies when remote is True. Use `sudo` instead of `run`.
'''
func = local if not remote else sudo if use_sudo else run
func('chmod {} {}'.format(oct(mode), path))
def backup_file(filename, remote=True, use_sudo=False, extension='.bak'):
'''
filename: path to a local or remote file
    If filename exists, copy filename to filename + extension (default '.bak').
'''
func = local if not remote else sudo if use_sudo else run
if exists(filename):
        func("cp %s %s%s" % (filename, filename, extension))
def normalize_dest(src, dest, remote=True, use_sudo=False):
'''
src: a file path
dest: a file or directory path
If dest is an existing directory, this returns a path to the basename of src within the directory dest.
    Otherwise, dest is returned unchanged.
This is useful for getting an actual filename when destination can be
a file or a directory.
'''
func = local if not remote else sudo if use_sudo else run
# Normalize dest to be an actual filename, due to using StringIO
with settings(hide('everything'), warn_only=True):
if func('test -d %s' % dest).succeeded:
dest = os.path.join(dest, os.path.basename(src))
return dest
################
# FILE FUNCTIONS
def file_template(filename, destination, context=None, use_jinja=False,
template_dir=None, backup=True, mirror_local_mode=False, mode=None):
"""
This is the local version of upload_template.
Render and copy a template text file to a local destination.
``filename`` should be the path to a text file, which may contain `Python
string interpolation formatting
<http://docs.python.org/release/2.5.4/lib/typesseq-strings.html>`_ and will
be rendered with the given context dictionary ``context`` (if given.)
Alternately, if ``use_jinja`` is set to True and you have the Jinja2
templating library available, Jinja will be used to render the template
instead. Templates will be loaded from the invoking user's current working
directory by default, or from ``template_dir`` if given.
The resulting rendered file will be written to the local file path
``destination``. If the destination file already exists, it will be
renamed with a ``.bak`` extension unless ``backup=False`` is specified.
The ``mirror_local_mode`` and ``mode`` kwargs are used in a similar
manner as in `~fabric.operations.put`; please see its documentation for
details on these two options.
"""
func = local
# make sure destination is a file name, not a directory name.
destination = normalize_dest(filename, destination, remote=False)
# grab mode before writing destination, in case filename and destination
# are the same.
if mirror_local_mode and mode is None:
# mode is numeric. See os.chmod or os.stat.
        mode = os.stat(filename).st_mode
# Process template
text = None
if use_jinja:
try:
from jinja2 import Environment, FileSystemLoader
jenv = Environment(loader=FileSystemLoader(template_dir or '.'))
text = jenv.get_template(filename).render(**context or {})
except ImportError:
import traceback
tb = traceback.format_exc()
abort(tb + "\nUnable to import Jinja2 -- see above.")
else:
with open(filename) as inputfile:
text = inputfile.read()
if context:
text = text % context
if backup:
backup_file(destination, remote=False)
# write the processed text
with open(destination, 'w') as fh:
fh.write(text)
if mode:
set_mode(destination, mode, remote=False)
def fix_shebang(shebang, handle):
'''
shebang: a shebang line, e.g. #!/usr/bin/env python or #!/bin/sh. If
shebang does not start with '#!', then '#!' will be prepended to it. If
shebang does not end with a newline, a newline will be appended.
    handle: an iterable of lines, presumably the contents of a file that needs a
shebang line or a new shebang line.
Yield shebang and then the lines in handle except the first line in handle
if it is a shebang line.
'''
    # make sure shebang starts with '#!' and ends with a newline.
if not shebang.startswith('#!'):
shebang = '#!' + shebang
if not shebang.endswith('\n'):
shebang += '\n'
for i, line in enumerate(handle):
if i == 0:
yield shebang
if not line.startswith('#!'):
yield line
else:
yield line
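def _fix_shebang_example():
    """Hedged sketch (not part of the original diabric module): shows how
    fix_shebang replaces an existing shebang line; the input lines are
    made-up examples."""
    lines = ['#!/bin/sh\n', 'echo hi\n']
    fixed = ''.join(fix_shebang('#!/bin/bash', lines))
    # fixed == '#!/bin/bash\necho hi\n'
    return fixed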
def upload_shebang(filename, destination, shebang, use_sudo=False, backup=True,
mirror_local_mode=False, mode=None):
"""
Upload a text file to a remote host, adding or updating the shebang line.
``filename`` should be the path to a text file.
``shebang`` should be a string containing a shebang line. E.g.
"#!/usr/bin/python\n". If shebang does not start with '#!' or end with a
newline, these will be added.
If the first line in filename starts with '#!' it will be replaced with
shebang. If the first line does not start with #!, shebang will be
prepended to the contents of filename.
The resulting file will be uploaded to the remote file path
``destination``. If the destination file already exists, it will be
renamed with a ``.bak`` extension unless ``backup=False`` is specified.
By default, the file will be copied to ``destination`` as the logged-in
user; specify ``use_sudo=True`` to use `sudo` instead.
The ``mirror_local_mode`` and ``mode`` kwargs are passed directly to an
internal `~fabric.operations.put` call; please see its documentation for
details on these two options.
"""
func = use_sudo and sudo or run
# Normalize destination to be an actual filename, due to using StringIO
with settings(hide('everything'), warn_only=True):
if func('test -d %s' % destination).succeeded:
sep = "" if destination.endswith('/') else "/"
destination += sep + os.path.basename(filename)
# Use mode kwarg to implement mirror_local_mode, again due to using
# StringIO
if mirror_local_mode and mode is None:
mode = os.stat(filename).st_mode
# To prevent put() from trying to do this
# logic itself
mirror_local_mode = False
# process filename
text = None
with open(filename) as inputfile:
text = ''.join(fix_shebang(shebang, inputfile))
# Back up original file
if backup and exists(destination):
func("cp %s{,.bak}" % destination)
# Upload the file.
put(
local_path=StringIO.StringIO(text),
remote_path=destination,
use_sudo=use_sudo,
mirror_local_mode=mirror_local_mode,
mode=mode
)
def upload_format(filename, destination, args=None, kws=None,
use_sudo=False, backup=True, mirror_local_mode=False,
mode=None):
"""
Read in the contents of filename, format the contents via
contents.format(*args, **kws), and upload the results to the
destination on the remote host.
``filename`` should be the path to a text file. The contents of
``filename`` will be read in.
Format the contents, using contents.format(*args, **kws). If
args is None, it will not be included in the format() call.
Likewise for kws.
The resulting contents will be uploaded to the remote file path
``destination``. If the destination file already exists, it will be
renamed with a ``.bak`` extension unless ``backup=False`` is specified.
By default, the file will be copied to ``destination`` as the logged-in
user; specify ``use_sudo=True`` to use `sudo` instead.
The ``mirror_local_mode`` and ``mode`` kwargs are passed directly to an
internal `~fabric.operations.put` call; please see its documentation for
details on these two options.
"""
func = use_sudo and sudo or run
# Normalize destination to be an actual filename, due to using StringIO
with settings(hide('everything'), warn_only=True):
if func('test -d %s' % destination).succeeded:
sep = "" if destination.endswith('/') else "/"
destination += sep + os.path.basename(filename)
# Use mode kwarg to implement mirror_local_mode, again due to using
# StringIO
if mirror_local_mode and mode is None:
mode = os.stat(filename).st_mode
# To prevent put() from trying to do this
# logic itself
mirror_local_mode = False
# process filename
text = None
with open(filename) as inputfile:
if not args:
args = []
if not kws:
kws = {}
text = inputfile.read().format(*args, **kws)
# Back up original file
if backup and exists(destination):
func("cp %s{,.bak}" % destination)
# Upload the file.
put(
local_path=StringIO.StringIO(text),
remote_path=destination,
use_sudo=use_sudo,
mirror_local_mode=mirror_local_mode,
mode=mode
)
def file_format(infile, outfile, args=None, kws=None):
'''
Consider using fabric.contrib.files.upload_template or upload_format
infile: a local file path
outfile: a local file path.
Read the contents of infile as a string, ''.format() the string using args
and kws, and write the formatted string to outfile. This is useful if
infile is a "template" and args and kws contain the concrete values
for the template.
'''
if args is None:
args = []
if kws is None:
        kws = {}
with open(infile) as fh:
text = fh.read()
new_text = text.format(*args, **kws)
with open(outfile, 'w') as fh2:
fh2.write(new_text)
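def _file_format_example():
    """Hedged sketch (not part of the original diabric module): fills a
    two-placeholder template. 'greeting.tmpl' (assumed to contain
    'Hello {0}, today is {day}') and 'greeting.txt' are made-up paths."""
    file_format('greeting.tmpl', 'greeting.txt',
                args=['world'], kws={'day': 'Monday'})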
def rsync(options, src, dest, user=None, host=None, cwd=None):
'''
Consider using fabric.contrib.project.rsync_project.
options: list of rsync options, e.g. ['--delete', '-avz']
src: source directory (or files). Note: rsync behavior varies depending on whether or not src dir ends in '/'.
dest: destination directory.
cwd: change (using subprocess) to cwd before running rsync.
This is a helper function for running rsync locally, via subprocess. Note: shell=False.
'''
# if remote user and host specified, copy there instead of locally.
if user and host:
destStr = '{}@{}:{}'.format(user, host, dest)
else:
destStr = dest
args = ['rsync'] + options + [src, destStr]
print args
subprocess.check_call(args, cwd=cwd)
| mit | 3,146,706,489,054,374,400 | 33.850153 | 115 | 0.650053 | false | 4.106667 | false | false | false |
zeroSteiner/boltons | boltons/formatutils.py | 1 | 11298 | # -*- coding: utf-8 -*-
"""`PEP 3101`_ introduced the :meth:`str.format` method, and what
would later be called "new-style" string formatting. For the sake of
explicit correctness, it is probably best to refer to Python's dual
string formatting capabilities as *bracket-style* and
*percent-style*. There is overlap, but one does not replace the
other.
* Bracket-style is more pluggable, slower, and uses a method.
* Percent-style is simpler, faster, and uses an operator.
Bracket-style formatting brought with it a much more powerful toolbox,
but it was far from a full one. :meth:`str.format` uses `more powerful
syntax`_, but `the tools and idioms`_ for working with
that syntax are not well-developed nor well-advertised.
``formatutils`` adds several functions for working with bracket-style
format strings:
* :class:`DeferredValue`: Defer fetching or calculating a value
until format time.
* :func:`get_format_args`: Parse the positional and keyword
arguments out of a format string.
* :func:`tokenize_format_str`: Tokenize a format string into
literals and :class:`BaseFormatField` objects.
  * :func:`construct_format_field_str`: Assists in programmatic
construction of format strings.
* :func:`infer_positional_format_args`: Converts anonymous
references in 2.7+ format strings to explicit positional arguments
suitable for usage with Python 2.6.
.. _more powerful syntax: https://docs.python.org/2/library/string.html#format-string-syntax
.. _the tools and idioms: https://docs.python.org/2/library/string.html#string-formatting
.. _PEP 3101: https://www.python.org/dev/peps/pep-3101/
"""
# TODO: also include percent-formatting utils?
# TODO: include lithoxyl.formatters.Formatter (or some adaptation)?
from __future__ import print_function
import re
from string import Formatter
__all__ = ['DeferredValue', 'get_format_args', 'tokenize_format_str',
'construct_format_field_str', 'infer_positional_format_args',
'BaseFormatField']
_pos_farg_re = re.compile('({{)|' # escaped open-brace
'(}})|' # escaped close-brace
                          r'({[:!.\[}])')  # anon positional format arg
def construct_format_field_str(fname, fspec, conv):
"""
Constructs a format field string from the field name, spec, and
conversion character (``fname``, ``fspec``, ``conv``). See Python
String Formatting for more info.
"""
if fname is None:
return ''
ret = '{' + fname
if conv:
ret += '!' + conv
if fspec:
ret += ':' + fspec
ret += '}'
return ret
def split_format_str(fstr):
    """Does very basic splitting of a format string, returns a list of
strings. For full tokenization, see :func:`tokenize_format_str`.
"""
ret = []
for lit, fname, fspec, conv in Formatter().parse(fstr):
if fname is None:
ret.append((lit, None))
continue
field_str = construct_format_field_str(fname, fspec, conv)
ret.append((lit, field_str))
return ret
def infer_positional_format_args(fstr):
    """Takes format strings with anonymous positional arguments (e.g.,
    "{}" and "{:d}"), and converts them into numbered ones for explicitness and
compatibility with 2.6.
Returns a string with the inferred positional arguments.
"""
# TODO: memoize
ret, max_anon = '', 0
# look for {: or {! or {. or {[ or {}
start, end, prev_end = 0, 0, 0
for match in _pos_farg_re.finditer(fstr):
start, end, group = match.start(), match.end(), match.group()
if prev_end < start:
ret += fstr[prev_end:start]
prev_end = end
if group == '{{' or group == '}}':
ret += group
continue
ret += '{%s%s' % (max_anon, group[1:])
max_anon += 1
ret += fstr[prev_end:]
return ret
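def _infer_positional_example():
    """Hedged sketch (not part of boltons): anonymous fields are renumbered
    while escaped braces are left untouched."""
    out = infer_positional_format_args('{} scored {:d} points {{unchanged}}')
    assert out == '{0} scored {1:d} points {{unchanged}}'
    return out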
# This approach is hardly exhaustive but it works for most builtins
_INTCHARS = 'bcdoxXn'
_FLOATCHARS = 'eEfFgGn%'
_TYPE_MAP = dict([(x, int) for x in _INTCHARS] +
[(x, float) for x in _FLOATCHARS])
_TYPE_MAP['s'] = str
def get_format_args(fstr):
"""
Turn a format string into two lists of arguments referenced by the
format string. One is positional arguments, and the other is named
arguments. Each element of the list includes the name and the
nominal type of the field.
# >>> get_format_args("{noun} is {1:d} years old{punct}")
# ([(1, <type 'int'>)], [('noun', <type 'str'>), ('punct', <type 'str'>)])
# XXX: Py3k
>>> get_format_args("{noun} is {1:d} years old{punct}") == \
([(1, int)], [('noun', str), ('punct', str)])
True
"""
# TODO: memoize
formatter = Formatter()
fargs, fkwargs, _dedup = [], [], set()
def _add_arg(argname, type_char='s'):
if argname not in _dedup:
_dedup.add(argname)
argtype = _TYPE_MAP.get(type_char, str) # TODO: unicode
try:
fargs.append((int(argname), argtype))
except ValueError:
fkwargs.append((argname, argtype))
for lit, fname, fspec, conv in formatter.parse(fstr):
if fname is not None:
type_char = fspec[-1:]
fname_list = re.split('[.[]', fname)
if len(fname_list) > 1:
raise ValueError('encountered compound format arg: %r' % fname)
try:
base_fname = fname_list[0]
assert base_fname
except (IndexError, AssertionError):
raise ValueError('encountered anonymous positional argument')
_add_arg(fname, type_char)
for sublit, subfname, _, _ in formatter.parse(fspec):
# TODO: positional and anon args not allowed here.
if subfname is not None:
_add_arg(subfname)
return fargs, fkwargs
def tokenize_format_str(fstr, resolve_pos=True):
"""Takes a format string, turns it into a list of alternating string
literals and :class:`BaseFormatField` tokens. By default, also
infers anonymous positional references into explict, numbered
positional references. To disable this behavior set *resolve_pos*
to ``False``.
"""
ret = []
if resolve_pos:
fstr = infer_positional_format_args(fstr)
formatter = Formatter()
for lit, fname, fspec, conv in formatter.parse(fstr):
if lit:
ret.append(lit)
if fname is None:
continue
ret.append(BaseFormatField(fname, fspec, conv))
return ret
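def _tokenize_format_str_example():
    """Hedged sketch (not part of boltons): a format string splits into
    literal text and BaseFormatField tokens."""
    tokens = tokenize_format_str('x={x:.2f} and {}')
    # -> ['x=', BaseFormatField('x', '.2f'), ' and ', BaseFormatField('0')]
    return tokens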
class BaseFormatField(object):
"""A class representing a reference to an argument inside of a
bracket-style format string. For instance, in ``"{greeting},
world!"``, there is a field named "greeting".
These fields can have many options applied to them. See the
Python docs on `Format String Syntax`_ for the full details.
.. _Format String Syntax: https://docs.python.org/2/library/string.html#string-formatting
"""
def __init__(self, fname, fspec='', conv=None):
self.set_fname(fname)
self.set_fspec(fspec)
self.set_conv(conv)
def set_fname(self, fname):
"Set the field name."
path_list = re.split('[.[]', fname) # TODO
self.base_name = path_list[0]
self.fname = fname
self.subpath = path_list[1:]
self.is_positional = not self.base_name or self.base_name.isdigit()
def set_fspec(self, fspec):
"Set the field spec."
fspec = fspec or ''
subfields = []
for sublit, subfname, _, _ in Formatter().parse(fspec):
if subfname is not None:
subfields.append(subfname)
self.subfields = subfields
self.fspec = fspec
self.type_char = fspec[-1:]
self.type_func = _TYPE_MAP.get(self.type_char, str)
def set_conv(self, conv):
"""There are only two built-in converters: ``s`` and ``r``. They are
        somewhat rare and appear like ``"{ref!r}"``."""
# TODO
self.conv = conv
self.conv_func = None # TODO
@property
def fstr(self):
"The current state of the field in string format."
return construct_format_field_str(self.fname, self.fspec, self.conv)
def __repr__(self):
cn = self.__class__.__name__
args = [self.fname]
if self.conv is not None:
args.extend([self.fspec, self.conv])
elif self.fspec != '':
args.append(self.fspec)
args_repr = ', '.join([repr(a) for a in args])
return '%s(%s)' % (cn, args_repr)
def __str__(self):
return self.fstr
_UNSET = object()
class DeferredValue(object):
""":class:`DeferredValue` is a wrapper type, used to defer computing
values which would otherwise be expensive to stringify and
format. This is most valuable in areas like logging, where one
would not want to waste time formatting a value for a log message
which will subsequently be filtered because the message's log
level was DEBUG and the logger was set to only emit CRITICAL
messages.
The :class:``DeferredValue`` is initialized with a callable that
takes no arguments and returns the value, which can be of any
type. By default DeferredValue only calls that callable once, and
future references will get a cached value. This behavior can be
disabled by setting *cache_value* to ``False``.
Args:
func (function): A callable that takes no arguments and
computes the value being represented.
cache_value (bool): Whether subsequent usages will call *func*
again. Defaults to ``True``.
>>> import sys
>>> dv = DeferredValue(lambda: len(sys._current_frames()))
>>> output = "works great in all {0} threads!".format(dv)
PROTIP: To keep lines shorter, use: ``from formatutils import
DeferredValue as DV``
"""
def __init__(self, func, cache_value=True):
self.func = func
        self.cache_value = cache_value
self._value = _UNSET
def get_value(self):
"""Computes, optionally caches, and returns the value of the
*func*. If ``get_value()`` has been called before, a cached
value may be returned depending on the *cache_value* option
passed to the constructor.
"""
if self._value is not _UNSET and self.cache_value:
value = self._value
else:
value = self.func()
if self.cache_value:
self._value = value
return value
def __int__(self):
return int(self.get_value())
def __float__(self):
return float(self.get_value())
def __str__(self):
return str(self.get_value())
def __unicode__(self):
return unicode(self.get_value())
def __repr__(self):
return repr(self.get_value())
def __format__(self, fmt):
value = self.get_value()
pt = fmt[-1:] # presentation type
type_conv = _TYPE_MAP.get(pt, str)
try:
return value.__format__(fmt)
except (ValueError, TypeError):
# TODO: this may be overkill
return type_conv(value).__format__(fmt)
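def _deferred_value_example():
    """Hedged sketch (not part of boltons): with the default cache_value=True
    the wrapped callable runs once, however many times the value is
    formatted."""
    calls = []
    def expensive():
        calls.append(1)
        return len(calls)
    dv = DeferredValue(expensive)
    '{0} and {0}'.format(dv)  # formats twice, computes once
    return len(calls)  # -> 1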
# end formatutils.py
| bsd-3-clause | 5,633,073,641,725,229,000 | 33.340426 | 93 | 0.610816 | false | 3.842857 | false | false | false |
iandees/all-the-places | locations/spiders/sunloan.py | 1 | 2377 | # -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
DAYS={
'Monday':'Mo',
'Tuesday':'Tu',
'Wednesday':'We',
'Friday':'Fr',
'Thursday':'Th',
'Saturday':'Sa',
'Sunday':'Su',
}
class SunLoanSpider(scrapy.Spider):
name = "sunloan"
allowed_domains = ["sunloan.com"]
start_urls = (
'https://www.sunloan.com/locations/',
)
download_delay = 0.5
def parse(self, response):
urls = response.xpath('//div[@id="custom-locations-2"]//div[@class="location-box"]/div/p/strong/a/@href').extract()
for url in urls:
yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
def parse_store(self, response):
try:
data = response.xpath('//script[contains(text(),"latitude")]/text()').extract_first()
data = json.loads(data)
except TypeError:
return
except json.JSONDecodeError:
data = data.replace('"hasMap": \r\n', '')
data = json.loads(data)
if not data:
return
properties = {
'lat' : float(data['geo']['latitude']),
'lon' : float(data['geo']['longitude']),
'website' : response.url,
'ref' : response.url,
'addr_full' : data['address']['streetAddress'],
'city' : data['address']['addressLocality'],
'state' : data['address']['addressRegion'],
'postcode' : data['address']['postalCode'],
'country' : 'US',
'name' : data['name'],
}
try:
hours = data['openingHours']
if hours:
properties['opening_hours'] = hours
        except KeyError:
pass
yield GeojsonPointItem(**properties)
| mit | 2,616,339,130,988,153,000 | 27.987805 | 123 | 0.519983 | false | 3.90312 | false | false | false |
PiaBianca/PyMaster | pymasterlib/ask.py | 1 | 1995 | # PyMaster
# Copyright (C) 2014, 2015 FreedomOfRestriction <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pymasterlib as lib
from pymasterlib.constants import *
def load_text(ID):
return lib.message.load_text("ask", ID)
def what():
m = load_text("ask_what")
c = [load_text("choice_rules"), load_text("choice_chore"),
load_text("choice_punishments"), load_text("choice_nothing")]
choice = lib.message.get_choice(m, c, len(c) - 1)
if choice == 0:
lib.scripts.show_rules()
elif choice == 1:
chore()
elif choice == 2:
punishments()
def chore():
if lib.slave.queued_chore is not None:
lib.message.show(lib.slave.queued_chore["text"])
else:
lib.message.show(load_text("no_chore"))
def punishments():
lib.slave.forget()
punishments_ = []
for i in lib.slave.misdeeds:
for misdeed in lib.slave.misdeeds[i]:
if not misdeed["punished"] and misdeed["punishment"] is not None:
punishments_.append(misdeed["punishment"])
if punishments_:
if len(punishments_) > 1:
m = load_text("punishments").format(len(punishments_))
lib.message.show(m)
for punishment in punishments_:
lib.message.show(punishment["text"])
else:
lib.message.show(load_text("no_punishments"))
| gpl-3.0 | -6,075,815,982,666,345,000 | 30.666667 | 86 | 0.66416 | false | 3.421955 | false | false | false |
dark1729dragon/pixutils | pixutils/vplayer/PlayImgs.py | 1 | 2169 | from __future__ import nested_scopes, generators, division, absolute_import, with_statement, print_function, unicode_literals
from .videoutils import *
def default_labeller(x):
try:
return int(x.split('_')[1].split('.')[0])
except:
try:
return int(basename(x).split('_')[1].split('.')[0])
except:
return 0
class Im2Video():
'''
def labeller(impath):
return impath.replace('.jpg','').split('_')[1]
vpath = join(dbpath, r'videoimgs/*.*')
cam = Imgs2Video(vpath, labeller)
video = Player(cam)
cam = ThreadIt(cam)
imshow = win(video)
for fno, img in video.play():
imshow('show_video', img, 1)
'''
def __init__(self, opaths, labeller=None):
labeller = labeller or default_labeller
if type(opaths) not in (list, tuple):
paths = glob(opaths)
else:
paths = opaths
if not paths:
raise Exception('No file found in %s' % opaths)
paths = [(int(labeller(path)), path) for path in paths]
self.paths = sorted(paths, key=lambda x: x[0])
self.frameno, self.paths = list(zip(*self.paths))
self.row, self.col = cv2.imread(self.paths[0]).shape[:2]
self.index = -1
def release(self):
pass
def read(self):
self.index += 1
if len(self.paths) <= self.index:
return False, None
try:
return True, cv2.imread(self.paths[self.index])
except:
return None, None
def get(self, i):
if i == 3:
return self.col
elif i == 4:
return self.row
elif i == 5:
return 30
elif i == 7:
return len(self.paths)
def set(self, i, start_frame):
self.index += (start_frame - 1)
def GetFeed(vpath, *a, **kw):
if type(vpath) == int:
return 'stream', cv2.VideoCapture(vpath)
elif type(vpath) in (list,tuple) or '*' in vpath:
return 'imgs', Im2Video(vpath, *a, **kw)
else:
assert exists(vpath), 'Video File missing: %s' % vpath
return 'video', cv2.VideoCapture(vpath) | bsd-2-clause | 8,355,155,217,002,081,000 | 27.552632 | 125 | 0.545874 | false | 3.464856 | false | false | false |
jonathf/chaospy | chaospy/distributions/baseclass/operator.py | 1 | 2655 | """Operator transformation."""
import numpy
import chaospy
from ..baseclass import Distribution
class OperatorDistribution(Distribution):
"""Operator transformation."""
def __init__(self, left, right, exclusion=None, repr_args=None):
if not isinstance(left, Distribution):
left = numpy.atleast_1d(left)
if left.ndim > 1:
raise chaospy.UnsupportedFeature(
"distribution operators limited to at-most 1D arrays.")
if not isinstance(right, Distribution):
right = numpy.atleast_1d(right)
if right.ndim > 1:
raise chaospy.UnsupportedFeature(
"distribution operators limited to at-most 1D arrays.")
dependencies, parameters, rotation = chaospy.declare_dependencies(
distribution=self,
parameters=dict(left=left, right=right),
is_operator=True,
)
super(OperatorDistribution, self).__init__(
parameters=parameters,
dependencies=dependencies,
exclusion=exclusion,
repr_args=repr_args,
)
self._cache_copy = {}
self._lower_cache = {}
self._upper_cache = {}
def get_parameters(self, idx, cache, assert_numerical=True):
parameters = super(OperatorDistribution, self).get_parameters(
idx, cache, assert_numerical=assert_numerical)
assert set(parameters) == {"cache", "left", "right", "idx"}
if isinstance(parameters["left"], Distribution):
parameters["left"] = parameters["left"]._get_cache(idx, cache=parameters["cache"], get=0)
elif len(parameters["left"]) > 1 and idx is not None:
parameters["left"] = parameters["left"][idx]
if isinstance(parameters["right"], Distribution):
parameters["right"] = parameters["right"]._get_cache(idx, cache=parameters["cache"], get=0)
elif len(parameters["right"]) > 1 and idx is not None:
parameters["right"] = parameters["right"][idx]
if assert_numerical:
assert (not isinstance(parameters["left"], Distribution) or
not isinstance(parameters["right"], Distribution))
if cache is not self._cache_copy:
self._cache_copy = cache
self._lower_cache = {}
self._upper_cache = {}
if idx is None:
del parameters["idx"]
return parameters
def _cache(self, idx, cache, get):
assert get == 0
parameters = self.get_parameters(idx, cache)
return self._operator(parameters["left"], parameters["right"])
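class _SumOperatorSketch(OperatorDistribution):
    """Hedged illustration (not part of chaospy): shows only the `_operator`
    hook that concrete operator distributions implement; real subclasses
    (such as the ones behind ``dist1 + dist2``) also provide density and
    transform methods."""
    def _operator(self, left, right):
        # Combine the resolved left/right operands element-wise.
        return left + right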
| mit | 8,029,556,839,526,679,000 | 39.227273 | 103 | 0.59548 | false | 4.561856 | false | false | false |
usingnamespace/pyramid_authsanity | src/pyramid_authsanity/policy.py | 1 | 6144 | import base64
import os
from pyramid.authorization import Authenticated, Everyone
from pyramid.interfaces import IAuthenticationPolicy, IDebugLogger
from zope.interface import implementer
from .util import _find_services, _session_registered, add_vary_callback
def _clean_principal(princid):
"""Utility function that cleans up the passed in principal
This can easily also be extended for example to make sure that certain
usernames are automatically off-limits.
"""
if princid in (Authenticated, Everyone):
princid = None
return princid
_marker = object()
@implementer(IAuthenticationPolicy)
class AuthServicePolicy(object):
def _log(self, msg, methodname, request):
logger = request.registry.queryUtility(IDebugLogger)
if logger:
cls = self.__class__
classname = cls.__module__ + "." + cls.__name__
methodname = classname + "." + methodname
logger.debug(methodname + ": " + msg)
_find_services = staticmethod(_find_services) # Testing
_session_registered = staticmethod(_session_registered) # Testing
_have_session = _marker
def __init__(self, debug=False):
self.debug = debug
def unauthenticated_userid(self, request):
""" We do not allow the unauthenticated userid to be used. """
def authenticated_userid(self, request):
""" Returns the authenticated userid for this request. """
debug = self.debug
(sourcesvc, authsvc) = self._find_services(request)
request.add_response_callback(add_vary_callback(sourcesvc.vary))
try:
userid = authsvc.userid()
except Exception:
debug and self._log(
"authentication has not yet been completed",
"authenticated_userid",
request,
)
(principal, ticket) = sourcesvc.get_value()
debug and self._log(
"source service provided information: (principal: %r, ticket: %r)"
% (principal, ticket),
"authenticated_userid",
request,
)
# Verify the principal and the ticket, even if None
authsvc.verify_ticket(principal, ticket)
try:
# This should now return None or the userid
userid = authsvc.userid()
except Exception:
userid = None
debug and self._log(
"authenticated_userid returning: %r" % (userid,),
"authenticated_userid",
request,
)
return userid
def effective_principals(self, request):
""" A list of effective principals derived from request. """
debug = self.debug
effective_principals = [Everyone]
userid = self.authenticated_userid(request)
(_, authsvc) = self._find_services(request)
if userid is None:
debug and self._log(
"authenticated_userid returned %r; returning %r"
% (userid, effective_principals),
"effective_principals",
request,
)
return effective_principals
if _clean_principal(userid) is None:
debug and self._log(
(
"authenticated_userid returned disallowed %r; returning %r "
"as if it was None" % (userid, effective_principals)
),
"effective_principals",
request,
)
return effective_principals
effective_principals.append(Authenticated)
effective_principals.append(userid)
effective_principals.extend(authsvc.groups())
debug and self._log(
"returning effective principals: %r" % (effective_principals,),
"effective_principals",
request,
)
return effective_principals
def remember(self, request, principal, **kw):
""" Returns a list of headers that are to be set from the source service. """
debug = self.debug
if self._have_session is _marker:
self._have_session = self._session_registered(request)
prev_userid = self.authenticated_userid(request)
(sourcesvc, authsvc) = self._find_services(request)
request.add_response_callback(add_vary_callback(sourcesvc.vary))
value = {}
value["principal"] = principal
value["ticket"] = ticket = (
base64.urlsafe_b64encode(os.urandom(32)).rstrip(b"=").decode("ascii")
)
debug and self._log(
"Remember principal: %r, ticket: %r" % (principal, ticket),
"remember",
request,
)
authsvc.add_ticket(principal, ticket)
# Clear the previous session
if self._have_session:
if prev_userid != principal:
request.session.invalidate()
else:
# We are logging in the same user that is already logged in, we
# still want to generate a new session, but we can keep the
# existing data
data = dict(request.session.items())
request.session.invalidate()
request.session.update(data)
request.session.new_csrf_token()
return sourcesvc.headers_remember([principal, ticket])
def forget(self, request):
""" A list of headers which will delete appropriate cookies."""
debug = self.debug
if self._have_session is _marker:
self._have_session = self._session_registered(request)
(sourcesvc, authsvc) = self._find_services(request)
request.add_response_callback(add_vary_callback(sourcesvc.vary))
(_, ticket) = sourcesvc.get_value()
debug and self._log("Forgetting ticket: %r" % (ticket,), "forget", request)
authsvc.remove_ticket(ticket)
# Clear the session by invalidating it
if self._have_session:
request.session.invalidate()
return sourcesvc.headers_forget()
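def _example_configure(config):
    """Hedged usage sketch (not part of this module): attach the policy to a
    Pyramid configurator. ``set_authentication_policy`` is standard Pyramid
    API; the source and auth services are assumed to be registered elsewhere
    (normally by this package's own includeme)."""
    config.set_authentication_policy(AuthServicePolicy(debug=True))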
| isc | -7,210,039,205,602,359,000 | 32.032258 | 85 | 0.585938 | false | 4.578241 | false | false | false |
CroceRossaItaliana/jorvik | formazione/migrations/0035_auto_20190510_1149.py | 1 | 2215 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2019-05-10 11:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('formazione', '0034_auto_20190408_1047'),
]
operations = [
migrations.CreateModel(
name='RelazioneCorso',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creazione', models.DateTimeField(db_index=True, default=django.utils.timezone.now)),
('ultima_modifica', models.DateTimeField(auto_now=True, db_index=True)),
('note_esplicative', models.TextField(help_text='note esplicative in relazione ai cambiamenti effettuati rispetto alla programmazione approvata in fase di pianificazione iniziale del corso.', verbose_name='Note esplicative')),
('raggiungimento_obiettivi', models.TextField(help_text="Analisi sul raggiungimento degli obiettivi del corso (generali rispetto all'evento e specifici di apprendimento).", verbose_name='Raggiungimento degli obiettivi del corso')),
('annotazioni_corsisti', models.TextField(verbose_name='Annotazioni relative alla partecipazione dei corsisti')),
('annotazioni_risorse', models.TextField(help_text='Annotazioni relative a risorse e competenze di particolare rilevanza emerse durante il percorso formativo')),
('annotazioni_organizzazione_struttura', models.TextField(help_text="Annotazioni e segnalazioni sull'organizzazione e la logistica e della struttura ospitante il corso")),
('descrizione_attivita', models.TextField(help_text='Descrizione delle eventuali attività di tirocinio/affiancamento con indicazione dei Tutor')),
('corso', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='formazione.CorsoBase')),
],
options={
'verbose_name': 'Relazione del Direttore',
'verbose_name_plural': 'Relazioni dei Direttori',
},
),
]
| gpl-3.0 | 6,304,392,364,393,148,000 | 60.5 | 247 | 0.681572 | false | 3.432558 | false | false | false |
kgullikson88/General | Analyze_CCF.py | 1 | 9048 | """
This is a module to read in an HDF5 file with CCFs.
Use this to determine the best parameters, and plot the best CCF for each star/date
"""
from collections import defaultdict
import logging
import h5py
import numpy as np
import pandas as pd
from scipy.interpolate import InterpolatedUnivariateSpline as spline
class CCF_Interface(object):
def __init__(self, filename, vel=np.arange(-900, 900, 1)):
self.hdf5 = h5py.File(filename, 'r')
self.velocities = vel
self._df = None
def __getitem__(self, path):
return self.hdf5[path]
def list_stars(self, print2screen=False):
"""
List the stars available in the HDF5 file, and the dates available for each
:return: A list of the stars
"""
if print2screen:
for star in sorted(self.hdf5.keys()):
print(star)
for date in sorted(self.hdf5[star].keys()):
print('\t{}'.format(date))
return sorted(self.hdf5.keys())
def list_dates(self, star, print2screen=False):
"""
List the dates available for the given star
:param star: The name of the star
:return: A list of dates the star was observed
"""
if print2screen:
for date in sorted(self.hdf5[star].keys()):
print(date)
return sorted(self.hdf5[star].keys())
def load_cache(self, addmode='simple'):
"""
Read in the whole HDF5 file. This will take a while and take a few Gb of memory, but will speed things up considerably
:keyword addmode: The way the individual CCFs were added. Options are:
- 'simple'
- 'ml'
- 'all' (saves all addmodes)
"""
self._df = self._compile_data(addmode=addmode)
def _compile_data(self, starname=None, date=None, addmode='simple', read_ccf=True):
"""
Private function. This reads in all the datasets for the given star and date
:param starname: the name of the star. Must be in self.hdf5
:param date: The date to search. Must be in self.hdf5[star]
:keyword addmode: The way the individual CCFs were added. Options are:
- 'simple'
- 'ml'
- 'all' (saves all addmodes)
:return: a pandas DataFrame with the columns:
- star
- date
- temperature
- log(g)
- [Fe/H]
- vsini
- addmode
- rv (at maximum CCF value)
- CCF height (maximum)
"""
if starname is None:
df_list = []
star_list = self.list_stars()
for star in star_list:
date_list = self.list_dates(star)
for date in date_list:
logging.debug('Reading in metadata for star {}, date {}'.format(star, date))
df_list.append(self._compile_data(star, date, addmode=addmode, read_ccf=read_ccf))
return pd.concat(df_list, ignore_index=True)
elif starname is not None and date is None:
df_list = []
date_list = self.list_dates(starname)
for date in date_list:
logging.debug('Reading in metadata for date {}'.format(date))
df_list.append(self._compile_data(starname, date, addmode=addmode, read_ccf=read_ccf))
return pd.concat(df_list, ignore_index=True)
else:
if self._df is not None:
return self._df.loc[(self._df['Star'] == starname) & (self._df['Date'] == date)].copy()
#print('Stars: ', self.list_stars())
datasets = self.hdf5[starname][date].keys()
data = defaultdict(list)
for ds_name, ds in self.hdf5[starname][date].iteritems(): # in datasets:
#ds = self.hdf5[starname][date][ds_name]
try:
am = ds.attrs['addmode']
if addmode == 'all' or addmode == am:
data['T'].append(ds.attrs['T'])
data['logg'].append(ds.attrs['logg'])
data['[Fe/H]'].append(ds.attrs['[Fe/H]'])
data['vsini'].append(ds.attrs['vsini'])
data['addmode'].append(am)
data['name'].append(ds.name)
try:
data['ccf_max'].append(ds.attrs['ccf_max'])
data['vel_max'].append(ds.attrs['vel_max'])
except KeyError:
vel, corr = ds.value
idx = np.argmax(corr)
data['ccf_max'].append(corr[idx])
data['vel_max'].append(vel[idx])
if read_ccf:
v = ds.value
vel, corr = v[0], v[1]
sorter = np.argsort(vel)
fcn = spline(vel[sorter], corr[sorter])
data['ccf'].append(fcn(self.velocities))
except:
raise IOError('Something weird happened with dataset {}!'.format(ds.name))
data['Star'] = [starname] * len(data['T'])
data['Date'] = [date] * len(data['T'])
df = pd.DataFrame(data=data)
return df
def get_temperature_run(self, starname=None, date=None, df=None):
"""
Return the maximum ccf height for each temperature. Either starname AND date, or df must be given
:param starname: The name of the star
:param date: The date of the observation
:param df: Input dataframe, such as from _compile_data. Overrides starname and date, if given
:return: a pandas DataFrame with all the best parameters for each temperature
"""
# Get the dataframe if it isn't given
if df is None:
if starname is None or date is None:
raise ValueError('Must give either starname or date to get_temperature_run!')
df = self._compile_data(starname, date)
# Find the maximum CCF for each set of parameters
fcn = lambda row: (np.max(row), self.velocities[np.argmax(row)])
vals = df['ccf'].map(fcn)
df['ccf_max'] = vals.map(lambda l: l[0])
df['rv'] = vals.map(lambda l: l[1])
# Find the best parameters for each temperature
d = defaultdict(list)
temperatures = pd.unique(df['T'])
for T in temperatures:
good = df.loc[df['T'] == T]
best = good.loc[good.ccf_max == good.ccf_max.max()]
d['vsini'].append(best['vsini'].item())
d['logg'].append(best['logg'].item())
d['[Fe/H]'].append(best['[Fe/H]'].item())
d['rv'].append(best['rv'].item())
d['ccf_value'].append(best.ccf_max.item())
d['T'].append(T)
d['metal'].append(best['[Fe/H]'].item())
return pd.DataFrame(data=d)
def get_ccf(self, params, df=None):
"""
Get the ccf with the given parameters. A dataframe can be given to speed things up
:param params: All the parameters necessary to define a single ccf. This should be
a python dictionary with the keys:
- 'starname': The name of the star. Try self.list_stars() for the options.
- 'date': The UT date of the observations. Try self.list_dates() for the options.
- 'T': temperature of the model
- 'logg': the log(g) of the model
- 'vsini': the vsini by which the model was broadened before correlation
- '[Fe/H]': the metallicity of the model
- 'addmode': The way the order CCFs were added to make a total one. Can be:
- 'simple'
- 'ml'
- 'weighted'
- 'dc'
:param df: a pandas DataFrame such as outputted by _compile_data
:return: a pandas DataFrame with columns of velocity and CCF power
"""
if df is None:
try:
df = self._compile_data(params['starname'], params['date'])
except KeyError:
raise KeyError('Must give get_ccf params with starname and date keywords, if df is not given!')
Tvals = df['T'].unique()
T = Tvals[np.argmin(abs(Tvals - params['T']))]
good = df.loc[(df['T'] == T) & (df.logg == params['logg']) & (df.vsini == params['vsini']) \
& (df['[Fe/H]'] == params['[Fe/H]']) & (df.addmode == params['addmode'])]
return pd.DataFrame(data={'velocity': self.velocities, 'CCF': good['ccf'].item()})
| gpl-3.0 | 5,668,383,523,431,874,000 | 42.710145 | 126 | 0.513263 | false | 4.11647 | false | false | false |
clausqr/HTPC-Manager | modules/qbittorrent.py | 1 | 12490 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import htpc
import cherrypy
import json
import logging
import time
import math
from cherrypy.lib.auth2 import require
from htpc.helpers import striphttp, sizeof
import requests
from requests.auth import HTTPDigestAuth
class Qbittorrent(object):
session = requests.Session()
def __init__(self):
self.logger = logging.getLogger('modules.qbittorrent')
self.newapi = False
self.authenticated = False
self.testapi = None
htpc.MODULES.append({
'name': 'qBittorrent',
'id': 'qbittorrent',
'test': htpc.WEBDIR + 'qbittorrent/ping',
'fields': [
{'type': 'bool', 'label': 'Enable', 'name': 'qbittorrent_enable'},
{'type': 'text', 'label': 'Menu name', 'name': 'qbittorrent_name'},
{'type': 'text', 'label': 'IP / Host', 'placeholder': 'localhost', 'name': 'qbittorrent_host'},
{'type': 'text', 'label': 'Port', 'placeholder': '8080', 'name': 'qbittorrent_port'},
{'type': 'text', 'label': 'Username', 'name': 'qbittorrent_username'},
{'type': 'password', 'label': 'Password', 'name': 'qbittorrent_password'},
{'type': 'bool', 'label': 'Use SSL', 'name': 'qbittorrent_ssl'},
{'type': 'text', 'label': 'Reverse proxy link', 'placeholder': '', 'desc': 'Reverse proxy link ex: https://qbt.domain.com', 'name': 'qbittorrent_reverse_proxy_link'},
]
})
@cherrypy.expose()
@require()
def index(self):
return htpc.LOOKUP.get_template('qbittorrent.html').render(scriptname='qbittorrent', webinterface=self.webinterface())
def webinterface(self):
host = striphttp(htpc.settings.get('qbittorrent_host', ''))
port = htpc.settings.get('qbittorrent_port', '')
ssl = 's' if htpc.settings.get('qbittorrent_ssl', 0) else ''
url = 'http%s://%s:%s/' % (ssl, host, port)
if htpc.settings.get('qbittorrent_reverse_proxy_link'):
url = htpc.settings.get('qbittorrent_reverse_proxy_link')
return url
def qbturl(self):
host = striphttp(htpc.settings.get('qbittorrent_host', ''))
port = htpc.settings.get('qbittorrent_port', '')
ssl = 's' if htpc.settings.get('qbittorrent_ssl', 0) else ''
url = 'http%s://%s:%s/' % (ssl, host, port)
return url
@cherrypy.expose()
@require()
def login(self):
self.logger.debug('Trying to login to qbittorrent')
try:
d = {'username': htpc.settings.get('qbittorrent_username', ''),
'password': htpc.settings.get('qbittorrent_password', '')
}
# F33d da cookie monster
r = self.session.post(self.qbturl() + 'login', data=d, verify=False, timeout=5)
if r.content == 'Ok.':
self.logger.debug('Successfully logged in with new api')
self.authenticated = True
self.newapi = True
else:
self.logger.error('Check your username and password')
return r.content
except Exception as e:
self.logger.error('Failed to auth with new api %s' % e)
return
def _fetch(self, u, post=False, params={}, data=None):
host = striphttp(htpc.settings.get('qbittorrent_host', ''))
port = htpc.settings.get('qbittorrent_port', '')
ssl = 's' if htpc.settings.get('qbittorrent_ssl') else ''
url = 'http%s://%s:%s/' % (ssl, host, port)
username = htpc.settings.get('qbittorrent_username', '')
password = htpc.settings.get('qbittorrent_password', '')
url += u
if self.testapi is None:
self.ping()
if self.newapi:
if self.authenticated is False:
self.login()
if post:
if self.newapi:
r = self.session.post(url, data=data, verify=False, timeout=8)
else:
r = self.session.post(url, data=data, verify=False, timeout=8, auth=HTTPDigestAuth(username, password))
else:
if self.newapi:
r = self.session.get(url, verify=False, timeout=8)
else:
r = self.session.get(url, verify=False, timeout=8, auth=HTTPDigestAuth(username, password))
return r
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def fetch(self):
try:
if self.newapi:
result = self._fetch('query/torrents?filter=all&sort=size&reverse=false')
torrents = result.json()
l = []
for torrent in torrents:
t = {}
for k, v in torrent.items():
t[k] = v
if k == 'size':
t['size'] = sizeof(int(v))
if k == 'eta':
eta = time.strftime('%H:%M:%S', time.gmtime(v))
if eta == '00:00:00':
eta = u'\u221E'
t['eta'] = eta
if k == 'ratio':
t['ratio'] = math.ceil(v)
l.append(t)
return l
else:
result = self._fetch('json/torrents')
# r.json() does not like the infinity
return json.loads(result.content)
except Exception as e:
self.logger.error("Couldn't get torrents %s" % e)
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def get_speed(self):
''' Get total download and upload speed '''
try:
d = {}
if not self.newapi:
result = self._fetch('json/transferInfo/')
result = result.json()
speeddown = result['dl_info']
speedup = result['up_info']
list_of_down = speeddown.split()
list_of_up = speedup.split()
ds = list_of_down[1] + ' ' + list_of_down[2]
dlstat = list_of_down[5] + ' ' + list_of_down[6]
us = list_of_up[1] + ' ' + list_of_up[2]
                ulstat = list_of_up[5] + ' ' + list_of_up[6]
d = {
'qbittorrent_speed_down': ds,
'qbittorrent_speed_up': us,
'qbittorrent_total_dl': dlstat,
'qbittorrent_total_ul': ulstat
}
else:
# new api stuff
result = self._fetch('query/transferInfo')
result = result.json()
d = {
'qbittorrent_speed_down': sizeof(result['dl_info_speed']),
'qbittorrent_speed_up': sizeof(result['up_info_speed']),
'qbittorrent_total_dl': sizeof(result['dl_info_data']),
'qbittorrent_total_ul': sizeof(result['up_info_data'])
}
return d
except Exception as e:
self.logger.error("Couldn't get total download and uploads speed %s" % e)
def get_global_dl_limit(self):
try:
result = self._fetch('command/getGlobalDlLimit/')
speed = int(result.content)
speed /= 1024
return speed
except Exception as e:
self.logger.error("Couldn't get global download limit %s" % e)
def get_global_ul_limit(self):
try:
result = self._fetch('command/getGlobalUpLimit')
speed = int(result.content)
speed /= 1024
return speed
except Exception as e:
self.logger.error("Couldn't get global upload limit %s" % e)
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def get_global_limit(self):
try:
d = {}
d['dl_limit'] = self.get_global_dl_limit()
d['ul_limit'] = self.get_global_ul_limit()
return d
except Exception as e:
self.logger.debug("Couldn't get global upload and download limits %s" % e)
@cherrypy.expose()
@require()
def command(self, cmd=None, hash=None, name=None, dlurl=None):
        ''' Handles pause, resume, delete single torrents '''
try:
self.logger.debug('%s %s' % (cmd, name))
data = {}
if cmd == 'delete':
data['hashes'] = hash
elif cmd == 'download':
data['urls'] = dlurl
elif cmd == 'resumeall' or cmd == 'pauseall':
# this does not work, bug in qbt see
# https://github.com/qbittorrent/qBittorrent/issues/3016
if self.newapi:
cmd = cmd[:-3] + 'All'
else:
data['hash'] = hash
url = 'command/%s' % cmd
# data is form encode..
r = self._fetch(url, post=True, data=data)
return r.content
except Exception as e:
self.logger.error('Failed at %s %s %s %s' % (cmd, name, hash, e))
@cherrypy.expose()
@require()
def to_client(self, link, torrentname, **kwargs):
''' Is used by torrent search '''
try:
url = 'command/download/'
data = {}
data['urls'] = link
            self.logger.info('%s %s is sent to qBittorrent' % (torrentname, link))
            return self._fetch(url, data=data, post=True)
except Exception as e:
self.logger.error('Failed to send %s %s to qBittorrent %s' % (link, torrentname, e))
@cherrypy.expose()
@require()
def set_speedlimit(self, type=None, speed=None):
''' Sets global upload and download speed '''
try:
self.logger.debug('Setting %s to %s' % (type, speed))
speed = int(speed)
if speed == 0:
speed = 0
else:
speed = speed * 1024
url = 'command/' + type + '/'
data = {}
data['limit'] = speed
r = self._fetch(url, data=data, post=True)
return r.content
except Exception as e:
self.logger.error('Failed to set %s to %s %s' % (type, speed, e))
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def ping(self, qbittorrent_host='', qbittorrent_port='', qbittorrent_username='', qbittorrent_password='', qbittorrent_ssl=False, **kw):
        self.logger.debug('Trying to connect to qBittorrent')
host = qbittorrent_host or htpc.settings.get('qbittorrent_host')
port = qbittorrent_port or htpc.settings.get('qbittorrent_port')
username = qbittorrent_username or htpc.settings.get('qbittorrent_username')
password = qbittorrent_password or htpc.settings.get('qbittorrent_password')
ssl = 's' if qbittorrent_ssl or htpc.settings.get('qbittorrent_ssl') else ''
url = 'http%s://%s:%s/' % (ssl, host, port)
self.newapi = False
self.authenticated = False
try:
            # We assume that it's at least 3.2 if this works.
r = requests.get(url + 'version/api', timeout=8, verify=False)
self.logger.debug('Trying to connect with new API %s' % r.url)
# Old api returns a empty page
if r.content != '' and r.ok:
self.newapi = r.content
self.testapi = True
return r.content
else:
raise requests.ConnectionError
except Exception as e:
self.logger.debug('Failed to figure out what api version, trying old API')
try:
r = requests.post(url + 'json/torrents', auth=HTTPDigestAuth(username, password), timeout=10, verify=False)
if r.ok:
self.logger.debug('Old API works %s' % r.url)
# Disable new api stuff
self.testapi = True
self.newapi = False
self.authenticated = False
except Exception as e:
self.newapi = False
self.authenticated = False
self.logger.debug('Failed to contact qBittorrent via old and newapi')
                self.logger.error('Cannot contact qBittorrent, check your settings and try again %s' % e)
| mit | -863,452,485,712,000,900 | 36.507508 | 182 | 0.517374 | false | 3.945041 | false | false | false |
SpaceGroupUCL/qgisSpaceSyntaxToolkit | esstoolkit/external/networkx/algorithms/swap.py | 4 | 9829 | """Swap edges in a graph.
"""
import math
from networkx.utils import py_random_state
import networkx as nx
__all__ = ["double_edge_swap", "connected_double_edge_swap"]
@py_random_state(3)
def double_edge_swap(G, nswap=1, max_tries=100, seed=None):
"""Swap two edges in the graph while keeping the node degrees fixed.
A double-edge swap removes two randomly chosen edges u-v and x-y
and creates the new edges u-x and v-y::
u--v u v
becomes | |
x--y x y
If either the edge u-x or v-y already exist no swap is performed
and another attempt is made to find a suitable edge pair.
Parameters
----------
G : graph
An undirected graph
nswap : integer (optional, default=1)
Number of double-edge swaps to perform
max_tries : integer (optional)
Maximum number of attempts to swap edges
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
G : graph
The graph after double edge swaps.
Notes
-----
Does not enforce any connectivity constraints.
The graph G is modified in place.
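    Examples
    --------
    An illustrative sketch (added example; the graph and parameter values are
    arbitrary). The degree sequence is preserved while the wiring changes:
    >>> G = nx.gnp_random_graph(20, 0.3, seed=1)
    >>> before = sorted(d for _, d in G.degree())
    >>> G = nx.double_edge_swap(G, nswap=10, max_tries=1000, seed=42)
    >>> sorted(d for _, d in G.degree()) == before
    True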
"""
if G.is_directed():
raise nx.NetworkXError("double_edge_swap() not defined for directed graphs.")
if nswap > max_tries:
raise nx.NetworkXError("Number of swaps > number of tries allowed.")
if len(G) < 4:
raise nx.NetworkXError("Graph has less than four nodes.")
# Instead of choosing uniformly at random from a generated edge list,
# this algorithm chooses nonuniformly from the set of nodes with
# probability weighted by degree.
n = 0
swapcount = 0
keys, degrees = zip(*G.degree()) # keys, degree
cdf = nx.utils.cumulative_distribution(degrees) # cdf of degree
discrete_sequence = nx.utils.discrete_sequence
while swapcount < nswap:
# if random.random() < 0.5: continue # trick to avoid periodicities?
# pick two random edges without creating edge list
# choose source node indices from discrete distribution
(ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
if ui == xi:
continue # same source, skip
u = keys[ui] # convert index to label
x = keys[xi]
# choose target uniformly from neighbors
v = seed.choice(list(G[u]))
y = seed.choice(list(G[x]))
if v == y:
continue # same target, skip
if (x not in G[u]) and (y not in G[v]): # don't create parallel edges
G.add_edge(u, x)
G.add_edge(v, y)
G.remove_edge(u, v)
G.remove_edge(x, y)
swapcount += 1
if n >= max_tries:
e = (
f"Maximum number of swap attempts ({n}) exceeded "
f"before desired swaps achieved ({nswap})."
)
raise nx.NetworkXAlgorithmError(e)
n += 1
return G
@py_random_state(3)
def connected_double_edge_swap(G, nswap=1, _window_threshold=3, seed=None):
"""Attempts the specified number of double-edge swaps in the graph `G`.
A double-edge swap removes two randomly chosen edges `(u, v)` and `(x,
y)` and creates the new edges `(u, x)` and `(v, y)`::
u--v u v
becomes | |
x--y x y
If either `(u, x)` or `(v, y)` already exist, then no swap is performed
so the actual number of swapped edges is always *at most* `nswap`.
Parameters
----------
G : graph
An undirected graph
nswap : integer (optional, default=1)
Number of double-edge swaps to perform
_window_threshold : integer
The window size below which connectedness of the graph will be checked
after each swap.
The "window" in this function is a dynamically updated integer that
represents the number of swap attempts to make before checking if the
graph remains connected. It is an optimization used to decrease the
running time of the algorithm in exchange for increased complexity of
implementation.
If the window size is below this threshold, then the algorithm checks
after each swap if the graph remains connected by checking if there is a
path joining the two nodes whose edge was just removed. If the window
size is above this threshold, then the algorithm performs do all the
swaps in the window and only then check if the graph is still connected.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
int
The number of successful swaps
Raises
------
NetworkXError
If the input graph is not connected, or if the graph has fewer than four
nodes.
Notes
-----
The initial graph `G` must be connected, and the resulting graph is
connected. The graph `G` is modified in place.
References
----------
.. [1] C. Gkantsidis and M. Mihail and E. Zegura,
The Markov chain simulation method for generating connected
power law random graphs, 2003.
http://citeseer.ist.psu.edu/gkantsidis03markov.html
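    Examples
    --------
    An illustrative sketch (added example). The graph is guaranteed to stay
    connected; the return value is the number of swaps actually performed:
    >>> G = nx.cycle_graph(10)
    >>> swaps = nx.connected_double_edge_swap(G, nswap=5, seed=1)
    >>> nx.is_connected(G)
    True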
"""
if not nx.is_connected(G):
raise nx.NetworkXError("Graph not connected")
if len(G) < 4:
raise nx.NetworkXError("Graph has less than four nodes.")
n = 0
swapcount = 0
deg = G.degree()
# Label key for nodes
dk = list(n for n, d in G.degree())
cdf = nx.utils.cumulative_distribution(list(d for n, d in G.degree()))
discrete_sequence = nx.utils.discrete_sequence
window = 1
while n < nswap:
wcount = 0
swapped = []
# If the window is small, we just check each time whether the graph is
# connected by checking if the nodes that were just separated are still
# connected.
if window < _window_threshold:
# This Boolean keeps track of whether there was a failure or not.
fail = False
while wcount < window and n < nswap:
# Pick two random edges without creating the edge list. Choose
# source nodes from the discrete degree distribution.
(ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
# If the source nodes are the same, skip this pair.
if ui == xi:
continue
# Convert an index to a node label.
u = dk[ui]
x = dk[xi]
# Choose targets uniformly from neighbors.
v = seed.choice(list(G.neighbors(u)))
y = seed.choice(list(G.neighbors(x)))
# If the target nodes are the same, skip this pair.
if v == y:
continue
if x not in G[u] and y not in G[v]:
G.remove_edge(u, v)
G.remove_edge(x, y)
G.add_edge(u, x)
G.add_edge(v, y)
swapped.append((u, v, x, y))
swapcount += 1
n += 1
# If G remains connected...
if nx.has_path(G, u, v):
wcount += 1
# Otherwise, undo the changes.
else:
G.add_edge(u, v)
G.add_edge(x, y)
G.remove_edge(u, x)
G.remove_edge(v, y)
swapcount -= 1
fail = True
# If one of the swaps failed, reduce the window size.
if fail:
window = int(math.ceil(window / 2))
else:
window += 1
# If the window is large, then there is a good chance that a bunch of
# swaps will work. It's quicker to do all those swaps first and then
# check if the graph remains connected.
else:
while wcount < window and n < nswap:
# Pick two random edges without creating the edge list. Choose
# source nodes from the discrete degree distribution.
                (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
# If the source nodes are the same, skip this pair.
if ui == xi:
continue
# Convert an index to a node label.
u = dk[ui]
x = dk[xi]
# Choose targets uniformly from neighbors.
v = seed.choice(list(G.neighbors(u)))
y = seed.choice(list(G.neighbors(x)))
# If the target nodes are the same, skip this pair.
if v == y:
continue
if x not in G[u] and y not in G[v]:
G.remove_edge(u, v)
G.remove_edge(x, y)
G.add_edge(u, x)
G.add_edge(v, y)
swapped.append((u, v, x, y))
swapcount += 1
n += 1
wcount += 1
# If the graph remains connected, increase the window size.
if nx.is_connected(G):
window += 1
# Otherwise, undo the changes from the previous window and decrease
# the window size.
else:
while swapped:
(u, v, x, y) = swapped.pop()
G.add_edge(u, v)
G.add_edge(x, y)
G.remove_edge(u, x)
G.remove_edge(v, y)
swapcount -= 1
window = int(math.ceil(window / 2))
return swapcount
| gpl-3.0 | 5,574,572,882,945,417,000 | 35.539033 | 85 | 0.550005 | false | 4.220266 | false | false | false |
durandj/ynot-django | ynot/django/themes/templatetags/breadcrumbs.py | 1 | 2421 | from django import template as django_template
from django.template import defaulttags as django_defaulttags
from django.utils import encoding as django_encoding
# pylint: disable=invalid-name, too-few-public-methods
register = django_template.Library()
# pylint: disable=unused-argument
@register.tag
def breadcrumb(parser, token):
"""
Render breadcrumbs in the form of:
{% breadcrumb 'Breadcrumb title' url %}
"""
return BreadcrumbNode(token.split_contents()[1:])
# pylint: enable=unused-argument
@register.tag
def breadcrumb_url(parser, token):
"""
Render breadcrumbs in the form of:
    {% breadcrumb_url 'Breadcrumb title' url args %}
"""
contents = token.split_contents()
if len(contents) == 2:
return breadcrumb(parser, token) # Shortcut to normal breadcrumbs
title = contents.pop(1)
token.contents = ' '.join(contents)
url = django_defaulttags.url(parser, token)
return UrlBreadcrumbNode(title, url)
class BreadcrumbNode(django_template.Node):
def __init__(self, args):
self.args = [django_template.Variable(arg) for arg in args]
def render(self, context):
title = self.args[0].var
if title.find('\'') == -1 and title.find('\"') == -1:
try:
val = self.args[0]
title = val.resolve(context)
except:
title = ''
else:
title = django_encoding.smart_unicode(title.strip('\'').strip('\"'))
url = None
if len(self.args) > 1:
val = self.args[1]
try:
url = val.resolve(context)
except django_template.VariableDoesNotExist:
url = None
return render_breadcrumb(title, url = url)
class UrlBreadcrumbNode(django_template.Node):
def __init__(self, title, url_node):
self.title = django_template.Variable(title)
self.url_node = url_node
def render(self, context):
title = self.title.var
if title.find('\'') == -1 and title.find('\"') == -1:
try:
val = self.title
title = val.resolve(context)
except:
title = ''
else:
title = django_encoding.smart_unicode(title.strip('\'').strip('\"'))
url = self.url_node.render(context)
return render_breadcrumb(title, url = url)
def render_breadcrumb(title, url = None):
if url:
breadcrumb_node = '<a href="{url}">{title}</a>'.format(
title = title,
url = url,
)
else:
breadcrumb_node = '<span>{title}</span>'.format(title = title)
breadcrumb_node = '<span class="ynot-breadcrumb">{}</span>'.format(breadcrumb_node)
    return breadcrumb_node
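# Example output (illustrative):
#   render_breadcrumb('Home', url='/') -> '<span class="ynot-breadcrumb"><a href="/">Home</a></span>'
#   render_breadcrumb('Profile')       -> '<span class="ynot-breadcrumb"><span>Profile</span></span>'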
| mit | -5,690,095,699,108,651,000 | 24.21875 | 84 | 0.674515 | false | 3.030038 | false | false | false |
edx/edx-enterprise | enterprise/migrations/0111_pendingenterprisecustomeradminuser.py | 1 | 3009 | # Generated by Django 2.2.15 on 2020-09-09 14:31
import simple_history.models
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('enterprise', '0110_add_default_contract_discount'),
]
operations = [
migrations.CreateModel(
name='HistoricalPendingEnterpriseCustomerAdminUser',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('user_email', models.EmailField(max_length=254)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('enterprise_customer', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='enterprise.EnterpriseCustomer')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'get_latest_by': 'history_date',
'verbose_name': 'historical pending enterprise customer admin user',
'ordering': ('-history_date', '-history_id'),
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='PendingEnterpriseCustomerAdminUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('user_email', models.EmailField(max_length=254)),
('enterprise_customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='enterprise.EnterpriseCustomer')),
],
options={
'unique_together': {('enterprise_customer', 'user_email')},
'ordering': ['created'],
},
),
]
| agpl-3.0 | -8,918,742,934,730,054,000 | 52.732143 | 205 | 0.627783 | false | 4.256011 | false | false | false |
blueshed/blueshed-micro | blueshed/micro/utils/executor.py | 1 | 2437 | from blueshed.micro.utils import resources
from tornado.concurrent import Future
from tornado.ioloop import IOLoop
from tornado.autoreload import add_reload_hook
from functools import wraps
import logging
import os
import inspect
from concurrent.futures.process import ProcessPoolExecutor
LOGGER = logging.getLogger(__name__)
_pool_ = None
def pool_init(pool):
global _pool_
_pool_ = pool
def pool_init_processes(pool_size, debug=False):
micro_pool = ProcessPoolExecutor(pool_size)
pool_init(micro_pool)
if debug is True:
add_reload_hook(micro_pool.shutdown)
    logging.info("pool initialized with %s processes", pool_size)
return micro_pool
def global_pool():
global _pool_
return _pool_
def register_pool(name, pool):
resources.set_resource(name, pool)
def has_micro_context(f):
for k, v in inspect.signature(f).parameters.items():
if v.annotation == 'micro_context':
return k
def run_in_pool(_pid, _f, _has_context, context, *args, **kwargs):
    # a subprocess inherits globals (including the IOLoop) from the
    # parent process, so clear them.
subprocess = os.getpid() != _pid
if subprocess and IOLoop.current(False):
LOGGER.debug("clearing tornado globals")
IOLoop.clear_current()
IOLoop.clear_instance()
LOGGER.debug("running %s %s", os.getpid(), context)
if _has_context:
kwargs[_has_context] = context
result = _f(*args, **kwargs)
if not subprocess:
return result
if isinstance(result, Future):
LOGGER.debug('running up tornado to complete')
def done(*args, **kwargs):
LOGGER.debug('stopping tornado')
IOLoop.current().stop()
result.add_done_callback(done)
IOLoop.current().start()
result = result.result()
return context, result
def pool(_f, resource_name=None):
has_context = has_micro_context(_f)
@wraps(_f)
def call(_f, context, *args, **kwargs):
        global _pool_
        pool = None
if resource_name:
pool = resources.get_resource(resource_name)
elif _pool_:
pool = _pool_
if pool:
result = pool.submit(run_in_pool, os.getpid(), _f,
has_context, context, *args, **kwargs)
else:
if has_context:
kwargs[has_context] = context
result = _f(*args, **kwargs)
return result
return call
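# Illustrative usage sketch (the job function and context payload below are
# hypothetical, not part of this module):
#
#   def work(data, context: 'micro_context' = None):
#       return data * 2
#
#   pool_init_processes(2)
#   call = pool(work)
#   future = call(work, {'request_id': 1}, 21)
#   # when run in a subprocess the Future resolves to (context, 42)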
| mit | -5,598,329,544,635,889,000 | 26.077778 | 71 | 0.623718 | false | 3.886762 | false | false | false |
espressofiend/NCIL-SOC-2015 | PsychoPy/stroop_lastrun.py | 1 | 14481 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy2 Experiment Builder (v1.82.00), Mon Jun 22 22:53:33 2015
If you publish work using this script please cite the relevant PsychoPy publications
Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.
Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008
"""
from __future__ import division # so that 1/3=0.333 instead of 1/3=0
from psychopy import visual, core, data, event, logging, sound, gui
from psychopy.constants import * # things like STARTED, FINISHED
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# Store info about the experiment session
expName = u'stroop' # from the Builder filename that created this script
expInfo = {u'session': u'001', u'participant': u''}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False: core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + 'data/%s_%s_%s' %(expInfo['participant'], expName, expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
originPath=u'/Users/aaron/Documents/GitHub/NCIL-SOC-2015/PsychoPy/stroop.psyexp',
savePickle=True, saveWideText=True,
dataFileName=filename)
#save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
# Start Code - component code to be run before the window creation
# Setup the Window
win = visual.Window(size=(2560, 1440), fullscr=True, screen=0, allowGUI=False, allowStencil=False,
monitor=u'testMonitor', color=[0,0,0], colorSpace='rgb',
blendMode='avg', useFBO=True,
)
# store frame rate of monitor if we can measure it successfully
expInfo['frameRate']=win.getActualFrameRate()
if expInfo['frameRate']!=None:
frameDur = 1.0/round(expInfo['frameRate'])
else:
frameDur = 1.0/60.0 # couldn't get a reliable measure so guess
# Initialize components for Routine "instr"
instrClock = core.Clock()
instructionText = visual.TextStim(win=win, ori=0, name='instructionText',
text=u'Press left arrow if colour matches word\n\nPress right arrow if colour does not match word\n\nPress either arrow to start.', font=u'Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color=u'white', colorSpace='rgb', opacity=1,
depth=0.0)
# Initialize components for Routine "trial"
trialClock = core.Clock()
ISI = core.StaticPeriod(win=win, screenHz=expInfo['frameRate'], name='ISI')
text = visual.TextStim(win=win, ori=0, name='text',
text=u'XXXXXX', font=u'Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color=u'white', colorSpace='rgb', opacity=1,
depth=-1.0)
text_2 = visual.TextStim(win=win, ori=0, name='text_2',
text=u'+', font=u'Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color=u'blue', colorSpace='rgb', opacity=1,
depth=-2.0)
text_3 = visual.TextStim(win=win, ori=0, name='text_3',
text='default text', font=u'Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color=1.0, colorSpace='rgb', opacity=1,
depth=-3.0)
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
#------Prepare to start Routine "instr"-------
t = 0
instrClock.reset() # clock
frameN = -1
# update component parameters for each repeat
key_resp_3 = event.BuilderKeyResponse() # create an object of type KeyResponse
key_resp_3.status = NOT_STARTED
# keep track of which components have finished
instrComponents = []
instrComponents.append(instructionText)
instrComponents.append(key_resp_3)
for thisComponent in instrComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "instr"-------
continueRoutine = True
while continueRoutine:
# get current time
t = instrClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *instructionText* updates
if t >= 0.0 and instructionText.status == NOT_STARTED:
# keep track of start time/frame for later
instructionText.tStart = t # underestimates by a little under one frame
instructionText.frameNStart = frameN # exact frame index
instructionText.setAutoDraw(True)
# *key_resp_3* updates
if t >= 0.0 and key_resp_3.status == NOT_STARTED:
# keep track of start time/frame for later
key_resp_3.tStart = t # underestimates by a little under one frame
key_resp_3.frameNStart = frameN # exact frame index
key_resp_3.status = STARTED
# keyboard checking is just starting
key_resp_3.clock.reset() # now t=0
event.clearEvents(eventType='keyboard')
if key_resp_3.status == STARTED:
theseKeys = event.getKeys(keyList=['left', 'right'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
key_resp_3.keys = theseKeys[-1] # just the last key pressed
key_resp_3.rt = key_resp_3.clock.getTime()
# a response ends the routine
continueRoutine = False
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
routineTimer.reset() # if we abort early the non-slip timer needs reset
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in instrComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
else: # this Routine was not non-slip safe so reset non-slip timer
routineTimer.reset()
#-------Ending Routine "instr"-------
for thisComponent in instrComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if key_resp_3.keys in ['', [], None]: # No response was made
key_resp_3.keys=None
# store data for thisExp (ExperimentHandler)
thisExp.addData('key_resp_3.keys',key_resp_3.keys)
if key_resp_3.keys != None: # we had a response
thisExp.addData('key_resp_3.rt', key_resp_3.rt)
thisExp.nextEntry()
# set up handler to look after randomisation of conditions etc
trials = data.TrialHandler(nReps=1, method='random',
extraInfo=expInfo, originPath=u'/Users/aaron/Documents/GitHub/NCIL-SOC-2015/PsychoPy/stroop.psyexp',
trialList=data.importConditions(u'psychopy_playing_conditions.xlsx'),
seed=666, name='trials')
thisExp.addLoop(trials) # add the loop to the experiment
thisTrial = trials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial.keys():
exec(paramName + '= thisTrial.' + paramName)
for thisTrial in trials:
currentLoop = trials
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial.keys():
exec(paramName + '= thisTrial.' + paramName)
#------Prepare to start Routine "trial"-------
t = 0
trialClock.reset() # clock
frameN = -1
routineTimer.add(5.000000)
# update component parameters for each repeat
text_3.setColor(colour, colorSpace='rgb')
text_3.setText(word)
key_resp_2 = event.BuilderKeyResponse() # create an object of type KeyResponse
key_resp_2.status = NOT_STARTED
# keep track of which components have finished
trialComponents = []
trialComponents.append(ISI)
trialComponents.append(text)
trialComponents.append(text_2)
trialComponents.append(text_3)
trialComponents.append(key_resp_2)
for thisComponent in trialComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "trial"-------
continueRoutine = True
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = trialClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text* updates
if t >= 0.0 and text.status == NOT_STARTED:
# keep track of start time/frame for later
text.tStart = t # underestimates by a little under one frame
text.frameNStart = frameN # exact frame index
text.setAutoDraw(True)
if text.status == STARTED and t >= (0.0 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left
text.setAutoDraw(False)
# *text_2* updates
if t >= 1.5 and text_2.status == NOT_STARTED:
# keep track of start time/frame for later
text_2.tStart = t # underestimates by a little under one frame
text_2.frameNStart = frameN # exact frame index
text_2.setAutoDraw(True)
if text_2.status == STARTED and t >= (1.5 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left
text_2.setAutoDraw(False)
# *text_3* updates
if t >= 3 and text_3.status == NOT_STARTED:
# keep track of start time/frame for later
text_3.tStart = t # underestimates by a little under one frame
text_3.frameNStart = frameN # exact frame index
text_3.setAutoDraw(True)
if text_3.status == STARTED and t >= (3 + (2-win.monitorFramePeriod*0.75)): #most of one frame period left
text_3.setAutoDraw(False)
# *key_resp_2* updates
if t >= 3 and key_resp_2.status == NOT_STARTED:
# keep track of start time/frame for later
key_resp_2.tStart = t # underestimates by a little under one frame
key_resp_2.frameNStart = frameN # exact frame index
key_resp_2.status = STARTED
# keyboard checking is just starting
key_resp_2.clock.reset() # now t=0
event.clearEvents(eventType='keyboard')
if key_resp_2.status == STARTED and t >= (3 + (2-win.monitorFramePeriod*0.75)): #most of one frame period left
key_resp_2.status = STOPPED
if key_resp_2.status == STARTED:
theseKeys = event.getKeys(keyList=['left', 'right'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
key_resp_2.keys = theseKeys[-1] # just the last key pressed
key_resp_2.rt = key_resp_2.clock.getTime()
# was this 'correct'?
if (key_resp_2.keys == str(corrAns)) or (key_resp_2.keys == corrAns):
key_resp_2.corr = 1
else:
key_resp_2.corr = 0
# a response ends the routine
continueRoutine = False
# *ISI* period
if t >= 0.0 and ISI.status == NOT_STARTED:
# keep track of start time/frame for later
ISI.tStart = t # underestimates by a little under one frame
ISI.frameNStart = frameN # exact frame index
ISI.start(0.5)
elif ISI.status == STARTED: #one frame should pass before updating params and completing
ISI.complete() #finish the static period
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
routineTimer.reset() # if we abort early the non-slip timer needs reset
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "trial"-------
for thisComponent in trialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if key_resp_2.keys in ['', [], None]: # No response was made
key_resp_2.keys=None
# was no response the correct answer?!
if str(corrAns).lower() == 'none': key_resp_2.corr = 1 # correct non-response
else: key_resp_2.corr = 0 # failed to respond (incorrectly)
# store data for trials (TrialHandler)
trials.addData('key_resp_2.keys',key_resp_2.keys)
trials.addData('key_resp_2.corr', key_resp_2.corr)
if key_resp_2.keys != None: # we had a response
trials.addData('key_resp_2.rt', key_resp_2.rt)
thisExp.nextEntry()
# completed 1 repeats of 'trials'
win.close()
core.quit()
| mit | 883,825,306,107,728,600 | 43.832817 | 153 | 0.653477 | false | 3.55886 | false | false | false |
SauloAislan/ironic | ironic/common/keystone.py | 1 | 4019 | # coding=utf-8
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Central place for handling Keystone authorization and service lookup."""
from keystoneauth1 import exceptions as kaexception
from keystoneauth1 import loading as kaloading
from oslo_log import log as logging
import six
from ironic.common import exception
from ironic.conf import CONF
LOG = logging.getLogger(__name__)
def ks_exceptions(f):
"""Wraps keystoneclient functions and centralizes exception handling."""
@six.wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except kaexception.EndpointNotFound:
service_type = kwargs.get('service_type', 'baremetal')
endpoint_type = kwargs.get('endpoint_type', 'internal')
raise exception.CatalogNotFound(
service_type=service_type, endpoint_type=endpoint_type)
except (kaexception.Unauthorized, kaexception.AuthorizationFailure):
raise exception.KeystoneUnauthorized()
except (kaexception.NoMatchingPlugin,
kaexception.MissingRequiredOptions) as e:
raise exception.ConfigInvalid(six.text_type(e))
except Exception as e:
LOG.exception('Keystone request failed: %(msg)s',
{'msg': six.text_type(e)})
raise exception.KeystoneFailure(six.text_type(e))
return wrapper
@ks_exceptions
def get_session(group, **session_kwargs):
"""Loads session object from options in a configuration file section.
The session_kwargs will be passed directly to keystoneauth1 Session
and will override the values loaded from config.
Consult keystoneauth1 docs for available options.
:param group: name of the config section to load session options from
"""
return kaloading.load_session_from_conf_options(
CONF, group, **session_kwargs)
@ks_exceptions
def get_auth(group, **auth_kwargs):
"""Loads auth plugin from options in a configuration file section.
The auth_kwargs will be passed directly to keystoneauth1 auth plugin
and will override the values loaded from config.
Note that the accepted kwargs will depend on auth plugin type as defined
by [group]auth_type option.
Consult keystoneauth1 docs for available auth plugins and their options.
:param group: name of the config section to load auth plugin options from
"""
try:
auth = kaloading.load_auth_from_conf_options(CONF, group,
**auth_kwargs)
except kaexception.MissingRequiredOptions:
LOG.error('Failed to load auth plugin from group %s', group)
raise
return auth
# NOTE(pas-ha) Used by neutronclient and resolving ironic API only
# FIXME(pas-ha) remove this while moving to keystoneauth adapters
@ks_exceptions
def get_service_url(session, **kwargs):
"""Find endpoint for given service in keystone catalog.
    If 'interface' is provided, fetches the service URL for that interface.
Otherwise, first tries to fetch 'internal' endpoint,
and then the 'public' one.
:param session: keystoneauth Session object
:param kwargs: any other arguments accepted by Session.get_endpoint method
"""
if 'interface' in kwargs:
return session.get_endpoint(**kwargs)
try:
return session.get_endpoint(interface='internal', **kwargs)
except kaexception.EndpointNotFound:
return session.get_endpoint(interface='public', **kwargs)
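# Illustrative usage sketch (the config group and service type below are
# examples only, not mandated by this module):
#   session = get_session('keystone_authtoken')
#   ironic_api_url = get_service_url(session, service_type='baremetal')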
| apache-2.0 | 4,339,364,489,607,921,700 | 35.87156 | 78 | 0.700423 | false | 4.368478 | true | false | false |
gnoack/ukechord | chordpro.py | 1 | 4308 | """Read ChordPro files and output them through a PDFWriter object"""
import re
import song
import uke
class ChordProError(Exception):
"""Error in a ChordPro input."""
pass
def _analyze_chordpro_textline(line):
"""Analyze the text and chords in a line of text.
Args:
line: The line of text, with chords in square brackets.
Returns:
A list of (chord, textchunk) tuples.
The chord is None for a leading piece of text without preceding chord.
Example:
Input: "This is [Dm]an example [C]line."
Output: [(None, "This is "), ("Dm", "an example "), ("C", "line.")]
"""
matches = list(re.finditer(r"\[([^\]]+)\]([^\[]*)", line))
if matches:
result = []
if matches[0].start(0):
result.append((None, line[:matches[0].start(0)]))
for match in matches:
result.append(match.groups())
return result
return [(None, line)]
def _chordpro_line(line):
"""Analyze a ChordPro line into a key value pair.
For commands of the form "{key:value}", those will be the key and value.
For empty lines, key is "$empty", and value is None.
For text lines, returns "$lyrics" as key
and a list of (chord, text) tuples as value
"""
line = line.strip()
if not line or line.startswith("#"):
return ("$empty", None)
if line.startswith("{") and line.endswith("}"):
key, unused_colon, value = line[1:-1].partition(":")
return (key, value)
else:
return ("$lyrics", _analyze_chordpro_textline(line))
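# For example (illustrative):
#   _chordpro_line('{title: My Song}') == ('title', ' My Song')
#   _chordpro_line('This is [Dm]text') == ('$lyrics', [(None, 'This is '), ('Dm', 'text')])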
def _parse_chord_definition(value):
# TODO: Is it required to define 'fingers' in each chord definition?
match = re.match(
r"\s+(?P<name>[A-Za-z0-9/+#]*)\s+"
r"frets\s+(?P<frets>[\d\s]+)"
r"fingers\s+(?P<fingers>[\d\s]+)$",
value)
# TODO: Implement finger positioning support
# TODO: Catch too high fret values
if not match:
raise ChordProError("Chord definition parsing failed", value)
frets = [int(fret) for fret in match.group('frets').split(' ') if fret]
if any(fret > uke.MAX_FRET for fret in frets):
raise ChordProError("Frets beyond %d don't exist.", uke.MAX_FRET)
return match.group('name'), tuple(frets)
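# For example (illustrative): a ChordPro line such as
#   {define: C frets 0 0 0 3 fingers 0 0 0 3}
# reaches this function as ' C frets 0 0 0 3 fingers 0 0 0 3' and yields
# ('C', (0, 0, 0, 3)), assuming uke.MAX_FRET >= 3.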
def _convert_lines_to_ast_nodes(lines, chords, end_of_section_markers=()):
result = []
for key, value in lines:
if key in end_of_section_markers:
break
elif key == "$empty":
pass # ignore
elif key in ("$lyrics", "comment"):
if key == "$lyrics":
first_verse_item = song.Line(value)
elif key == "comment":
first_verse_item = song.Comment(value)
else:
raise ChordProError("Should never happen. - Programming error")
# Text
if end_of_section_markers:
# If we're in a section, lines are fine.
result.append(first_verse_item)
else:
verse_lines = _convert_lines_to_ast_nodes(
lines, chords=chords,
end_of_section_markers=("$empty"))
result.append(song.Verse([first_verse_item] + verse_lines))
elif key in ("soc", "start-of-chorus", "start_of_chorus"):
if end_of_section_markers:
raise ChordProError("ChordPro: Nested choruses are not supported.")
result.append(song.Chorus(
_convert_lines_to_ast_nodes(
lines, chords=chords,
end_of_section_markers=("eoc", "end-of-chorus", "end_of_chorus"))))
elif key == "define":
name, frets = _parse_chord_definition(value)
chords[name] = frets
elif key in ("title", "subtitle"):
continue # Handled earlier.
elif key == "fontsize":
# TODO: How to handle font size?
pass # Should translate to pdf_writer.setFontsize(int(value))
elif key in ("eoc", "end-of-chorus", "end_of_chorus"):
# If not already part of breaking condition.
raise ChordProError(
"End-of-chorus ChordPro command without matching start.")
else:
raise ChordProError("Unknown ChordPro command: %s", key)
return result
def to_ast(infile):
lines = [_chordpro_line(line) for line in infile.readlines()]
keys_and_values = dict(lines)
title = keys_and_values.get("title", "").strip()
subtitle = keys_and_values.get("subtitle", "").strip()
chords = {}
children = _convert_lines_to_ast_nodes(iter(lines), chords=chords)
return song.Song(children, title=title, subtitle=subtitle, chords=chords)
| apache-2.0 | -5,033,884,073,311,483,000 | 31.885496 | 77 | 0.634401 | false | 3.293578 | false | false | false |
remind101/stacker_blueprints | stacker_blueprints/policies.py | 1 | 6595 | from awacs.aws import (
Action,
Allow,
Policy,
Principal,
Statement,
)
from troposphere import (
Sub,
Join,
Region,
AccountId,
AWSHelperFn
)
from awacs import (
sts,
s3,
logs,
ec2,
dynamodb,
cloudwatch,
)
def make_simple_assume_statement(*principals):
return Statement(
Principal=Principal('Service', principals),
Effect=Allow,
Action=[sts.AssumeRole])
def make_simple_assume_policy(*principals):
return Policy(
Statement=[
make_simple_assume_statement(*principals)])
def dynamodb_arn(table_name):
return 'arn:aws:dynamodb:::table/{}'.format(table_name)
def dynamodb_arns(table_names):
return [dynamodb_arn(table_name) for table_name in table_names]
def s3_arn(bucket):
if isinstance(bucket, AWSHelperFn):
return Sub('arn:aws:s3:::${Bucket}', Bucket=bucket)
else:
return 'arn:aws:s3:::%s' % bucket
def s3_objects_arn(bucket, folder="*"):
if isinstance(bucket, AWSHelperFn):
return Sub('arn:aws:s3:::${Bucket}/%s' % folder, Bucket=bucket)
else:
return 'arn:aws:s3:::%s/%s' % (bucket, folder)
def read_only_s3_bucket_policy_statements(buckets, folder="*"):
""" Read only policy an s3 bucket. """
list_buckets = [s3_arn(b) for b in buckets]
object_buckets = [s3_objects_arn(b, folder) for b in buckets]
bucket_resources = list_buckets + object_buckets
return [
Statement(
Effect=Allow,
Resource=[s3_arn("*")],
Action=[s3.ListAllMyBuckets]
),
Statement(
Effect=Allow,
Resource=bucket_resources,
Action=[Action('s3', 'Get*'), Action('s3', 'List*')]
)
]
def read_only_s3_bucket_policy(buckets):
return Policy(Statement=read_only_s3_bucket_policy_statements(buckets))
def read_write_s3_bucket_policy_statements(buckets, folder="*"):
list_buckets = [s3_arn(b) for b in buckets]
object_buckets = [s3_objects_arn(b, folder) for b in buckets]
return [
Statement(
Effect="Allow",
Action=[
s3.GetBucketLocation,
s3.ListAllMyBuckets,
],
Resource=[s3_arn("*")]
),
Statement(
Effect=Allow,
Action=[
s3.ListBucket,
s3.GetBucketVersioning,
],
Resource=list_buckets,
),
Statement(
Effect=Allow,
Action=[
s3.GetObject,
s3.PutObject,
s3.PutObjectAcl,
s3.DeleteObject,
s3.GetObjectVersion,
s3.DeleteObjectVersion,
],
Resource=object_buckets,
),
]
def read_write_s3_bucket_policy(buckets):
return Policy(Statement=read_write_s3_bucket_policy_statements(buckets))
def static_website_bucket_policy(bucket):
"""
Attach this policy directly to an S3 bucket to make it a static website.
This policy grants read access to **all unauthenticated** users.
"""
return Policy(
Statement=[
Statement(
Effect=Allow,
Principal=Principal("*"),
Action=[s3.GetObject],
Resource=[s3_objects_arn(bucket)],
)
]
)
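# Illustrative example (the bucket name is hypothetical):
#   static_website_bucket_policy('my-static-site')
# grants anonymous s3:GetObject on arn:aws:s3:::my-static-site/* and is meant
# to be attached to the bucket as its bucket policy.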
def log_stream_arn(log_group_name, log_stream_name):
return Join(
'',
[
"arn:aws:logs:", Region, ":", AccountId, ":log-group:",
log_group_name, ":log-stream:", log_stream_name
]
)
def write_to_cloudwatch_logs_stream_statements(log_group_name,
log_stream_name):
return [
Statement(
Effect=Allow,
Action=[logs.PutLogEvents],
Resource=[log_stream_arn(log_group_name, log_stream_name)]
)
]
def write_to_cloudwatch_logs_stream_policy(log_group_name, log_stream_name):
return Policy(
Statement=write_to_cloudwatch_logs_stream_statements(log_group_name,
log_stream_name)
)
def cloudwatch_logs_write_statements(log_group=None):
resources = ["arn:aws:logs:*:*:*"]
if log_group:
log_group_parts = ["arn:aws:logs:", Region, ":", AccountId,
":log-group:", log_group]
log_group_arn = Join("", log_group_parts)
log_stream_wild = Join("", log_group_parts + [":*"])
resources = [log_group_arn, log_stream_wild]
return [
Statement(
Effect=Allow,
Resource=resources,
Action=[
logs.CreateLogGroup,
logs.CreateLogStream,
logs.PutLogEvents
]
)
]
def lambda_basic_execution_statements(function_name):
log_group = Join("/", ["/aws/lambda", function_name])
return cloudwatch_logs_write_statements(log_group)
def lambda_basic_execution_policy(function_name):
return Policy(Statement=lambda_basic_execution_statements(function_name))
def lambda_vpc_execution_statements():
"""Allow Lambda to manipuate EC2 ENIs for VPC support."""
return [
Statement(
Effect=Allow,
Resource=['*'],
Action=[
ec2.CreateNetworkInterface,
ec2.DescribeNetworkInterfaces,
ec2.DeleteNetworkInterface,
]
)
]
def flowlogs_assumerole_policy():
return make_simple_assume_policy("vpc-flow-logs.amazonaws.com")
# reference: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html#cfn-dynamodb-table-examples-application-autoscaling # noqa
def dynamodb_autoscaling_policy(tables):
"""Policy to allow AutoScaling a list of DynamoDB tables."""
return Policy(
Statement=[
Statement(
Effect=Allow,
Resource=dynamodb_arns(tables),
Action=[
dynamodb.DescribeTable,
dynamodb.UpdateTable,
]
),
Statement(
Effect=Allow,
Resource=['*'],
Action=[
cloudwatch.PutMetricAlarm,
cloudwatch.DescribeAlarms,
cloudwatch.GetMetricStatistics,
cloudwatch.SetAlarmState,
cloudwatch.DeleteAlarms,
]
),
]
)
| bsd-2-clause | -2,938,821,598,821,418,500 | 25.808943 | 167 | 0.549659 | false | 3.963341 | false | false | false |
osborne6/luminotes | view/Page_navigation.py | 1 | 1736 | from Tags import P, Span, A, Strong
class Page_navigation( P ):
def __init__( self, page_path, displayed_item_count, total_item_count, start, items_per_page, return_text = None ):
if start is None or items_per_page is None:
P.__init__( self )
return
if displayed_item_count == 1 and displayed_item_count < total_item_count:
if not return_text:
P.__init__( self )
return
P.__init__(
self,
Span(
A(
return_text,
href = "%s" % page_path,
),
),
)
return
if start == 0 and items_per_page >= total_item_count:
P.__init__( self )
return
P.__init__(
self,
( start > 0 ) and Span(
A(
u"previous",
href = self.href( page_path, max( start - items_per_page, 0 ), items_per_page ),
),
u" | ",
) or None,
[ Span(
( start == page_start ) and Strong( unicode( page_number + 1 ) ) or A(
Strong( unicode( page_number + 1 ) ),
href = self.href( page_path, page_start, items_per_page ),
),
) for ( page_number, page_start ) in enumerate( range( 0, total_item_count, items_per_page ) ) ],
( start + items_per_page < total_item_count ) and Span(
u" | ",
A(
u"next",
href = self.href( page_path, min( start + items_per_page, total_item_count - 1 ), items_per_page ),
),
) or None,
)
@staticmethod
def href( page_path, start, count ):
# if start is zero, leave off start and count parameters and just use the defaults
if start == 0:
return page_path
return u"%s?start=%d&count=%d" % ( page_path, start, count )
| gpl-3.0 | -6,957,415,015,767,892,000 | 28.423729 | 117 | 0.522465 | false | 3.535642 | false | false | false |
tambetm/gymexperiments | a2c_atari.py | 1 | 10250 | import argparse
import os
import multiprocessing
from multiprocessing import Process, Queue, Array
import pickle
import gym
from gym.spaces import Box, Discrete
from keras.models import Model
from keras.layers import Input, TimeDistributed, Convolution2D, Flatten, LSTM, Dense
from keras.objectives import categorical_crossentropy
from keras.optimizers import Adam
from keras.utils import np_utils
import keras.backend as K
import numpy as np
from atari_utils import RandomizedResetEnv, AtariRescale42x42Env
def create_env(env_id):
env = gym.make(env_id)
env = RandomizedResetEnv(env)
env = AtariRescale42x42Env(env)
return env
def create_model(env, batch_size, num_steps):
# network inputs are observations and advantages
h = x = Input(batch_shape=(batch_size, num_steps) + env.observation_space.shape, name="x")
A = Input(batch_shape=(batch_size, num_steps), name="A")
# convolutional layers
h = TimeDistributed(Convolution2D(32, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c1')(h)
h = TimeDistributed(Convolution2D(32, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c2')(h)
h = TimeDistributed(Convolution2D(32, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c3')(h)
h = TimeDistributed(Convolution2D(64, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c4')(h)
h = TimeDistributed(Flatten(), name="fl")(h)
# recurrent layer
h = LSTM(32, return_sequences=True, stateful=True, name="r1")(h)
# policy network
p = TimeDistributed(Dense(env.action_space.n, activation='softmax'), name="p")(h)
# baseline network
b = TimeDistributed(Dense(1), name="b")(h)
# inputs to the model are observation and advantages,
# outputs are action probabilities and baseline
model = Model(input=[x, A], output=[p, b])
# policy gradient loss and entropy bonus
def policy_gradient_loss(l_sampled, l_predicted):
return K.mean(A * categorical_crossentropy(l_sampled, l_predicted), axis=1) \
- 0.01 * K.mean(categorical_crossentropy(l_predicted, l_predicted), axis=1)
# baseline is optimized with MSE
model.compile(optimizer='adam', loss=[policy_gradient_loss, 'mse'])
return model
def predict(model, observation):
# create inputs for batch (and timestep) of size 1
x = np.array([[observation]])
A = np.zeros((1, 1)) # dummy advantage
# predict action probabilities (and baseline state value)
p, b = model.predict_on_batch([x, A])
# return action probabilities and baseline
return p[0, 0], b[0, 0, 0]
def discount(rewards, terminals, v, gamma):
# calculate discounted future rewards for this trajectory
returns = []
# start with the predicted value of the last state
R = v
for r, t in zip(reversed(rewards), reversed(terminals)):
# if it was terminal state then restart from 0
if t:
R = 0
R = r + R * gamma
returns.insert(0, R)
return returns
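# Worked example (illustrative):
#   discount([1, 0, 1], [False, False, True], v=0.5, gamma=0.9)
# returns [1.81, 0.9, 1] (up to floating point); the terminal flag on the last
# step resets R to 0, so the bootstrap value v is discarded.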
def runner(shared_buffer, fifo, num_timesteps, monitor, args):
proc_name = multiprocessing.current_process().name
print("Runner %s started" % proc_name)
# local environment for runner
env = create_env(args.env_id)
# start monitor to record statistics and videos
if monitor:
env.monitor.start(args.env_id)
# copy of model
model = create_model(env, batch_size=1, num_steps=1)
# record episode lengths and rewards for statistics
episode_rewards = []
episode_lengths = []
episode_reward = 0
episode_length = 0
observation = env.reset()
for i in range(num_timesteps // args.num_local_steps):
# copy weights from main network at the beginning of iteration
# the main network's weights are only read, never modified
# but we create our own model instance, because Keras is not thread-safe
model.set_weights(pickle.loads(shared_buffer.raw))
observations = []
actions = []
rewards = []
terminals = []
baselines = []
for t in range(args.num_local_steps):
if args.display:
env.render()
# predict action probabilities (and baseline state value)
p, b = predict(model, observation)
# sample action using those probabilities
p /= np.sum(p) # ensure p-s sum up to 1
action = np.random.choice(env.action_space.n, p=p)
# log data
observations.append(observation)
actions.append(action)
baselines.append(b)
# step environment
observation, reward, terminal, _ = env.step(int(action))
rewards.append(reward)
terminals.append(terminal)
episode_reward += reward
episode_length += 1
# reset if terminal state
if terminal:
episode_rewards.append(episode_reward)
episode_lengths.append(episode_length)
episode_reward = 0
episode_length = 0
observation = env.reset()
# calculate discounted returns
if terminal:
# if the last was terminal state then start from 0
returns = discount(rewards, terminals, 0, 0.99)
else:
# otherwise calculate the value of the last state
_, v = predict(model, observation)
returns = discount(rewards, terminals, v, 0.99)
# convert to numpy arrays
observations = np.array(observations)
actions = np_utils.to_categorical(actions, env.action_space.n)
baselines = np.array(baselines)
returns = np.array(returns)
advantages = returns - baselines
# send observations, actions, rewards and returns. blocks if fifo is full.
fifo.put((observations, actions, returns, advantages, episode_rewards, episode_lengths))
episode_rewards = []
episode_lengths = []
if monitor:
env.monitor.close()
print("Runner %s finished" % proc_name)
def trainer(model, fifos, shared_buffer, args):
proc_name = multiprocessing.current_process().name
print("Trainer %s started" % proc_name)
episode_rewards = []
episode_lengths = []
timestep = 0
while len(multiprocessing.active_children()) > 0 and timestep < args.num_timesteps:
batch_observations = []
batch_actions = []
batch_returns = []
batch_advantages = []
# loop over fifos from all runners
for q, fifo in enumerate(fifos):
# wait for a new trajectory and statistics
observations, actions, returns, advantages, rewards, lengths = fifo.get()
# add to batch
batch_observations.append(observations)
batch_actions.append(actions)
batch_returns.append(returns)
batch_advantages.append(advantages)
# log statistics
episode_rewards += rewards
episode_lengths += lengths
timestep += len(observations)
# form training data from observations, actions and returns
x = np.array(batch_observations)
p = np.array(batch_actions)
R = np.array(batch_returns)[:, :, np.newaxis]
A = np.array(batch_advantages)
# anneal learning rate
model.optimizer.lr = max(0.001 * (args.num_timesteps - timestep) / args.num_timesteps, 0)
# train the model
total_loss, policy_loss, baseline_loss = model.train_on_batch([x, A], [p, R])
# share model parameters
shared_buffer.raw = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL)
if timestep % args.stats_interval == 0:
print("Step %d/%d: episodes %d, mean episode reward %.2f, mean episode length %.2f." %
(timestep, args.num_timesteps, len(episode_rewards), np.mean(episode_rewards), np.mean(episode_lengths)))
episode_rewards = []
episode_lengths = []
print("Trainer %s finished" % proc_name)
def run(args):
# create dummy environment to be able to create model
env = create_env(args.env_id)
assert isinstance(env.observation_space, Box)
assert isinstance(env.action_space, Discrete)
print("Observation space: " + str(env.observation_space))
print("Action space: " + str(env.action_space))
# create main model
model = create_model(env, batch_size=args.num_runners, num_steps=args.num_local_steps)
model.summary()
env.close()
# for better compatibility with Theano and Tensorflow
multiprocessing.set_start_method('spawn')
# create shared buffer for sharing weights
blob = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL)
shared_buffer = Array('c', len(blob))
shared_buffer.raw = blob
# force runner processes to use cpu, child processes inherit environment variables
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# create fifos and processes for all runners
fifos = []
for i in range(args.num_runners):
fifo = Queue(args.queue_length)
fifos.append(fifo)
process = Process(target=runner,
args=(shared_buffer, fifo, args.num_timesteps // args.num_runners, args.monitor and i == 0, args))
process.start()
# start trainer in main thread
trainer(model, fifos, shared_buffer, args)
print("All done")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# parallelization
parser.add_argument('--num_runners', type=int, default=2)
parser.add_argument('--queue_length', type=int, default=2)
# how long
parser.add_argument('--num_timesteps', type=int, default=5000000)
parser.add_argument('--num_local_steps', type=int, default=20)
parser.add_argument('--stats_interval', type=int, default=10000)
# technical
parser.add_argument('--display', action='store_true', default=False)
parser.add_argument('--monitor', action='store_true', default=False)
# mandatory
parser.add_argument('env_id')
args = parser.parse_args()
run(args)
| mit | 6,621,470,067,187,979,000 | 34.590278 | 137 | 0.639805 | false | 3.831776 | false | false | false |
SymbiFlow/symbiflow-arch-defs | utils/lib/parse_route.py | 1 | 1541 | """ Library for parsing route output from VPR route files. """
from collections import namedtuple
Node = namedtuple('Node', 'inode x_low y_low x_high y_high ptc')
def format_name(s):
""" Converts VPR parenthesized name to just name. """
assert s[0] == '('
assert s[-1] == ')'
return s[1:-1]
def format_coordinates(coord):
""" Parses coordinates from VPR route file in format of (x,y). """
coord = format_name(coord)
x, y = coord.split(',')
return int(x), int(y)
def find_net_sources(f):
""" Yields tuple of (net string, Node namedtuple) from file object.
File object should be formatted as VPR route output file.
"""
net = None
for e in f:
tokens = e.strip().split()
if not tokens:
continue
elif tokens[0][0] == '#':
continue
elif tokens[0] == 'Net':
net = format_name(tokens[2])
elif e == "\n\nUsed in local cluster only, reserved one CLB pin\n\n":
continue
else:
if net is not None:
inode = int(tokens[1])
assert tokens[2] == 'SOURCE'
x, y = format_coordinates(tokens[3])
if tokens[4] == 'to':
x2, y2 = format_coordinates(tokens[5])
offset = 2
else:
x2, y2 = x, y
offset = 0
ptc = int(tokens[5 + offset])
yield net, Node(inode, x, y, x2, y2, ptc)
net = None
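# Minimal usage sketch (illustrative; 'top.route' is a hypothetical VPR route
# file path):
if __name__ == '__main__':
    with open('top.route') as route_file:
        for net, node in find_net_sources(route_file):
            print(net, node.inode, node.x_low, node.y_low, node.ptc)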
| isc | 6,817,013,597,185,511,000 | 27.018182 | 77 | 0.506814 | false | 3.862155 | false | false | false |