regardscitoyens/twitter-parlementaires | download_twitter.py | agpl-3.0

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, json
from twitter import Twitter, OAuth
from twitterconfig import KEY, SECRET, OAUTH_TOKEN, OAUTH_SECRET
if len(sys.argv) < 3:
sys.stderr.write("Please input both Twitter list's owner_screen_name and slug\n")
exit(1)
LIST_USER, LIST_ID = sys.argv[1:3]
if not os.path.isdir(".cache"):
os.makedirs(".cache")
t = Twitter(auth=OAuth(OAUTH_TOKEN, OAUTH_SECRET, KEY, SECRET))
accounts = {}
page = 1
args = {
"owner_screen_name": LIST_USER,
"include_entities": "false",
"skip_status": "true",
"count": 5000,
"cursor": -1
}
try:
    # int() suffices here; Python 2 promotes to long automatically
    args["list_id"] = int(LIST_ID)
except ValueError:
    args["slug"] = LIST_ID
while args["cursor"]:
res = t.lists.members(**args)
with open(os.path.join('.cache', 'twitter-%s-%s.json' % (LIST_USER, args["cursor"] if args["cursor"] != -1 else 0)), 'w') as f:
json.dump(res, f)
args["cursor"] = res.get('next_cursor', res.get('next_cursor_str', 0))
new = 0
for account in res['users']:
name = account['screen_name'].lower()
if name not in accounts:
accounts[name] = account
new += 1
print("[INFO/%s] page %s -> %s results including %s new ; new total: %s" % (LIST_ID, page, len(res['users']), new, len(accounts)))
page += 1
with open(os.path.join('.cache', 'twitter-%s.json' % LIST_ID), 'w') as f:
json.dump(accounts, f)
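
# Usage sketch (hypothetical owner/slug values; requires a twitterconfig.py
# module defining KEY, SECRET, OAUTH_TOKEN and OAUTH_SECRET):
#
#   python download_twitter.py regardscitoyens deputes
#
# Each page of members is cached to .cache/twitter-<owner>-<cursor>.json and
# the merged result is written to .cache/twitter-<slug>.json.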

senser/xmppBot | ZenPacks/community/xmppBot/Jabber/plugins/setjid.py | gpl-2.0

"""Check if the sender is a valid zenoss admin. For access control"""
from Jabber.Plugins import Plugin
from Jabber.ZenAdapter import ZenAdapter
from Jabber.Options import Options
from optparse import OptionError
import transaction
class SetJid(Plugin):
name = 'mapuser'
capabilities = ['setjid', 'mapuser', 'help']
def call(self, args, sender, log, **kw):
log.debug('mapuser plugin running with %s' % args)
opts = self.options()
# parse the options
try:
(options, arguments) = opts.parse_args(args)
log.debug('Done parsing arguments. Options are "%s", arguments expanded to %s' % (options, arguments))
        except OptionError as message:
return str(message)
if options.zenUser is None or options.jabberId is None:
return 'NO. -u and -j are both required.'
adapter = ZenAdapter()
jabberId = options.jabberId.lower()
haveUser = False
for user in adapter.userSettings():
if user.id.lower() == options.zenUser.lower():
haveUser = True
try:
currentId = user.getProperty('JabberId')
except AttributeError:
currentId = False
if currentId:
if options.jabberId == currentId.lower():
if options.force:
self.mapIds(jabberId, user)
return 'This user mapping already looks like this. Forced option was used, so I set it anyway.'
else:
return 'This user mapping already looks like this.'
if '/' in sender:
sender = sender.split('/')[0]
if currentId.lower() == sender.lower():
                        if options.force:
                            self.mapIds(jabberId, user)
                            return 'This is your Zenoss user id, and the mapping is already set correctly. However, the force option was used, so I set it anyway. Since this will probably break communication with me, you can change it back from the Zope interface.'
                        else:
                            return 'This is your Zenoss user id, and the mapping is already set correctly. Changing it will prevent you from communicating with me. If you really want to change it, do so from the Zenoss interface or use -f.'
log.debug('Setting the jabberid mapping property to %s for zenuser %s' % (jabberId, user))
self.mapIds(jabberId, user)
break
if haveUser:
return 'JabberId for this user has been saved. Thanks.'
else:
            return 'Sorry! I could not find a Zenoss user by the name %s' % options.zenUser
def mapIds(self, jabberId, zenUser):
self.setPropertyIfNeeded(zenUser)
zenUser._updateProperty('JabberId', jabberId)
transaction.commit()
def setPropertyIfNeeded(self, zenUser):
if not zenUser.hasProperty('JabberId'):
zenUser.manage_addProperty('JabberId', '', 'string')
zenUser._setProperty('JabberId', '', 'string')
try:
zenUser.getProperty('JabberId')
except AttributeError:
zenUser.manage_addProperty('JabberId', '', 'string')
# unnecessary?
#zenUser._setProperty('JabberId', '', 'string')
def private(self):
return False
def options(self):
        parser = Options(description = 'Map a Jabber ID to a Zenoss user', prog = 'mapuser')
parser.add_option('-u', '--user', dest='zenUser', help='Zenoss username (must already exist in zenoss).')
parser.add_option('-j', '--jid', dest='jabberId', help='JabberID to map to the zenoss user.')
parser.add_option('-f', '--force', dest='force', action='store_true', help='Force association even if it could disallow your own user. USE WITH CAUTION.')
return parser
def help(self):
opts = self.options()
return str(opts.help())
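
# Chat usage sketch (hypothetical IDs): sending the bot
#   mapuser -u admin -j [email protected]
# stores the JabberId property on the existing Zenoss user 'admin';
# -f/--force bypasses the safety check on remapping your own user.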

daymer/xWIKI_Karma | CustomModules/mysql-connector-python-2.1.7/lib/cpy_distutils.py | apache-2.0

# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Implements the DistUtils command 'build_ext'
"""
from distutils.command.build_ext import build_ext
from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.errors import DistutilsExecError
from distutils.util import get_platform
from distutils.dir_util import copy_tree
from distutils import log
from glob import glob
import os
import shlex
import struct
from subprocess import Popen, PIPE, STDOUT, check_call
import sys
import platform
import shutil
ARCH_64BIT = sys.maxsize > 2**32 # Works with Python 2.6 and greater
py_arch = '64-bit' if ARCH_64BIT else '32-bit'
CEXT_OPTIONS = [
('with-mysql-capi=', None,
"Location of MySQL C API installation or path to mysql_config"),
('extra-compile-args=', None,
"Extra compile args"),
('extra-link-args=', None,
"Extra link args")
]
CEXT_STATIC_OPTIONS = [
('static', None,
"Link C libraries statically with the C Extension"),
]
INSTALL_OPTIONS = [
('byte-code-only=', None,
"Remove Python .py files; leave byte code .pyc only"),
]
def win_dll_is64bit(dll_file):
"""Check if a Windows DLL is 64 bit or not
Returns True if the library dll_file is 64bit.
Raises ValueError when magic of header is invalid.
Raises IOError when file could not be read.
Raises OSError when execute on none-Windows platform.
Returns True or False.
"""
if os.name != 'nt':
raise OSError("win_ddl_is64bit only useful on Windows")
with open(dll_file, 'rb') as fp:
# IMAGE_DOS_HEADER
e_magic = fp.read(2)
if e_magic != b'MZ':
raise ValueError("Wrong magic in header")
fp.seek(60)
offset = struct.unpack("I", fp.read(4))[0]
# IMAGE_FILE_HEADER
fp.seek(offset)
file_header = fp.read(6)
(signature, machine) = struct.unpack("<4sH", file_header)
if machine == 0x014c: # IMAGE_FILE_MACHINE_I386
return False
        elif machine in (0x8664, 0x2000):  # IMAGE_FILE_MACHINE_AMD64/IA64
            return True
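
# Usage sketch (Windows only; hypothetical path):
#   win_dll_is64bit(r'C:\mysql\lib\libmysql.dll')  # -> True for an AMD64 build
# The check parses the PE header directly (machine field 0x014c = i386,
# 0x8664 = AMD64) instead of loading the DLL into the current process.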
def unix_lib_is64bit(lib_file):
"""Check if a library on UNIX is 64 bit or not
This function uses the `file` command to check if a library on
UNIX-like platforms is 32 or 64 bit.
Returns True if the library is 64bit.
Raises ValueError when magic of header is invalid.
Raises IOError when file could not be read.
Raises OSError when execute on none-Windows platform.
Returns True or False.
"""
if os.name != 'posix':
raise OSError("unix_lib_is64bit only useful on UNIX-like systems")
    if os.path.isdir(lib_file):
mysqlclient_libs = []
for root, _, files in os.walk(lib_file):
for filename in files:
filepath = os.path.join(root, filename)
if filename.startswith('libmysqlclient') and \
not os.path.islink(filepath) and \
'_r' not in filename and \
'.a' not in filename:
mysqlclient_libs.append(filepath)
if mysqlclient_libs:
break
# give priority to .so files instead of .a
mysqlclient_libs.sort()
lib_file = mysqlclient_libs[-1]
log.debug("# Using file command to test lib_file {0}".format(lib_file))
    if platform.uname()[0] == 'SunOS':
        # Solaris' file(1) may not support -L, so skip it there
        cmd_list = ['file', lib_file]
    else:
        cmd_list = ['file', '-L', lib_file]
    # decode output so the substring checks below work on str under Python 3
    prc = Popen(cmd_list, stdin=PIPE, stderr=STDOUT,
                stdout=PIPE, universal_newlines=True)
stdout = prc.communicate()[0]
stdout = stdout.split(':')[1]
log.debug("# lib_file {0} stdout: {1}".format(lib_file, stdout))
if 'x86_64' in stdout or 'x86-64' in stdout or '32-bit' not in stdout:
return True
return False
def parse_mysql_config_info(options, stdout):
log.debug("# stdout: {0}".format(stdout))
info = {}
for option, line in zip(options, stdout.split('\n')):
log.debug("# option: {0}".format(option))
log.debug("# line: {0}".format(line))
info[option] = line.strip()
ver = info['version']
    if '-' in ver:
        # keep only the numeric part; suffixes may contain several dashes
        ver = ver.split('-', 1)[0]
info['version'] = tuple([int(v) for v in ver.split('.')[0:3]])
libs = shlex.split(info['libs'])
info['lib_dir'] = libs[0].replace('-L', '')
info['libs'] = [ lib.replace('-l', '') for lib in libs[1:] ]
if platform.uname()[0] == 'SunOS':
info['lib_dir'] = info['lib_dir'].replace('-R', '')
info['libs'] = [lib.replace('-R', '') for lib in info['libs']]
log.debug("# info['libs']: ")
for lib in info['libs']:
log.debug("# {0}".format(lib))
libs = shlex.split(info['libs_r'])
info['lib_r_dir'] = libs[0].replace('-L', '')
info['libs_r'] = [ lib.replace('-l', '') for lib in libs[1:] ]
info['include'] = [x.strip() for x in info['include'].split('-I')[1:]]
return info
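
# Parsing contract in brief: stdout must contain one line per requested option,
# in the same order as `options`. A hypothetical libs line such as
# '-L/usr/lib64/mysql -lmysqlclient -lpthread' yields
# info['lib_dir'] == '/usr/lib64/mysql' and info['libs'] == ['mysqlclient',
# 'pthread'], while a version line '5.7.22' becomes the tuple (5, 7, 22).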
def get_mysql_config_info(mysql_config):
"""Get MySQL information using mysql_config tool
Returns a dict.
"""
options = ['cflags', 'include', 'libs', 'libs_r', 'plugindir', 'version']
cmd = [mysql_config] + [ "--{0}".format(opt) for opt in options ]
try:
proc = Popen(cmd, stdout=PIPE, universal_newlines=True)
stdout, _ = proc.communicate()
except OSError as exc:
raise DistutilsExecError("Failed executing mysql_config: {0}".format(
str(exc)))
info = parse_mysql_config_info(options, stdout)
# Try to figure out the architecture
info['arch'] = None
if os.name == 'posix':
if platform.uname()[0] == 'SunOS':
print("info['lib_dir']: {0}".format(info['lib_dir']))
print("info['libs'][0]: {0}".format(info['libs'][0]))
pathname = os.path.abspath(os.path.join(info['lib_dir'],
'lib',
info['libs'][0])) + '/*'
else:
pathname = os.path.join(info['lib_dir'],
'lib' + info['libs'][0]) + '*'
print("# Looking mysqlclient_lib at path: {0}".format(pathname))
log.debug("# searching mysqlclient_lib at: %s", pathname)
libs = glob(pathname)
mysqlclient_libs = []
for filepath in libs:
_, filename = os.path.split(filepath)
log.debug("# filename {0}".format(filename))
if filename.startswith('libmysqlclient') and \
not os.path.islink(filepath) and \
'_r' not in filename and \
'.a' not in filename:
mysqlclient_libs.append(filepath)
mysqlclient_libs.sort()
stdout = None
try:
log.debug("# mysqlclient_lib: {0}".format(mysqlclient_libs[-1]))
for mysqlclient_lib in mysqlclient_libs:
log.debug("#+ {0}".format(mysqlclient_lib))
log.debug("# tested mysqlclient_lib[-1]: "
"{0}".format(mysqlclient_libs[-1]))
if platform.uname()[0] == 'SunOS':
print("mysqlclient_lib: {0}".format(mysqlclient_libs[-1]))
cmd_list = ['file', mysqlclient_libs[-1]]
else:
cmd_list = ['file', '-L', mysqlclient_libs[-1]]
proc = Popen(cmd_list, stdout=PIPE,
universal_newlines=True)
stdout, _ = proc.communicate()
stdout = stdout.split(':')[1]
except OSError as exc:
raise DistutilsExecError(
"Although the system seems POSIX, the file-command could not "
"be executed: {0}".format(str(exc)))
if stdout:
if '64' in stdout:
info['arch'] = "x86_64"
else:
info['arch'] = "i386"
else:
raise DistutilsExecError(
"Failed getting out put from the file-command"
)
else:
raise DistutilsExecError(
"Cannot determine architecture on {0} systems".format(os.name))
return info
def remove_cext(distribution):
"""Remove the C Extension from the distribution
This function can be useful in Distutils commands for creating
pure Python modules.
"""
to_remove = []
for ext_mod in distribution.ext_modules:
if ext_mod.name == '_mysql_connector':
to_remove.append(ext_mod)
for ext_mod in to_remove:
distribution.ext_modules.remove(ext_mod)
class BuildExtDynamic(build_ext):
"""Build Connector/Python C Extension"""
description = "build Connector/Python C Extension"
user_options = build_ext.user_options + CEXT_OPTIONS
min_connector_c_version = None
arch = None
_mysql_config_info = None
def initialize_options(self):
build_ext.initialize_options(self)
self.extra_compile_args = None
self.extra_link_args = None
self.with_mysql_capi = None
def _finalize_connector_c(self, connc_loc):
"""Finalize the --with-connector-c command line argument
"""
platform = get_platform()
self._mysql_config_info = None
min_version = BuildExtDynamic.min_connector_c_version
err_invalid_loc = "MySQL C API location is invalid; was %s"
mysql_config = None
err_version = "MySQL C API {0}.{1}.{2} or later required".format(
*BuildExtDynamic.min_connector_c_version)
if not os.path.exists(connc_loc):
log.error(err_invalid_loc, connc_loc)
sys.exit(1)
if os.path.isdir(connc_loc):
# if directory, and no mysql_config is available, figure out the
            # lib/ and include/ folders from the filesystem
mysql_config = os.path.join(connc_loc, 'bin', 'mysql_config')
if os.path.isfile(mysql_config) and \
os.access(mysql_config, os.X_OK):
connc_loc = mysql_config
log.debug("# connc_loc: {0}".format(connc_loc))
else:
# Probably using MS Windows
myconfigh = os.path.join(connc_loc, 'include', 'my_config.h')
if not os.path.exists(myconfigh):
log.error("MySQL C API installation invalid "
"(my_config.h not found)")
sys.exit(1)
else:
with open(myconfigh, 'rb') as fp:
for line in fp.readlines():
if b'#define VERSION' in line:
version = tuple([
int(v) for v in
line.split()[2].replace(
b'"', b'').split(b'.')
])
if version < min_version:
                                    log.error(err_version)
sys.exit(1)
break
# On Windows we check libmysql.dll
if os.name == 'nt':
lib = os.path.join(self.with_mysql_capi, 'lib',
'libmysql.dll')
connc_64bit = win_dll_is64bit(lib)
# On OSX we check libmysqlclient.dylib
elif 'macos' in platform:
lib = os.path.join(self.with_mysql_capi, 'lib',
'libmysqlclient.dylib')
connc_64bit = unix_lib_is64bit(lib)
# On other Unices we check libmysqlclient (follow symlinks)
elif os.name == 'posix':
connc_64bit = unix_lib_is64bit(connc_loc)
else:
raise OSError("Unsupported platform: %s" % os.name)
include_dirs = [os.path.join(connc_loc, 'include')]
if os.name == 'nt':
libraries = ['libmysql']
else:
            libraries = ['mysqlclient']  # distutils adds the -l prefix itself
library_dirs = os.path.join(connc_loc, 'lib')
log.debug("# connc_64bit: {0}".format(connc_64bit))
if connc_64bit:
self.arch = 'x86_64'
else:
self.arch = 'i386'
# We were given the location of the mysql_config tool (not on Windows)
if not os.name == 'nt' and os.path.isfile(connc_loc) \
and os.access(connc_loc, os.X_OK):
mysql_config = connc_loc
# Check mysql_config
myc_info = get_mysql_config_info(mysql_config)
log.debug("# myc_info: {0}".format(myc_info))
if myc_info['version'] < min_version:
log.error(err_version)
sys.exit(1)
include_dirs = myc_info['include']
libraries = myc_info['libs']
library_dirs = myc_info['lib_dir']
self._mysql_config_info = myc_info
self.arch = self._mysql_config_info['arch']
connc_64bit = self.arch == 'x86_64'
for include_dir in include_dirs:
if not os.path.exists(include_dir):
log.error(err_invalid_loc, connc_loc)
sys.exit(1)
# Set up the build_ext class
self.include_dirs.extend(include_dirs)
self.libraries.extend(libraries)
self.library_dirs.append(library_dirs)
# We try to offer a nice message when the architecture of Python
# is not the same as MySQL Connector/C binaries.
print("# self.arch: {0}".format(self.arch))
if ARCH_64BIT != connc_64bit:
log.error("Python is {0}, but does not "
"match MySQL C API {1} architecture, "
"type: {2}"
"".format(py_arch,
'64-bit' if connc_64bit else '32-bit',
self.arch))
sys.exit(1)
def finalize_options(self):
self.set_undefined_options(
'install',
('extra_compile_args', 'extra_compile_args'),
('extra_link_args', 'extra_link_args'),
('with_mysql_capi', 'with_mysql_capi'))
build_ext.finalize_options(self)
print("# Python architecture: {0}".format(py_arch))
print("# Python ARCH_64BIT: {0}".format(ARCH_64BIT))
if self.with_mysql_capi:
self._finalize_connector_c(self.with_mysql_capi)
def fix_compiler(self):
platform = get_platform()
cc = self.compiler
if not cc:
return
if 'macosx-10.9' in platform:
for needle in ['-mno-fused-madd']:
try:
cc.compiler.remove(needle)
cc.compiler_so.remove(needle)
except ValueError:
# We are removing, so OK when needle not there
pass
for name, args in cc.__dict__.items():
if not args or not isinstance(args, list):
continue
new_args = []
enum_args = enumerate(args)
for i, arg in enum_args:
if arg == '-arch':
# Skip not needed architecture
if args[i+1] != self.arch:
next(enum_args)
else:
new_args.append(arg)
else:
new_args.append(arg)
try:
                setattr(cc, name, new_args)
except AttributeError:
# Old class
cc.__dict__[name] = new_args
# Add system headers to Extensions extra_compile_args
sysheaders = [ '-isystem' + dir for dir in cc.include_dirs]
for ext in self.extensions:
# Add extra compile args
if self.extra_compile_args:
ext.extra_compile_args.extend(self.extra_compile_args.split())
# Add extra link args
if self.extra_link_args:
ext.extra_link_args.extend(self.extra_link_args.split())
# Add system headers
for sysheader in sysheaders:
if sysheader not in ext.extra_compile_args:
ext.extra_compile_args.append(sysheader)
# Stop warnings about unknown pragma
if os.name != 'nt':
ext.extra_compile_args.append('-Wno-unknown-pragmas')
def run(self):
"""Run the command"""
if os.name == 'nt':
for ext in self.extensions:
# Use the multithread, static version of the run-time library
ext.extra_compile_args.append("/MT")
# Add extra compile args
if self.extra_compile_args:
ext.extra_compile_args.extend(self.extra_compile_args.split())
# Add extra link args
if self.extra_link_args:
ext.extra_link_args.extend(self.extra_link_args.split())
build_ext.run(self)
else:
self.real_build_extensions = self.build_extensions
self.build_extensions = lambda: None
build_ext.run(self)
self.fix_compiler()
self.real_build_extensions()
class BuildExtStatic(BuildExtDynamic):
"""Build and Link libraries statically with the C Extensions"""
user_options = build_ext.user_options + CEXT_OPTIONS
def finalize_options(self):
install_obj = self.distribution.get_command_obj('install')
install_obj.with_mysql_capi = self.with_mysql_capi
install_obj.extra_compile_args = self.extra_compile_args
install_obj.extra_link_args = self.extra_link_args
install_obj.static = True
options_pairs = []
if not self.extra_compile_args:
options_pairs.append(('extra_compile_args', 'extra_compile_args'))
if not self.extra_link_args:
options_pairs.append(('extra_link_args', 'extra_link_args'))
if not self.with_mysql_capi:
options_pairs.append(('with_mysql_capi', 'with_mysql_capi'))
if options_pairs:
self.set_undefined_options('install', *options_pairs)
build_ext.finalize_options(self)
print("# Python architecture: {0}".format(py_arch))
print("# Python ARCH_64BIT: {0}".format(ARCH_64BIT))
self.connc_lib = os.path.join(self.build_temp, 'connc', 'lib')
self.connc_include = os.path.join(self.build_temp, 'connc', 'include')
if self.with_mysql_capi:
self._finalize_connector_c(self.with_mysql_capi)
def _finalize_connector_c(self, connc_loc):
if not os.path.isdir(connc_loc):
log.error("MySQL C API should be a directory")
sys.exit(1)
log.info("Copying MySQL libraries")
copy_tree(os.path.join(connc_loc, 'lib'), self.connc_lib)
log.info("Copying MySQL header files")
copy_tree(os.path.join(connc_loc, 'include'), self.connc_include)
# Remove all but static libraries to force static linking
if os.name == 'posix':
log.info("Removing non-static MySQL libraries from %s" % self.connc_lib)
for lib_file in os.listdir(self.connc_lib):
lib_file_path = os.path.join(self.connc_lib, lib_file)
if os.path.isfile(lib_file_path) and not lib_file.endswith('.a'):
os.unlink(os.path.join(self.connc_lib, lib_file))
def fix_compiler(self):
BuildExtDynamic.fix_compiler(self)
include_dirs = []
library_dirs = []
libraries = []
if os.name == 'posix':
include_dirs.append(self.connc_include)
library_dirs.append(self.connc_lib)
if self.with_mysql_capi:
libraries.append("mysqlclient")
# As we statically link and the "libmysqlclient.a" library
# carry no information what it depends on, we need to
# manually add library dependencies here.
if platform.system() not in ["Darwin", "Windows"]:
libraries.append("rt")
for ext in self.extensions:
ext.include_dirs.extend(include_dirs)
ext.library_dirs.extend(library_dirs)
ext.libraries.extend(libraries)
# Add extra compile args
if self.extra_compile_args:
ext.extra_compile_args.extend(self.extra_compile_args.split())
# Add extra link args
if self.extra_link_args:
ext.extra_link_args.extend(self.extra_link_args.split())
class InstallLib(install_lib):
user_options = install_lib.user_options + CEXT_OPTIONS + INSTALL_OPTIONS
boolean_options = ['byte-code-only']
def initialize_options(self):
install_lib.initialize_options(self)
self.byte_code_only = None
def finalize_options(self):
install_lib.finalize_options(self)
self.set_undefined_options('install',
('byte_code_only', 'byte_code_only'))
self.set_undefined_options('build', ('build_base', 'build_dir'))
def run(self):
self.build()
outfiles = self.install()
# (Optionally) compile .py to .pyc
if outfiles is not None and self.distribution.has_pure_modules():
self.byte_compile(outfiles)
if self.byte_code_only:
for source_file in outfiles:
if os.path.join('mysql', '__init__.py') in source_file:
continue
log.info("Removing %s", source_file)
os.remove(source_file)
class Install(install):
"""Install Connector/Python C Extension"""
description = "install MySQL Connector/Python"
user_options = install.user_options + CEXT_OPTIONS + INSTALL_OPTIONS + \
CEXT_STATIC_OPTIONS
boolean_options = ['byte-code-only', 'static']
need_ext = False
def initialize_options(self):
install.initialize_options(self)
self.extra_compile_args = None
self.extra_link_args = None
self.with_mysql_capi = None
self.byte_code_only = None
self.static = None
def finalize_options(self):
if self.static:
log.info("Linking C Extension statically with libraries")
self.distribution.cmdclass['build_ext'] = BuildExtStatic
if self.byte_code_only is None:
self.byte_code_only = False
build_ext_obj = self.distribution.get_command_obj('build_ext')
build_ext_obj.with_mysql_capi = self.with_mysql_capi
build_ext_obj.extra_compile_args = self.extra_compile_args
build_ext_obj.extra_link_args = self.extra_link_args
build_ext_obj.static = self.static
if self.with_mysql_capi:
self.need_ext = True
if not self.need_ext:
remove_cext(self.distribution)
install.finalize_options(self)
def run(self):
if not self.need_ext:
log.info("Not Installing MySQL C Extension")
else:
log.info("Installing MySQL C Extension")
install.run(self)

thorwhalen/ut | ml/skwrap/feature_extraction/dict_vectorizer.py | mit

__author__ = 'thor'
from sklearn.feature_extraction import DictVectorizer
from sklearn.externals import six
import numpy as np
from pandas import DataFrame
from collections import Counter
class IterDictVectorizer(DictVectorizer):
"""Transforms lists of feature-value mappings or rows of a dataframe to vectors.
It is like DictVectorizer (whose description was copied below), but:
(1) works with pandas DataFrame X input (rows become feature-value mappings dict)
(2) a minimum number of feature=value counts can be specified (by min_count)
(3) The fit is faster than with DictVectorizer (at least with DataFrame input)
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator: string, optional
Separator string used when constructing new features for one-hot
coding.
sparse: boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort: boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
min_count: positive float or int:
If min_count >= 1, min_count is the minimum number of feature=value count.
If min_count < 1, min_count represent the minimum proportion of the data that should have feature=value
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
>>> from ut.ml.skwrap.feature_extraction import IterDictVectorizer
>>> from pandas import DataFrame
>>> v = IterDictVectorizer(sparse=False)
>>> D = DataFrame([{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}])
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True, sort=True, min_count=0):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
self.min_count = min_count
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
feature_template = "{}" + self.separator + "{}"
if isinstance(X, DataFrame):
counts_of = dict()
for col, val in X.items():
counts_of[col] = Counter(val.dropna())
self.feature_counts_ = {}
_min_count = self.min_count
if self.min_count < 1:
_min_count *= len(X)
else:
_min_count = self.min_count
self.df_columns_ = set()
for k, v in counts_of.items():
for kk, vv in v.items():
if vv >= _min_count:
self.feature_counts_[feature_template.format(k, kk)] = vv
self.df_columns_.add(k)
feature_names = list(self.feature_counts_.keys())
else:
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = feature_template.format(f, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def transform(self, X, y=None):
if isinstance(X, DataFrame):
X = map(lambda x: x[1].dropna().to_dict(), X.iterrows())
return super(IterDictVectorizer, self).transform(X)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
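
# Minimal sketch of the min_count behaviour (hypothetical data): with
# min_count=2, a feature=value pair seen only once is dropped during fit.
#
#   df = DataFrame([{'f': 'a'}, {'f': 'a'}, {'f': 'b'}])
#   v = IterDictVectorizer(sparse=False, min_count=2)
#   v.fit(df)
#   # 'f=a' (count 2) is kept; 'f=b' (count 1) is absent from v.feature_names_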
class IterDictVectorizerWithText(object):
def __init__(self, dtype=np.float64, separator="=", sparse=True, sort=True, min_count=0,
text_vectorizers={}):
self.dict_vectorizer = IterDictVectorizer(
dtype=dtype, separator=separator, sparse=sparse, sort=sort, min_count=min_count
)
self.text_vectorizers = text_vectorizers
def fit(self, X, y=None):
# input validation
assert isinstance(X, DataFrame), "X must be a pandas DataFrame"
        if not set(self.text_vectorizers.keys()).issubset(X.columns):
            raise RuntimeError("The following columns were specified in text_vectorizers, but were not in X:\n" +
                               "  {}".format(set(self.text_vectorizers.keys()).difference(X.columns)))
# carry out the normal IterDictVectorizer.fit() for columns not in text_vectorizers
self.dict_vectorizer_cols_ = set(X.columns).difference(list(self.text_vectorizers.keys()))
self.dict_vectorizer.fit(X[self.dict_vectorizer_cols_])
self.vocabulary_ = self.dict_vectorizer.vocabulary_
# use the CounterVectorizers of text_vectorizers to fit the specified string columns
for col in set(X.columns).intersection(list(self.text_vectorizers.keys())):
self.text_vectorizers[col].fit(X[col])
offset = len(self.vocabulary_)
            self.vocabulary_ = dict(self.vocabulary_,
                                    **{k: v + offset for k, v in
                                       self.text_vectorizers[col].vocabulary_.items()})
self.feature_names_ = list(self.vocabulary_.keys())
def transform(self, X, y=None):
X1 = self.dict_vectorizer.transform(X[self.dict_vectorizer_cols_])
        X2 = np.hstack([self.text_vectorizers[col].transform(X[col])
                        for col in self.text_vectorizers])
return np.hstack((X1, X2))

pebble/spacel-provision | src/test/provision/orbit/test_provider.py | mit

from mock import MagicMock
from spacel.provision.orbit.provider import ProviderOrbitFactory
from test import BaseSpaceAppTest, ORBIT_REGION
TEST_PROVIDER = 'test'
class TestProviderOrbitFactory(BaseSpaceAppTest):
def setUp(self):
super(TestProviderOrbitFactory, self).setUp()
self.provider = MagicMock()
self.orbit_factory = ProviderOrbitFactory({
TEST_PROVIDER: self.provider
})
self.orbit.regions[ORBIT_REGION].provider = TEST_PROVIDER
def test_get_orbit(self):
self.orbit_factory.orbit(self.orbit)
self.provider.orbit.assert_called_once_with(self.orbit,
regions=[ORBIT_REGION])
def test_get_orbit_provider_not_found(self):
self.orbit.regions[ORBIT_REGION].provider = 'does-not-exist'
self.orbit_factory.orbit(self.orbit)
self.provider.orbit.assert_not_called()
def test_get(self):
orbit_factory = ProviderOrbitFactory.get(None, None, None, None, None,
None)
self.assertEqual(2, len(orbit_factory._providers))

tensorflow/addons | tensorflow_addons/image/dense_image_warp.py | apache-2.0

# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image warping using per-pixel flow vectors."""
import tensorflow as tf
from tensorflow_addons.utils import types
from typing import Optional
@tf.function
def interpolate_bilinear(
grid: types.TensorLike,
query_points: types.TensorLike,
indexing: str = "ij",
name: Optional[str] = None,
) -> tf.Tensor:
"""Similar to Matlab's interp2 function.
Finds values for query points on a grid using bilinear interpolation.
Args:
grid: a 4-D float `Tensor` of shape `[batch, height, width, channels]`.
query_points: a 3-D float `Tensor` of N points with shape
`[batch, N, 2]`.
indexing: whether the query points are specified as row and column (ij),
or Cartesian coordinates (xy).
name: a name for the operation (optional).
Returns:
values: a 3-D `Tensor` with shape `[batch, N, channels]`
Raises:
ValueError: if the indexing mode is invalid, or if the shape of the
inputs invalid.
"""
if indexing != "ij" and indexing != "xy":
raise ValueError("Indexing mode must be 'ij' or 'xy'")
with tf.name_scope(name or "interpolate_bilinear"):
grid = tf.convert_to_tensor(grid)
query_points = tf.convert_to_tensor(query_points)
# grid shape checks
grid_static_shape = grid.shape
grid_shape = tf.shape(grid)
if grid_static_shape.dims is not None:
if len(grid_static_shape) != 4:
raise ValueError("Grid must be 4D Tensor")
if grid_static_shape[1] is not None and grid_static_shape[1] < 2:
raise ValueError("Grid height must be at least 2.")
if grid_static_shape[2] is not None and grid_static_shape[2] < 2:
raise ValueError("Grid width must be at least 2.")
# query_points shape checks
query_static_shape = query_points.shape
query_shape = tf.shape(query_points)
if query_static_shape.dims is not None:
if len(query_static_shape) != 3:
raise ValueError("Query points must be 3 dimensional.")
query_hw = query_static_shape[2]
if query_hw is not None and query_hw != 2:
raise ValueError("Query points last dimension must be 2.")
batch_size, height, width, channels = (
grid_shape[0],
grid_shape[1],
grid_shape[2],
grid_shape[3],
)
num_queries = query_shape[1]
query_type = query_points.dtype
grid_type = grid.dtype
alphas = []
floors = []
ceils = []
index_order = [0, 1] if indexing == "ij" else [1, 0]
unstacked_query_points = tf.unstack(query_points, axis=2, num=2)
for i, dim in enumerate(index_order):
with tf.name_scope("dim-" + str(dim)):
queries = unstacked_query_points[dim]
size_in_indexing_dimension = grid_shape[i + 1]
# max_floor is size_in_indexing_dimension - 2 so that max_floor + 1
# is still a valid index into the grid.
max_floor = tf.cast(size_in_indexing_dimension - 2, query_type)
min_floor = tf.constant(0.0, dtype=query_type)
floor = tf.math.minimum(
tf.math.maximum(min_floor, tf.math.floor(queries)), max_floor
)
int_floor = tf.cast(floor, tf.dtypes.int32)
floors.append(int_floor)
ceil = int_floor + 1
ceils.append(ceil)
# alpha has the same type as the grid, as we will directly use alpha
# when taking linear combinations of pixel values from the image.
alpha = tf.cast(queries - floor, grid_type)
min_alpha = tf.constant(0.0, dtype=grid_type)
max_alpha = tf.constant(1.0, dtype=grid_type)
alpha = tf.math.minimum(tf.math.maximum(min_alpha, alpha), max_alpha)
# Expand alpha to [b, n, 1] so we can use broadcasting
# (since the alpha values don't depend on the channel).
alpha = tf.expand_dims(alpha, 2)
alphas.append(alpha)
flattened_grid = tf.reshape(grid, [batch_size * height * width, channels])
batch_offsets = tf.reshape(
tf.range(batch_size) * height * width, [batch_size, 1]
)
# This wraps tf.gather. We reshape the image data such that the
# batch, y, and x coordinates are pulled into the first dimension.
# Then we gather. Finally, we reshape the output back. It's possible this
# code would be made simpler by using tf.gather_nd.
def gather(y_coords, x_coords, name):
with tf.name_scope("gather-" + name):
linear_coordinates = batch_offsets + y_coords * width + x_coords
gathered_values = tf.gather(flattened_grid, linear_coordinates)
return tf.reshape(gathered_values, [batch_size, num_queries, channels])
# grab the pixel values in the 4 corners around each query point
top_left = gather(floors[0], floors[1], "top_left")
top_right = gather(floors[0], ceils[1], "top_right")
bottom_left = gather(ceils[0], floors[1], "bottom_left")
bottom_right = gather(ceils[0], ceils[1], "bottom_right")
# now, do the actual interpolation
with tf.name_scope("interpolate"):
interp_top = alphas[1] * (top_right - top_left) + top_left
interp_bottom = alphas[1] * (bottom_right - bottom_left) + bottom_left
interp = alphas[0] * (interp_bottom - interp_top) + interp_top
return interp
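
# Worked example (hypothetical values): querying the center of a 2x2 grid.
#   grid = tf.constant([[[[0.], [1.]], [[2.], [3.]]]])  # shape [1, 2, 2, 1]
#   query = tf.constant([[[0.5, 0.5]]])                 # shape [1, 1, 2]
#   interpolate_bilinear(grid, query)  # -> [[[1.5]]], the mean of the corners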
def _get_dim(x, idx):
if x.shape.ndims is None:
return tf.shape(x)[idx]
return x.shape[idx] or tf.shape(x)[idx]
@tf.function
def dense_image_warp(
image: types.TensorLike, flow: types.TensorLike, name: Optional[str] = None
) -> tf.Tensor:
"""Image warping using per-pixel flow vectors.
Apply a non-linear warp to the image, where the warp is specified by a
dense flow field of offset vectors that define the correspondences of
pixel values in the output image back to locations in the source image.
Specifically, the pixel value at `output[b, j, i, c]` is
`images[b, j - flow[b, j, i, 0], i - flow[b, j, i, 1], c]`.
The locations specified by this formula do not necessarily map to an int
index. Therefore, the pixel value is obtained by bilinear
interpolation of the 4 nearest pixels around
`(b, j - flow[b, j, i, 0], i - flow[b, j, i, 1])`. For locations outside
of the image, we use the nearest pixel values at the image boundary.
NOTE: The definition of the flow field above is different from that
of optical flow. This function expects the negative forward flow from
output image to source image. Given two images `I_1` and `I_2` and the
optical flow `F_12` from `I_1` to `I_2`, the image `I_1` can be
reconstructed by `I_1_rec = dense_image_warp(I_2, -F_12)`.
Args:
image: 4-D float `Tensor` with shape `[batch, height, width, channels]`.
flow: A 4-D float `Tensor` with shape `[batch, height, width, 2]`.
name: A name for the operation (optional).
Note that image and flow can be of type `tf.half`, `tf.float32`, or
`tf.float64`, and do not necessarily have to be the same type.
Returns:
A 4-D float `Tensor` with shape`[batch, height, width, channels]`
and same type as input image.
Raises:
ValueError: if `height < 2` or `width < 2` or the inputs have the wrong
number of dimensions.
"""
with tf.name_scope(name or "dense_image_warp"):
image = tf.convert_to_tensor(image)
flow = tf.convert_to_tensor(flow)
batch_size, height, width, channels = (
_get_dim(image, 0),
_get_dim(image, 1),
_get_dim(image, 2),
_get_dim(image, 3),
)
# The flow is defined on the image grid. Turn the flow into a list of query
# points in the grid space.
grid_x, grid_y = tf.meshgrid(tf.range(width), tf.range(height))
stacked_grid = tf.cast(tf.stack([grid_y, grid_x], axis=2), flow.dtype)
batched_grid = tf.expand_dims(stacked_grid, axis=0)
query_points_on_grid = batched_grid - flow
query_points_flattened = tf.reshape(
query_points_on_grid, [batch_size, height * width, 2]
)
# Compute values at the query points, then reshape the result back to the
# image grid.
interpolated = interpolate_bilinear(image, query_points_flattened)
interpolated = tf.reshape(interpolated, [batch_size, height, width, channels])
return interpolated
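
# Usage sketch (hypothetical shapes): a zero flow field is the identity warp.
#   image = tf.random.normal([1, 4, 4, 3])
#   flow = tf.zeros([1, 4, 4, 2])
#   warped = dense_image_warp(image, flow)  # numerically equal to `image`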
@tf.function(experimental_implements="addons:DenseImageWarp")
def dense_image_warp_annotated(
image: types.TensorLike, flow: types.TensorLike, name: Optional[str] = None
) -> tf.Tensor:
"""Similar to dense_image_warp but annotated with experimental_implements.
IMPORTANT: This is a temporary function and will be removed after TensorFlow's
next release.
This annotation make the serialized function detectable by the TFLite MLIR
converter and allow the converter to convert it to corresponding TFLite op.
However, with the annotation, this function cannot be used with backprop
under `tf.GradientTape` objects.
"""
return dense_image_warp(image, flow, name)

Lana-B/Pheno4T | madanalysis/layout/histogram_core.py | gpl-3.0

################################################################################
#
# Copyright (C) 2012-2013 Eric Conte, Benjamin Fuks
# The MadAnalysis development team, email: <[email protected]>
#
# This file is part of MadAnalysis 5.
# Official website: <https://launchpad.net/madanalysis5>
#
# MadAnalysis 5 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MadAnalysis 5 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MadAnalysis 5. If not, see <http://www.gnu.org/licenses/>
#
################################################################################
import logging
from math import sqrt
class HistogramCore:
def __init__(self):
import numpy
self.integral = 0
self.nevents = 0
self.nentries = 0
self.sumwentries = 0
self.sumw = 0
self.sumw2 = 0
self.sumwx = 0
self.sumw2x = 0
self.underflow = 0
self.overflow = 0
self.array = numpy.array([])
def ComputeIntegral(self):
self.integral = 0
for i in range(0,len(self.array)):
self.integral+=self.array[i]
self.integral += self.overflow
self.integral += self.underflow
def Print(self):
        logging.info('nevents='+str(self.nevents)+\
                     ' entries='+str(self.nentries))
logging.info('sumw='+str(self.sumw)+\
' sumw2='+str(self.sumw2)+\
' sumwx='+str(self.sumwx)+\
' sumw2x='+str(self.sumw2x))
logging.info('underflow='+str(self.underflow)+\
' overflow='+str(self.overflow))
def GetMean(self):
if self.sumw==0:
return 0.
else:
return self.sumwx / self.sumw
def GetRMS(self):
if self.sumw==0:
return 0.
else:
mean = self.GetMean()
return sqrt(abs(self.sumw2x/self.sumw - mean*mean))
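
# Quick sanity check (hypothetical accumulator values): with sumw=2, sumwx=6
# and sumw2x=20, GetMean() returns 6/2 = 3.0 and GetRMS() returns
# sqrt(20/2 - 3*3) = 1.0.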

Pandaaaa906/ChemErpSystem | ERP/templatetags/erp_extras.py | mit

# -*- coding: utf-8 -*-
from django import template
from django.db.models.query import QuerySet
import datetime
from django.contrib.auth.models import Group
from django.contrib.auth.models import User
from django.db.models import Q
register = template.Library()
@register.inclusion_tag('sidebar_tree.html')
def children_tag(person):
if isinstance(person, QuerySet):
children = person
else:
children = person.children.all()
#zip(children,map(children.content_type,children)
return {'children': children}
@register.filter(name='has_group')
def has_group(user, group_name):
return user.groups.filter(name=group_name).exists()
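
# Template usage sketch (hypothetical group name):
#   {% if request.user|has_group:"Sales" %} ... {% endif %}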
@register.filter(name='get_staff')
def get_staff(group_name):
group = Group.objects.get(name=group_name)
users = group.user_set.all()
return map(lambda x:x.id , users)
@register.filter(name='get_nameFromId')
def get_nameFromId(usrId):
if usrId:
user = User.objects.get(id=usrId)
return user.first_name+user.last_name
else:
return ""
@register.filter
def get_range( value ):
"""
Filter - returns a list containing range made from given value
Usage (in template):
<ul>{% for i in 3|get_range %}
<li>{{ i }}. Do something</li>
{% endfor %}</ul>
Results with the HTML:
<ul>
<li>0. Do something</li>
<li>1. Do something</li>
<li>2. Do something</li>
</ul>
Instead of 3 one may use the variable set in the views
"""
return range( value )
@register.filter
def get_fields(model):
return model._meta.get_fields() | apache-2.0 | 684,355,398,414,032,100 | 25.080645 | 66 | 0.667698 | false | 3.467811 | false | false | false |

rplevka/robottelo | tests/upgrades/test_repository.py | gpl-3.0

"""Test for Repository related Upgrade Scenarios
:Requirement: Upgraded Satellite
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: Repositories
:Assignee: tpapaioa
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import os
from fabric.api import execute
from fabric.api import run
from nailgun import entities
from upgrade.helpers.docker import docker_execute_command
from upgrade_tests import post_upgrade
from upgrade_tests import pre_upgrade
from upgrade_tests.helpers.scenarios import create_dict
from upgrade_tests.helpers.scenarios import dockerize
from upgrade_tests.helpers.scenarios import get_entity_data
from upgrade_tests.helpers.scenarios import rpm1
from upgrade_tests.helpers.scenarios import rpm2
from robottelo import ssh
from robottelo.api.utils import create_sync_custom_repo
from robottelo.api.utils import promote
from robottelo.config import settings
from robottelo.logging import logger
from robottelo.upgrade_utility import create_repo
from robottelo.upgrade_utility import host_location_update
from robottelo.upgrade_utility import install_or_update_package
from robottelo.upgrade_utility import publish_content_view
UPSTREAM_USERNAME = 'rTtest123'
DOCKER_VM = settings.upgrade.docker_vm
FILE_PATH = '/var/www/html/pub/custom_repo/'
CUSTOM_REPO = f'https://{settings.server.hostname}/pub/custom_repo'
_, RPM1_NAME = os.path.split(rpm1)
_, RPM2_NAME = os.path.split(rpm2)
class TestScenarioRepositoryUpstreamAuthorizationCheck:
"""This test scenario is to verify the upstream username in post-upgrade for a custom
repository which does have a upstream username but not password set on it in pre-upgrade.
Test Steps:
1. Before Satellite upgrade, Create a custom repository and sync it.
2. Set the upstream username on same repository using foreman-rake.
3. Upgrade Satellite.
4. Check if the upstream username value is removed for same repository.
"""
@pre_upgrade
def test_pre_repository_scenario_upstream_authorization(self):
"""Create a custom repository and set the upstream username on it.
:id: preupgrade-11c5ceee-bfe0-4ce9-8f7b-67a835baf522
:steps:
1. Create a custom repository and sync it.
2. Set the upstream username on same repository using foreman-rake.
:expectedresults:
1. Upstream username should be set on repository.
:BZ: 1641785
"""
org = entities.Organization().create()
custom_repo = create_sync_custom_repo(org_id=org.id)
rake_repo = f'repo = Katello::Repository.find_by_id({custom_repo})'
rake_username = f'; repo.root.upstream_username = "{UPSTREAM_USERNAME}"'
rake_repo_save = '; repo.save!(validate: false)'
result = run(f"echo '{rake_repo}{rake_username}{rake_repo_save}'|foreman-rake console")
assert 'true' in result
global_dict = {self.__class__.__name__: {'repo_id': custom_repo}}
create_dict(global_dict)
@post_upgrade(depend_on=test_pre_repository_scenario_upstream_authorization)
def test_post_repository_scenario_upstream_authorization(self):
"""Verify upstream username for pre-upgrade created repository.
:id: postupgrade-11c5ceee-bfe0-4ce9-8f7b-67a835baf522
:steps:
1. Verify upstream username for pre-upgrade created repository using
foreman-rake.
:expectedresults:
1. upstream username should not exists on same repository.
:BZ: 1641785
"""
repo_id = get_entity_data(self.__class__.__name__)['repo_id']
rake_repo = f'repo = Katello::RootRepository.find_by_id({repo_id})'
rake_username = '; repo.root.upstream_username'
result = run(f"echo '{rake_repo}{rake_username}'|foreman-rake console")
assert UPSTREAM_USERNAME not in result
class TestScenarioCustomRepoCheck:
"""Scenario test to verify if we can create a custom repository and consume it
via client then we alter the created custom repository and satellite will be able
to sync back the repo.
Test Steps:
1. Before Satellite upgrade.
2. Create new Organization and Location.
3. Create Product, custom repo, cv.
4. Create activation key and add subscription in it.
5. Create a content host, register and install package on it.
6. Upgrade Satellite.
7. Remove Old package and add new package into custom repo.
8. Sync repo, publish new version of cv.
9. Try to install new package on client.
BZ: 1429201,1698549
"""
@pre_upgrade
def test_pre_scenario_custom_repo_check(self):
"""This is pre-upgrade scenario test to verify if we can create a
custom repository and consume it via content host.
:id: preupgrade-eb6831b1-c5b6-4941-a325-994a09467478
:steps:
1. Before Satellite upgrade.
2. Create new Organization, Location.
3. Create Product, custom repo, cv.
4. Create activation key and add subscription.
5. Create a content host, register and install package on it.
:expectedresults:
1. Custom repo is created.
2. Package is installed on Content host.
"""
org = entities.Organization().create()
loc = entities.Location(organization=[org]).create()
lce = entities.LifecycleEnvironment(organization=org).create()
product = entities.Product(organization=org).create()
create_repo(rpm1, FILE_PATH)
repo = entities.Repository(product=product.id, url=CUSTOM_REPO).create()
repo.sync()
content_view = publish_content_view(org=org, repolist=repo)
promote(content_view.version[0], lce.id)
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/{}/{}/custom/{}/{}/'
'Packages/b/|grep {}'.format(
org.label, lce.name, content_view.label, product.label, repo.label, RPM1_NAME
)
)
assert result.return_code == 0
assert len(result.stdout) >= 1
subscription = entities.Subscription(organization=org).search(
query={'search': f'name={product.name}'}
)[0]
ak = entities.ActivationKey(
content_view=content_view, organization=org.id, environment=lce
).create()
ak.add_subscriptions(data={'subscription_id': subscription.id})
rhel7_client = dockerize(ak_name=ak.name, distro='rhel7', org_label=org.label)
client_container_id = [value for value in rhel7_client.values()][0]
client_container_name = [key for key in rhel7_client.keys()][0]
host_location_update(
client_container_name=client_container_name, logger_obj=logger, loc=loc
)
status = execute(
docker_execute_command,
client_container_id,
'subscription-manager identity',
host=DOCKER_VM,
)[DOCKER_VM]
assert org.name in status
install_or_update_package(client_hostname=client_container_id, package=RPM1_NAME)
scenario_dict = {
self.__class__.__name__: {
'content_view_name': content_view.name,
'lce_id': lce.id,
'lce_name': lce.name,
'org_label': org.label,
'prod_label': product.label,
'rhel_client': rhel7_client,
'repo_name': repo.name,
}
}
create_dict(scenario_dict)
@post_upgrade(depend_on=test_pre_scenario_custom_repo_check)
def test_post_scenario_custom_repo_check(self):
"""This is post-upgrade scenario test to verify if we can alter the
created custom repository and satellite will be able to sync back
the repo.
:id: postupgrade-5c793577-e573-46a7-abbf-b6fd1f20b06e
:steps:
1. Remove old and add new package into custom repo.
2. Sync repo , publish the new version of cv.
3. Try to install new package on client.
:expectedresults: Content host should able to pull the new rpm.
"""
entity_data = get_entity_data(self.__class__.__name__)
client = entity_data.get('rhel_client')
client_container_id = list(client.values())[0]
content_view_name = entity_data.get('content_view_name')
lce_id = entity_data.get('lce_id')
lce_name = entity_data.get('lce_name')
org_label = entity_data.get('org_label')
prod_label = entity_data.get('prod_label')
repo_name = entity_data.get('repo_name')
create_repo(rpm2, FILE_PATH, post_upgrade=True, other_rpm=rpm1)
repo = entities.Repository(name=repo_name).search()[0]
repo.sync()
content_view = entities.ContentView(name=content_view_name).search()[0]
content_view.publish()
content_view = entities.ContentView(name=content_view_name).search()[0]
promote(content_view.version[-1], lce_id)
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/{}/{}/custom/{}/{}/'
'Packages/c/| grep {}'.format(
org_label, lce_name, content_view.label, prod_label, repo.label, RPM2_NAME
)
)
assert result.return_code == 0
assert len(result.stdout) >= 1
install_or_update_package(client_hostname=client_container_id, package=RPM2_NAME)

YevgeniyaK/python_training | fixture/group.py | mit

from model.group import Group
class GroupHelper:
def __init__(self, app):
self.app = app
def open_group_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
wd.find_element_by_link_text("groups").click()
def return_to_group_page(self):
wd = self.app.wd
wd.find_element_by_link_text("group page").click()
'''
    Form for creating a new group
'''
def create(self, group):
wd = self.app.wd
self.open_group_page()
# init group greation
wd.find_element_by_name("new").click()
self.fill_group_form(group)
# submit group creation
wd.find_element_by_name("submit").click()
self.return_to_group_page()
self.group_cache = None
'''
    Delete the first group
'''
def delete_first_group(self):
self.delete_group_by_index(0)
'''
    Delete a group by index
'''
def delete_group_by_index(self, index):
wd = self.app.wd
self.open_group_page()
self.select_group_by_index(index)
#submit deletion
wd.find_element_by_name("delete").click()
self.return_to_group_page()
self.group_cache = None
'''
    Modify a group
'''
def change_group(self, group):
wd = self.app.wd
self.open_group_page()
wd.find_element_by_name("edit").click()
self.fill_group_form(group)
wd.find_element_by_name("update").click()
self.return_to_group_page()
self.group_cache = None
'''
    Modify the first group
'''
def modify_first_group(self):
self.modify_group_by_index(0)
'''
    Modify a group by index
'''
def modify_group_by_index(self, index, new_group_data):
wd = self.app.wd
self.open_group_page()
self.select_group_by_index(index)
# modification
wd.find_element_by_name("edit").click()
# fill
self.fill_group_form(new_group_data)
# submit
wd.find_element_by_name("update").click()
self.return_to_group_page()
self.group_cache = None
'''
fill group form
'''
def fill_group_form(self, group):
wd = self.app.wd
self.change_field_value("group_name", group.name)
self.change_field_value("group_header", group.header)
self.change_field_value("group_footer", group.footer)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
'''
    separate method for selecting the first group
'''
def select_first_group(self):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
'''
    method for selecting a group by index
'''
def select_group_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
'''
    check for the existence of groups
'''
def count(self):
wd = self.app.wd
self.open_group_page()
return len(wd.find_elements_by_name("selected[]"))
group_cache = None
'''
    Get the list of groups
'''
def get_group_list(self):
if self.group_cache is None:
wd = self.app.wd
self.open_group_page()
self.group_cache = []
for element in wd.find_elements_by_css_selector("span.group"):
text = element.text
id = element.find_element_by_name("selected[]").get_attribute("value")
self.group_cache.append(Group(name = text.strip(), id = id))
return list(self.group_cache)
def delete_group_by_id(self, id):
wd = self.app.wd
self.open_group_page()
self.select_group_by_id(id)
# submit deletion
wd.find_element_by_name("delete").click()
self.return_to_group_page()
self.group_cache = None
def select_group_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def modify_group_by_id(self, id, new_group_data):
wd = self.app.wd
self.open_group_page()
self.select_group_by_id(id)
# modification
wd.find_element_by_name("edit").click()
# fill
self.fill_group_form(new_group_data)
# submit
wd.find_element_by_name("update").click()
self.return_to_group_page()
self.group_cache = None
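
# Usage sketch (hypothetical fixture wiring): with an application object that
# exposes this helper as app.group:
#   app.group.create(Group(name="qa", header="h", footer="f"))
#   app.group.modify_group_by_index(0, Group(name="qa_renamed"))
#   app.group.delete_group_by_index(0)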
| apache-2.0 | -8,982,111,332,978,443,000 | 26.114943 | 100 | 0.569097 | false | 3.128647 | false | false | false |

patcorwin/fossil | pdil/tool/fossil/rigging/splineTwist.py

from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import math
from pymel.core import curve, cluster, delete, dt, duplicate, expression, group, hide, ikHandle, insertKnotCurve, joint, move, orientConstraint, parent, parentConstraint, pointConstraint, xform
from ....add import simpleName, shortName
from .... import core
from .... import nodeApi
from .. import controllerShape
from .. import space
from ..cardRigging import MetaControl, ParamInfo
from . import _util as util
from .. import node
class OrientMode:
CLOSEST_JOINT = 'closest_joint'
WORLD = 'world'
AS_FIRST_JOINT = 'as_first_joint'
class TwistStyle:
'''
    Used by splineIk. Advanced uses advanced twist while the others determine
    which rotation axis drives the twist attribute.
'''
ADVANCED = 'Advanced'
X = 'X'
NEG_X = '-X'
Y = 'Y'
NEG_Y = '-Y'
Z = 'Z'
NEG_Z = '-Z'
@classmethod
def asChoices(cls):
choices = OrderedDict()
choices[cls.ADVANCED] = cls.ADVANCED
choices[cls.X] = cls.X
choices[cls.NEG_X] = cls.NEG_X
choices[cls.Y] = cls.Y
choices[cls.NEG_Y] = cls.NEG_Y
choices[cls.Z] = cls.Z
choices[cls.NEG_Z] = cls.NEG_Z
return choices
@util.adds('twist', 'stretch')
@util.defaultspec( {'shape': 'sphere', 'size': 10, 'color': 'blue 0.22'} )
def buildSplineTwist(start, end, controlCountOrCrv=4, twistInfDist=0, simplifyCurve=True,
tipBend=True, sourceBend=True, matchOrient=True, allowOffset=True, # noqa e128
                     useLeadOrient=False, # This is a backwards-compatible option, mutually exclusive with matchOrient
twistStyle=TwistStyle.ADVANCED, duplicateCurve=True,
controlOrient=OrientMode.CLOSEST_JOINT,
name='', groupName='', controlSpec={}):
'''
Make a spline controller from `start` to `end`.
    :param int twistInfDist: Zero (the default) makes the twist controls fall off before hitting each other.
Otherwise it is the number of joints on either side it will influence.
    :param bool simplifyCurve: Only used if # of cvs is specified. Turning it
    on will likely result in the curve not matching the existing joint positions
    but will be more evenly spaced per control.
:param bool tipBend: If True, an extra cv will be added at the second to
last joint, controlled by the last controller to ease out.
##:param bool applyDirectly: If True, rig the given joints, do not make a duplicate chain
:param bool useLeadOrient: If True, the controllers will be aligned the same
as the first joint.
**NOTE** I think this option only exists to preserve previous builds, this is pretty dumb
:param bool matchOrient: Does trueZero on the start and end. I'm not sure this makes sense.
.. todo::
* Add the same spline chain +X towards child that the neck has and test out advancedTwist()
* See if I can identify the closest joint to a control and orient to that
* The first joint has parent AND local, which are the same thing, keep this for convenience of selecting all the controls and editing attrs?
* Test specifying your own curve
* There is a float division error that can happen if there are too many control cvs.
* Verify twists work right with unsimplified curves (hint, I don't think they do).
'''
matchOrient = False
useLeadOrient = False
if isinstance( controlCountOrCrv, int ):
assert controlCountOrCrv > 3, "controlCount must be at least 4"
# The axis to twist and stretch on.
jointAxis = util.identifyAxis( start.listRelatives(type='joint')[0] )
# Make a duplicate chain for the IK that will also stretch.
stretchingChain = util.dupChain( start, end, '{0}_stretch' )
# &&& NOTE! This might affect advanced twist in some way.
# If the chain is mirrored, we need to reorient to point down x so the
# spline doesn't mess up when the main control rotates
if stretchingChain[1].tx.get() < 0:
        # Despite aggressive zeroing of the source, the dup can still end up slightly
# off zero so force it.
for jnt in stretchingChain:
jnt.r.set(0, 0, 0)
joint( stretchingChain[0], e=True, oj='xyz', secondaryAxisOrient='yup', zso=True, ch=True)
joint( stretchingChain[-1], e=True, oj='none')
if isinstance( controlCountOrCrv, int ):
mainIk, _effector, crv = ikHandle( sol='ikSplineSolver',
sj=stretchingChain[0],
ee=stretchingChain[-1],
ns=controlCountOrCrv - 3,
simplifyCurve=simplifyCurve)
else:
if duplicateCurve:
crv = duplicate(controlCountOrCrv)[0]
else:
crv = controlCountOrCrv
mainIk, _effector = ikHandle( sol='ikSplineSolver',
sj=stretchingChain[0],
ee=stretchingChain[-1],
ccv=False,
pcv=False)
crv.getShape().worldSpace[0] >> mainIk.inCurve
hide(mainIk)
mainIk.rename( simpleName(start, "{0}_ikHandle") )
crv.rename( simpleName(start, "{0}_curve") )
if not name:
name = util.trimName(start)
if name.count(' '):
name, endName = name.split()
else:
endName = ''
# Only add a tipBend cv if number of cvs was specified.
if tipBend and isinstance( controlCountOrCrv, int ):
currentTrans = [ xform(cv, q=True, ws=True, t=True) for cv in crv.cv ]
insertKnotCurve( crv.u[1], nk=1, add=False, ib=False, rpo=True, cos=True, ch=True)
for pos, cv in zip(currentTrans, crv.cv[:-2]):
xform( cv, ws=True, t=pos )
xform( crv.cv[-2], ws=True, t=xform(end.getParent(), q=True, ws=True, t=True) )
xform( crv.cv[-1], ws=True, t=currentTrans[-1] )
# Only add a sourceBend cv if number of cvs was specified.
if sourceBend and isinstance( controlCountOrCrv, int ):
currentTrans = [ xform(cv, q=True, ws=True, t=True) for cv in crv.cv ]
insertKnotCurve( crv.u[1.2], nk=1, add=False, ib=False, rpo=True, cos=True, ch=True) # I honestly don't know why, but 1.2 must be different than 1.0
for pos, cv in zip(currentTrans[1:], crv.cv[2:]):
xform( cv, ws=True, t=pos )
xform( crv.cv[0], ws=True, t=currentTrans[0] )
xform( crv.cv[1], ws=True, t=xform(stretchingChain[1], q=True, ws=True, t=True) )
grp = group(em=True, p=node.mainGroup(), n=start.name() + "_splineTwist")
controls = util.addControlsToCurve(name + 'Ctrl', crv, controlSpec['main'])
for ctrl in controls:
core.dagObj.zero(ctrl).setParent( grp )
if controlOrient == OrientMode.CLOSEST_JOINT:
# Use the real chain to match orientations since the stretching chain might reorient to compensate for mirroring.
jointPos = {j: dt.Vector(xform(j, q=True, ws=True, t=True)) for j in util.getChain(start, end)}
aveSpacing = util.chainLength(stretchingChain) / (len(stretchingChain) - 1)
for ctrl in controls:
cpos = dt.Vector(xform(ctrl, q=True, ws=True, t=True))
distances = [ ( (jpos - cpos).length() / aveSpacing, j) for j, jpos in jointPos.items() ]
distances.sort()
''' Just use the closest joint if within 10% of the average spacing
Possible future improvement, look at two joints, and determine if
the control is between them and inbetween the orientation.
'''
if True: # distances[0][0] < 100:
r = xform(distances[0][1], q=True, ro=True, ws=True)
with core.dagObj.Solo(ctrl):
xform(ctrl, ro=r, ws=True)
core.dagObj.zero(ctrl)
"""
# Otherwise split the distances by the percentages
else:
#m1 = xform(distances[0][1], q=True, m=True, ws=True)
#m2 = xform(distances[1][1], q=True, m=True, ws=True)
distA, jointA = distances[0]
distB, jointB = distances[1]
x, y, z = midOrient2(jointA, jointB)
matrix = list(x) + [0] + list(y) + [0] + list(z) + [0] + xform(ctrl, q=True, ws=True, t=True) + [1.0]
print( ctrl, 'to', jointA, jointB )
with Solo(ctrl):
xform(ctrl, ws=True, m=matrix)
# Need to improve my matrix skills, for now it's easy enough to just rotate it
#rotate(ctrl, [0, 180, 0], os=1, r=1)
core.dagObj.zero(ctrl)
"""
if endName:
controls[-1].rename(endName + 'Ctrl')
if matchOrient:
util.trueZeroSetup(start, controls[0])
util.trueZeroSetup(end, controls[-1])
if tipBend:
if useLeadOrient and not matchOrient:
controls[-1].setRotation( end.getRotation(space='world'), space='world' )
parent( controls[-2].getChildren(), controls[-1] )
name = controls[-2].name()
delete( core.dagObj.zero(controls[-2]) )
if not endName:
controls[-1].rename(name)
controls[-2] = controls[-1]
controls.pop()
#core.dagObj.zero(controls[-2]).setParent(controls[-1])
#channels = [t + a for t in 'trs' for a in 'xyz']
#for channel in channels:
# controls[-2].attr( channel ).setKeyable(False)
# controls[-2].attr( channel ).lock()
if sourceBend:
names = []
for ctrl in controls[1:-1]:
names.append( ctrl.name() )
ctrl.rename( '__temp' )
endNum = -1 if endName else None
for name, cur in zip(names, controls[2:endNum] ):
cur.rename(name)
if useLeadOrient and not matchOrient:
controls[0].setRotation( start.getRotation(space='world'), space='world' )
parent( controls[1].getChildren(), controls[0] )
delete( core.dagObj.zero(controls[1]) )
del controls[1]
controls[0] = nodeApi.RigController.convert(controls[0])
controls[0].container = grp
stretchAttr, jointLenMultiplier = util.makeStretchySpline(controls[0], mainIk)
connectingCurve = addConnectingCurve(controls)
controls[0].visibility >> connectingCurve.visibility
# Make twist for everything but hide them all and drive the ones that overlap
# with spline controllers by the spline control.
if not twistInfDist:
numJoints = countJoints(start, end)
        # Ceil the quotient (not just the numerator) so the influence spans all joints.
        twistInfDist = int(math.ceil((numJoints - len(controls)) / float(len(controls) - 1)))
twistInfDist = max(1, twistInfDist)
noInherit = group(em=True, p=grp, n='NoInheritTransform')
core.dagObj.lockTrans(noInherit)
core.dagObj.lockRot(noInherit)
core.dagObj.lockScale(noInherit)
noInherit.inheritsTransform.set(False)
noInherit.inheritsTransform.lock()
# &&& If simplify curve is ON, the last joint gets constrained to the spinner?
# Otherwise it gets constrained to the offset or stretch joint, which I think is correct.
if allowOffset:
# If allowOffset, make another chain to handle the difference in joint positions.
offsetChain = util.dupChain( start, end, '{0}_offset' )
offsetChain[0].setParent(noInherit)
hide(offsetChain[0])
twists, constraints = addTwistControls( offsetChain, start, end, twistInfDist)
finalRigJoint = offsetChain[-1]
else:
twists, constraints = addTwistControls( stretchingChain, start, end, twistInfDist )
finalRigJoint = stretchingChain[-1]
# Constrain the end to the last controller so it doesn't pop off at all,
# but still respect the stretch attr.
pointConstraint(finalRigJoint, end, e=True, rm=True)
    # Make a proxy that allows respecting whether stretch is active or not.
endProxy = duplicate(end, po=True)[0]
endProxy.rename('endProxy')
hide(endProxy)
endProxy.setParent(grp)
stretchAttr >> core.constraints.pointConst( controls[-1], endProxy, mo=True )
core.math.opposite(stretchAttr) >> core.constraints.pointConst( finalRigJoint, endProxy )
constraints.point >> core.constraints.pointConst( endProxy, end )
hide(twists)
numControls = len(controls)
numTwists = len(twists)
for i, ctrl in enumerate(controls):
index = int(round( i * ((numTwists - 1) / (numControls - 1)) ))
util.drive( ctrl, 'twist', twists[index].attr('r' + jointAxis) )
space.add( ctrl, start.getParent(), 'local' )
parents = [start.getParent()] + controls[:-1]
stretchingChain[0].setParent(noInherit)
crv.setParent(noInherit)
hide(crv, stretchingChain[0])
connectingCurve.setParent( noInherit )
mainIk.setParent(grp)
# Do not want to scale but let rotate for "fk-like" space mode
for ctrl, _parent in zip(controls, parents):
core.dagObj.lockScale( ctrl )
if useLeadOrient:
ctrl.setRotation( start.getRotation(space='world'), space='world' )
core.dagObj.zero(ctrl)
space.addMain(ctrl)
space.add( ctrl, _parent, 'parent')
for i, ctrl in enumerate(controls[1:]):
controls[0].subControl[str(i)] = ctrl
# Must constrain AFTER controls (possibly) get orientd
orientConstraint( controls[-1], finalRigJoint, mo=True )
# Setup advanced twist
if twistStyle == TwistStyle.ADVANCED:
# &&& Test using advancedTwist() to replace the code beloew
util.advancedTwist(stretchingChain[0], stretchingChain[1], controls[0], controls[-1], mainIk)
'''
startAxis = duplicate( start, po=True )[0]
startAxis.rename( 'startAxis' )
startAxis.setParent( controls[0] )
endAxis = duplicate( start, po=True )[0]
endAxis.rename( 'endAxis' )
endAxis.setParent( controls[-1] )
endAxis.t.set(0, 0, 0)
mainIk.dTwistControlEnable.set(1)
mainIk.dWorldUpType.set(4)
startAxis.worldMatrix[0] >> mainIk.dWorldUpMatrix
endAxis.worldMatrix[0] >> mainIk.dWorldUpMatrixEnd
hide(startAxis, endAxis)
'''
else:
if twistStyle == TwistStyle.X:
controls[-1].rx >> mainIk.twist
elif twistStyle == TwistStyle.NEG_X:
core.math.multiply(controls[-1].rx, -1.0) >> mainIk.twist
elif twistStyle == TwistStyle.Y:
controls[-1].ry >> mainIk.twist
elif twistStyle == TwistStyle.NEG_Y:
core.math.multiply(controls[-1].ry, -1.0) >> mainIk.twist
elif twistStyle == TwistStyle.Z:
controls[-1].rz >> mainIk.twist
elif twistStyle == TwistStyle.NEG_Z:
core.math.multiply(controls[-1].rz, -1.0) >> mainIk.twist
# To make .twist work, the chain needs to follow parent joint
follow = group(em=True, p=grp)
target = start.getParent()
core.dagObj.matchTo(follow, stretchingChain[0])
parentConstraint( target, follow, mo=1 )
follow.rename(target + '_follow')
stretchingChain[0].setParent(follow)
# Constraint the offset (if exists) to the stretch last to account for any adjustments.
if allowOffset:
util.constrainAtoB(offsetChain[:-1], stretchingChain[:-1])
pointConstraint(stretchingChain[-1], offsetChain[-1], mo=True)
return controls[0], constraints
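# Illustrative call (sketch; the joint names are hypothetical and a Maya scene
# with a start->end joint chain is assumed):
#   from pymel.core import PyNode
#   ctrl, constraints = buildSplineTwist(PyNode('spine01'), PyNode('spine05'),
#                                        controlCountOrCrv=4, twistStyle=TwistStyle.ADVANCED)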
def addTwistControls(controlChain, boundChain, boundEnd, influenceDist=3):
'''
Put a rotation controller under each child of the controlChain to drive .rz
of the boundChain. They must both be the same size.
:param Joint controlChain: The first joint of the controlling rig (ideally pruned)
:param Joint boundChain: The first joint of joints being controlled by the spline.
:param Joint boundEnd: The last joint in the bound chain, used to address possible branching.
:param int influenceDist: How many adjacent joints are influenced (total #
is 2x since it influences both directions).
'''
obj = controlChain[0]
target = boundChain
#controlJoints = getChain( controlChain, findChild(controlChain, shortName(boundEnd)) )
controlJoints = controlChain
boundJoints = util.getChain( boundChain, util.findChild(boundChain, shortName(boundEnd)) )
    assert len(controlJoints) == len(boundJoints), "Failure when adding twist controls, somehow the chains don't match length, controls {0} != {1}".format( len(controlJoints), len(boundJoints) )
controls = []
groups = []
pointConstraints = []
orientConstraints = []
for i, (obj, target) in enumerate(zip(controlJoints, boundJoints)):
c = controllerShape.simpleCircle()
c.setParent(obj)
c.t.set(0, 0, 0)
c.r.set(0, 0, 0)
controls.append(c)
spinner = group(em=True, name='spinner%i' % i, p=target)
spinner.r.set(0, 0, 0)
spinner.setParent(obj)
spinner.t.set(0, 0, 0)
# Aligning the spinners to the bound joint means we don't have to offset
# the orientConstraint which means nicer numbers.
# spinner.setRotation( target.getRotation(space='world'), space='world' )
groups.append(spinner)
pointConstraints.append( core.constraints.pointConst( obj, target, maintainOffset=False ) )
orientConstraints.append( core.constraints.orientConst( spinner, target, maintainOffset=False ) )
children = obj.listRelatives(type='joint')
if children:
obj = children[0]
else:
obj = None
break
for pSrc, pDest in zip( pointConstraints[:-1], pointConstraints[1:]):
pSrc >> pDest
for oSrc, oDest in zip( orientConstraints[:-1], orientConstraints[1:]):
oSrc >> oDest
    # &&& Pad with None so each spinner sees a full window of influenceRange controls.
bigList = [None] * influenceDist + controls + [None] * influenceDist
influenceRange = (influenceDist * 2) + 1
axis = util.identifyAxis(controlChain[0].listRelatives(type='joint')[0])
exp = []
for i, spinner in enumerate(groups):
exp.append(driverExpression( spinner, bigList[i: i + influenceRange], axis ))
expression( s=';\n'.join(exp) )
return controls, util.ConstraintResults( pointConstraints[0], orientConstraints[0] )
class SplineTwist(MetaControl):
''' Spline IK that provides control to twist individual sections. '''
ik_ = 'pdil.tool.fossil.rigging.splineTwist.buildSplineTwist'
ikInput = OrderedDict( [
('controlCountOrCrv', [
ParamInfo( 'CV count', 'How many cvs to use in auto generated curve', ParamInfo.INT, default=4, min=4 ),
ParamInfo( 'Curve', 'A nurbs curve to use for spline', ParamInfo.NODE_0 ),
] ),
('simplifyCurve',
ParamInfo( 'Simplify Curve', 'If True, the curve cvs will space out evenly, possibly altering the postions', ParamInfo.BOOL, default=True) ),
('twistInfDist',
ParamInfo( 'Twist influence', 'How many joints on one side are influenced by the twisting, zero means it is done automatically.', ParamInfo.INT, default=0, min=0) ),
('tipBend',
ParamInfo( 'Tip Bend', 'The tip control should influence the ease out bend', ParamInfo.BOOL, default=True) ),
('sourceBend',
ParamInfo( 'Source Bend', 'The source control should influence the ease in bend', ParamInfo.BOOL, default=True) ),
('matchOrient',
ParamInfo( 'Match Orient', "First and last controller are set to TrueZero'd", ParamInfo.BOOL, default=True) ),
('useLeadOrient',
ParamInfo( 'Lead Orient', 'The controls have the same orientation as the first joint', ParamInfo.BOOL, default=False) ),
('allowOffset',
ParamInfo( 'Allow Offset', 'If you Simplyify Curve, the joints will slightly shift unless you Allow Offset or the joints are straight', ParamInfo.BOOL, default=False) ),
('twistStyle',
ParamInfo( 'Twist Style', '0 = advanced, 1=x, 2=-x 3=y ...', ParamInfo.ENUM, enum=TwistStyle.asChoices(), default=TwistStyle.ADVANCED ) ),
('name',
ParamInfo( 'Name', 'Name', ParamInfo.STR, '')),
] )
fkArgs = {'translatable': True}
@classmethod
def readIkKwargs(cls, card, isMirroredSide, sideAlteration=lambda **kwargs: kwargs, kinematicType='ik'):
'''
Overriden to handle if a custom curve was given, which then needs to be duplicated, mirrored and
fed directly into the splineTwist.
'''
kwargs = cls.readKwargs(card, isMirroredSide, sideAlteration, kinematicType='ik')
if isMirroredSide:
if 'controlCountOrCrv' in kwargs and not isinstance( kwargs['controlCountOrCrv'], int ):
crv = kwargs['controlCountOrCrv']
crv = duplicate(crv)[0]
kwargs['controlCountOrCrv'] = crv
move( crv.sp, [0, 0, 0], a=True )
move( crv.rp, [0, 0, 0], a=True )
crv.sx.set(-1)
kwargs['duplicateCurve'] = False
return kwargs
def addConnectingCurve(objs):
'''
Given a list of objects, make a curve that links all of them.
'''
crv = curve( d=1, p=[(0, 0, 0)] * len(objs) )
grp = group(crv, n='connectingCurve')
for i, obj in enumerate(objs):
handle = cluster(crv.cv[i])[1]
pointConstraint( obj, handle )
handle.setParent( grp )
hide(handle)
crv.getShape().overrideEnabled.set( 1 )
crv.getShape().overrideDisplayType.set( 2 )
return grp
def countJoints(start, end):
count = 2
p = end.getParent()
while p and p != start:
p = p.getParent()
count += 1
if not p:
return 0
return count
def driverExpression( driven, controls, axis ):
'''
    The `driven` node's rotation on the given axis will be driven by the list of `controls`.
`controls` is a list of objects, and optional empty entries.
Example, if you have joints, A B C and controls X Y Z, you would do:
driverExpression( A, [None, X, Y] )
driverExpression( B, [X, Y, Z] )
driverExpression( C, [Y, Z, None] )
This means A will be fully influenced by X, and partially by Y.
B is fully influenced by Y and partially by X and Z.
'''
powers = calcInfluence(controls)
exp = []
for power, ctrl in zip(powers, controls):
if ctrl:
exp.append( '{0}.r{axis} * {1}'.format(ctrl, power, axis=axis) )
return '{0}.r{axis} = {1};'.format( driven, ' + '.join(exp), axis=axis )
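# For example, with three controls calcInfluence() yields powers [0.5, 1.0, 0.5],
# so (hypothetical node names):
#   driverExpression('spinner1', [ctrlA, ctrlB, ctrlC], 'z')
# returns:
#   'spinner1.rz = ctrlA.rz * 0.5 + ctrlB.rz * 1.0 + ctrlC.rz * 0.5;'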
def calcInfluence( controls ):
'''
Given a list (Maybe change to a number?) returns a list of power falloffs.
controls can have None placeholders
power falls off to end of controls
low upper
v v
0 1 2 3 4
# Result: [0.5, 0.75, 1.0, 0.75, 0.5]
low upper
v v
0 1 2 3 4 5
# Result: [0.5, 0.75, 1.0, 1.0, 0.75, 0.5]
'''
    count = len(controls)  # avoid shadowing the builtin max
if len(controls) % 2 == 0:
upper = int(len(controls) / 2 + 1)
lower = upper - 2
else:
upper = int(len(controls) / 2 + 1)
lower = upper - 1
delta = 1 / float(lower) * 0.5
powers = [1.0] * len(controls)
#for i, (lowCtrl, upCtrl) in enumerate(zip(controls[upper:], reversed(controls[:lower]) ), 1):
    for i, (lowCtrl, upCtrl) in enumerate(zip(range(upper, count), range(lower - 1, -1, -1)), 1):
power = 1 - delta * i
powers[lowCtrl] = power
powers[upCtrl] = power
return powers
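# Sanity check matching the docstring examples:
#   calcInfluence(['a', 'b', 'c', 'd', 'e'])       -> [0.5, 0.75, 1.0, 0.75, 0.5]
#   calcInfluence(['a', 'b', 'c', 'd', 'e', 'f'])  -> [0.5, 0.75, 1.0, 1.0, 0.75, 0.5]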
| bsd-3-clause | -3,334,268,302,058,666,000 | 37.341732 | 194 | 0.60759 | false | 3.658452 | false | false | false |
t00mas/datascience-python | classification/knearest.py | 1 | 1554 | import matplotlib
import matplotlib.pyplot as pyplot
import numpy
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
def get_iris_dataset():
iris = datasets.load_iris()
return iris.data[:, :2], iris.target
def get_knn_classifier(X, y, n_neighbors=None):
if not n_neighbors:
n_neighbors = 6
classifier = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
classifier.fit(X, y)
return classifier, n_neighbors
def get_meshgrid(X, y, h=None):
if not h:
h = .02
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
return numpy.meshgrid(
numpy.arange(x_min, x_max, h), numpy.arange(y_min, y_max, h))
def predict(classifier, mesh_xx, mesh_yy):
Z = classifier.predict(numpy.c_[mesh_xx.ravel(), mesh_yy.ravel()])
return Z.reshape(mesh_xx.shape)
def plot_classified_regions(X, y, classifier, n_neighbors):
xx, yy = get_meshgrid(X, y)
Z = predict(classifier, xx, yy)
pyplot.figure()
pyplot.pcolormesh(xx, yy, Z)
# Plot also the training points
cmap = ListedColormap(['#FFAAAA', '#AAFFAA','#00AAFF'])
pyplot.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap, alpha=0.8)
pyplot.xlim(xx.min(), xx.max())
pyplot.ylim(yy.min(), yy.max())
pyplot.title("3-Class classification (k = %i)" % (n_neighbors))
pyplot.savefig('knearest.png')
X, y = get_iris_dataset()
knn, n_neighbors = get_knn_classifier(X, y)
plot_classified_regions(X, y, knn, n_neighbors)
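# To try a different neighbourhood size, pass n_neighbors explicitly, e.g.:
#   knn, n_neighbors = get_knn_classifier(X, y, n_neighbors=15)
#   plot_classified_regions(X, y, knn, n_neighbors)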
| mit | 2,858,620,498,153,865,000 | 28.320755 | 80 | 0.63964 | false | 2.971319 | false | false | false |
arduino-org/s4t-iotronic | lib/test_pub.py | 1 | 2229 |
wampAddress = 'ws://172.17.3.139:8181/ws'
wampRealm = 's4t'
#from threading import Thread
from autobahn.twisted.wamp import ApplicationRunner
from autobahn.twisted.wamp import ApplicationSession
from twisted.internet.defer import inlineCallbacks
# imports for testing
from twisted.internet.defer import DeferredQueue
from twisted.internet import threads
# Autobahn class for managing communication with remote devices
class AutobahnMRS(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
print("Sessio attached [Connect to WAMP Router] Sub")
def onMessage(*args):
print args
try:
yield self.subscribe(onMessage, 'test')
print ("Subscribed to topic: test")
except Exception as e:
print("Exception:" +e)
# Autobahn class for managing internal communication
class AutobahnIM(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
print("Sessio attached [Connect to WAMP Router] Pub")
try:
yield self.publish('test','YOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO')
print ("Publish to topic: test")
except Exception as e:
print("Exception:" +e)
# Class for managing communication with remote devices
class ManageRemoteSystem:
def __init__(self):
self.runner = ApplicationRunner(url= wampAddress, realm = wampRealm)
def start(self):
self.runner.run(AutobahnMRS, start_reactor=False);
# Class for managing internal communication within ManageRemoteSystem
class InternalMessages:
def __init__(self):
self.runner = ApplicationRunner(url= wampAddress, realm = wampRealm)
def start(self):
self.runner.run(AutobahnIM, start_reactor=False);
# Main class for the iotronic service
#class S4tServer:
def something():
    # NOTE: test stub; pausing requires `from autobahn.twisted.util import sleep`
    # and an @inlineCallbacks wrapper -- as written, it just yields Deferreds.
    count = 0
    while True:
        print('something:', count)
        yield sleep(1)
        count += 1
if __name__ == '__main__':
#import multiprocessing
server = ManageRemoteSystem()
#sendMessage = InternalMessages()
server.start()
#sendMessage.start()
from twisted.internet import reactor
reactor.run()
#thread1 = Thread(target = reactor.run())
#thread2 = Thread(target = something())
#thread2.start()
#thread1.start()
#thread1.daemon = True
#thread2.daemon = True
#thread2.join()
#thread1.join()
| apache-2.0 | -9,203,456,461,136,472,000 | 21.979381 | 77 | 0.746074 | false | 3.126227 | false | false | false |
pitunti/alfaPitunti | plugin.video.alfa/channels/tupornotv.py | 1 | 10715 | # -*- coding: utf-8 -*-
import re
import urlparse
from core import scrapertools
from core.item import Item
from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Pendientes de Votación", action="novedades",
url="http://tuporno.tv/pendientes"))
itemlist.append(
Item(channel=item.channel, title="Populares", action="masVistos", url="http://tuporno.tv/", folder=True))
itemlist.append(
Item(channel=item.channel, title="Categorias", action="categorias", url="http://tuporno.tv/categorias/",
folder=True))
itemlist.append(Item(channel=item.channel, title="Videos Recientes", action="novedades",
url="http://tuporno.tv/videosRecientes/", folder=True))
itemlist.append(Item(channel=item.channel, title="Top Videos (mas votados)", action="masVotados",
url="http://tuporno.tv/topVideos/", folder=True))
itemlist.append(Item(channel=item.channel, title="Nube de Tags", action="categorias", url="http://tuporno.tv/tags/",
folder=True))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def novedades(item):
logger.info()
url = item.url
# ------------------------------------------------------
    # Download the page
# ------------------------------------------------------
data = scrapertools.cachePage(url)
# logger.info(data)
# ------------------------------------------------------
    # Extract the entries
# ------------------------------------------------------
    # "novedades" (recent videos) section
'''
<table border="0" cellpadding="0" cellspacing="0" ><tr><td align="center" width="100%" valign="top" height="160px">
<a href="/videos/cogiendo-en-el-bosque"><img src="imagenes/videos//c/o/cogiendo-en-el-bosque_imagen2.jpg" alt="Cogiendo en el bosque" border="0" align="top" /></a>
<h2><a href="/videos/cogiendo-en-el-bosque">Cogiendo en el bosque</a></h2>
'''
patronvideos = '<div class="relative">(.*?)</div><div class="video'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
# if DEBUG: scrapertools.printMatches(matches)
itemlist = []
for match in matches:
        # Title
try:
scrapedtitle = re.compile('title="(.+?)"').findall(match)[0]
except:
scrapedtitle = ''
try:
scrapedurl = re.compile('href="(.+?)"').findall(match)[0]
scrapedurl = urlparse.urljoin(url, scrapedurl)
except:
continue
try:
scrapedthumbnail = re.compile('src="(.+?)"').findall(match)[0]
scrapedthumbnail = urlparse.urljoin(url, scrapedthumbnail)
except:
scrapedthumbnail = ''
scrapedplot = ""
try:
duracion = re.compile('<div class="duracion">(.+?)<').findall(match)[0]
except:
try:
duracion = re.compile('\((.+?)\)<br').findall(match[3])[0]
except:
duracion = ""
# logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"], duracion=["+duracion+"]")
        # Add to the XBMC listing
# trozos = scrapedurl.split("/")
# id = trozos[len(trozos)-1]
# videos = "http://149.12.64.129/videoscodiH264/"+id[0:1]+"/"+id[1:2]+"/"+id+".flv"
itemlist.append(
Item(channel=item.channel, action="play", title=scrapedtitle + " [" + duracion + "]", url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, server="Directo", folder=False))
# ------------------------------------------------------
    # Extract the paginator
# ------------------------------------------------------
# <a href="/topVideos/todas/mes/2/" class="enlace_si">Siguiente </a>
patronsiguiente = '<a href="(.+?)" class="enlace_si">Siguiente </a>'
siguiente = re.compile(patronsiguiente, re.DOTALL).findall(data)
if len(siguiente) > 0:
scrapedurl = urlparse.urljoin(url, siguiente[0])
itemlist.append(Item(channel=item.channel, action="novedades", title="!Next page", url=scrapedurl, folder=True))
return itemlist
def masVistos(item):
logger.info()
itemlist = []
itemlist.append(
Item(channel=item.channel, title="Hoy", action="novedades", url="http://tuporno.tv/hoy", folder=True))
itemlist.append(Item(channel=item.channel, title="Recientes", action="novedades", url="http://tuporno.tv/recientes",
folder=True))
itemlist.append(
Item(channel=item.channel, title="Semana", action="novedades", url="http://tuporno.tv/semana", folder=True))
itemlist.append(
Item(channel=item.channel, title="Mes", action="novedades", url="http://tuporno.tv/mes", folder=True))
itemlist.append(
Item(channel=item.channel, title="Año", action="novedades", url="http://tuporno.tv/ano", folder=True))
return itemlist
def categorias(item):
logger.info()
url = item.url
# ------------------------------------------------------
    # Download the page
# ------------------------------------------------------
data = scrapertools.cachePage(url)
# logger.info(data)
# ------------------------------------------------------
    # Extract the entries
# ------------------------------------------------------
    # categories section
    # Pattern for the entries
if url == "http://tuporno.tv/categorias/":
patronvideos = '<li><a href="([^"]+)"' # URL
        patronvideos += '>([^<]+)</a></li>' # TITLE
else:
patronvideos = '<a href="(.tags[^"]+)"' # URL
        patronvideos += ' class="[^"]+">([^<]+)</a>' # TITLE
matches = re.compile(patronvideos, re.DOTALL).findall(data)
# if DEBUG: scrapertools.printMatches(matches)
itemlist = []
for match in matches:
if match[1] in ["SexShop", "Videochat", "Videoclub"]:
continue
        # Title
scrapedtitle = match[1]
scrapedurl = urlparse.urljoin(url, match[0])
scrapedthumbnail = ""
scrapedplot = ""
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        # Add to the XBMC listing
itemlist.append(Item(channel=item.channel, action="novedades", title=scrapedtitle.capitalize(), url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
return itemlist
def masVotados(item):
logger.info()
itemlist = []
itemlist.append(
Item(channel=item.channel, title="Hoy", action="novedades", url="http://tuporno.tv/topVideos/todas/hoy",
folder=True))
itemlist.append(Item(channel=item.channel, title="Recientes", action="novedades",
url="http://tuporno.tv/topVideos/todas/recientes", folder=True))
itemlist.append(
Item(channel=item.channel, title="Semana", action="novedades", url="http://tuporno.tv/topVideos/todas/semana",
folder=True))
itemlist.append(
Item(channel=item.channel, title="Mes", action="novedades", url="http://tuporno.tv/topVideos/todas/mes",
folder=True))
itemlist.append(
Item(channel=item.channel, title="Año", action="novedades", url="http://tuporno.tv/topVideos/todas/ano",
folder=True))
return itemlist
def search(item, texto):
logger.info()
if texto != "":
texto = texto.replace(" ", "+")
else:
texto = item.extra.replace(" ", "+")
item.url = "http://tuporno.tv/buscador/?str=" + texto
try:
return getsearch(item)
    # Catch the exception so a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def getsearch(item):
logger.info()
data = scrapertools.cachePage(item.url)
data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
patronvideos = '<div class="relative"><a href="(.videos[^"]+)"[^>]+><img.+?src="([^"]+)" alt="(.+?)" .*?<div class="duracion">(.+?)</div></div></div>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
itemlist = []
for match in matches:
            # Title
scrapedtitle = match[2].replace("<b>", "")
scrapedtitle = scrapedtitle.replace("</b>", "")
scrapedurl = urlparse.urljoin("http://tuporno.tv/", match[0])
scrapedthumbnail = urlparse.urljoin("http://tuporno.tv/", match[1])
scrapedplot = ""
duracion = match[3]
itemlist.append(
Item(channel=item.channel, action="play", title=scrapedtitle + " [" + duracion + "]", url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, server="Directo", folder=False))
'''<a href="/buscador/?str=busqueda&desde=HV_PAGINA_SIGUIENTE" class="enlace_si">Siguiente </a>'''
patronsiguiente = '<a href="([^"]+)" class="enlace_si">Siguiente </a>'
siguiente = re.compile(patronsiguiente, re.DOTALL).findall(data)
if len(siguiente) > 0:
patronultima = '<!--HV_SIGUIENTE_ENLACE'
ultpagina = re.compile(patronultima, re.DOTALL).findall(data)
scrapertools.printMatches(siguiente)
if len(ultpagina) == 0:
scrapedurl = urlparse.urljoin(item.url, siguiente[0])
itemlist.append(
Item(channel=item.channel, action="getsearch", title="!Next page", url=scrapedurl, folder=True))
return itemlist
def play(item):
logger.info()
itemlist = []
    # Read the video page
data = scrapertools.cachePage(item.url)
codVideo = scrapertools.get_match(data, 'body id="([^"]+)"')
logger.info("codVideo=" + codVideo)
    # Fetch the page with the video code
# http://tuporno.tv/flvurl.php?codVideo=188098&v=MAC%2011,5,502,146
url = "http://tuporno.tv/flvurl.php?codVideo=" + codVideo + "&v=MAC%2011,5,502,146"
data = scrapertools.cachePage(url)
logger.info("data=" + data)
kpt = scrapertools.get_match(data, "kpt\=(.+?)\&")
logger.info("kpt=" + kpt)
    # Decode the base64 payload
import base64
url = base64.decodestring(kpt)
logger.info("url=" + url)
itemlist.append(
Item(channel=item.channel, action="play", title=item.title, url=url, thumbnail=item.thumbnail, plot=item.plot,
server="Directo", folder=False))
return itemlist
| gpl-3.0 | 8,343,848,909,474,973,000 | 39.549242 | 167 | 0.567865 | false | 3.560027 | false | false | false |
designcc/django-ccbasket | ccbasket_testproject/shop/views.py | 1 | 1061 | # -*- coding: utf-8 -*-
import logging
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.response import TemplateResponse
from models import SimpleProduct, ComplexProduct
logger = logging.getLogger('ccbasket')
def home(request):
return render_to_response('shop/home.html', {
}, context_instance=RequestContext(request))
def product(request, pk, model=SimpleProduct, template='shop/product.html'):
product = model.objects.get(pk=pk)
return render_to_response(template, {
'product': product
}, context_instance=RequestContext(request))
def index(request):
simple_products = SimpleProduct.objects.all()
complex_products = ComplexProduct.objects.all()
products = []
for product in simple_products:
products.append(product)
for product in complex_products:
products.append(product)
return render_to_response('shop/index.html', {
'products': products,
}, context_instance=RequestContext(request))
| bsd-3-clause | -8,419,915,165,657,524,000 | 26.921053 | 76 | 0.71065 | false | 4.278226 | false | false | false |
david-abel/simple_rl | simple_rl/tasks/maze_1d/Maze1DPOMDPClass.py | 1 | 2420 | # Python imports.
from collections import defaultdict
import random
# Other imports.
from simple_rl.pomdp.POMDPClass import POMDP
from simple_rl.tasks.maze_1d.Maze1DStateClass import Maze1DState
class Maze1DPOMDP(POMDP):
''' Class for a 1D Maze POMDP '''
ACTIONS = ['west', 'east']
OBSERVATIONS = ['nothing', 'goal']
def __init__(self):
self._states = [Maze1DState('left'), Maze1DState('middle'), Maze1DState('right'), Maze1DState('goal')]
# Initial belief is a uniform distribution over states
b0 = defaultdict()
for state in self._states: b0[state] = 0.25
POMDP.__init__(self, Maze1DPOMDP.ACTIONS, Maze1DPOMDP.OBSERVATIONS, self._transition_func, self._reward_func, self._observation_func, b0)
def _transition_func(self, state, action):
'''
Args:
state (Maze1DState)
action (str)
Returns:
next_state (Maze1DState)
'''
if action == 'west':
if state.name == 'left':
return Maze1DState('left')
if state.name == 'middle':
return Maze1DState('left')
if state.name == 'right':
return Maze1DState('goal')
if state.name == 'goal':
return Maze1DState(random.choice(['left', 'middle', 'right']))
if action == 'east':
if state.name == 'left':
return Maze1DState('middle')
if state.name == 'middle':
return Maze1DState('goal')
if state.name == 'right':
return Maze1DState('right')
if state.name == 'goal':
return Maze1DState(random.choice(['left', 'middle', 'right']))
raise ValueError('Invalid state: {} action: {} in 1DMaze'.format(state, action))
def _observation_func(self, state, action):
next_state = self._transition_func(state, action)
return 'goal' if next_state.name == 'goal' else 'nothing'
def _reward_func(self, state, action, next_state):
# next_state = self._transition_func(state, action)
observation = self._observation_func(state, action)
return (1. - self.step_cost) if (next_state.name == observation == 'goal') else (0. - self.step_cost)
def is_in_goal_state(self):
return self.cur_state.name == 'goal'
if __name__ == '__main__':
maze_pomdp = Maze1DPOMDP()
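    # Minimal smoke test (sketch): walk east twice using the class's own
    # transition function; 'left' -> 'middle' -> 'goal'.
    state = Maze1DState('left')
    for action in ['east', 'east']:
        state = maze_pomdp._transition_func(state, action)
        print('{0} -> {1}'.format(action, state.name))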
| apache-2.0 | 2,584,774,365,799,531,000 | 35.666667 | 145 | 0.579752 | false | 3.543192 | false | false | false |
tenable/Tenable.io-SDK-for-Python | tenable_io/api/plugins.py | 1 | 1572 | from tenable_io.api.base import BaseApi
from tenable_io.api.models import PluginDetails, PluginFamilyDetails, PluginFamilyList
class PluginsApi(BaseApi):
def families(self, include_all=None):
"""Return list of plugin families.
:param include_all: Whether or not to include all plugins. Defaults to be less inclusive.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.PluginFamilyList`.
"""
params = {'all': include_all}
response = self._client.get('plugins/families', params={k: v for (k, v) in params.items() if v})
return PluginFamilyList.from_json(response.text)
def family_details(self, family_id):
"""Return plugin family details.
:param family_id: Plugin family ID.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.PluginFamilyDetails`.
"""
response = self._client.get('plugins/families/%(id)s', path_params={'id': family_id})
return PluginFamilyDetails.from_json(response.text)
def plugin_details(self, plugin_id):
"""Return plugin details.
:param plugin_id: Plugin ID.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.PluginDetails`.
"""
response = self._client.get('plugins/plugin/%(id)s', path_params={'id': plugin_id})
return PluginDetails.from_json(response.text)
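# Illustrative wiring (sketch; the client construction below is an assumption --
# see tenable_io.client for the real entry point):
#   from tenable_io.client import TenableIOClient
#   client = TenableIOClient(access_key='...', secret_key='...')
#   families = PluginsApi(client).families(include_all=True)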
| mit | 4,282,903,785,587,915,000 | 42.666667 | 104 | 0.67112 | false | 3.900744 | false | false | false |
anarchivist/pyflag | src/plugins/Flash/AdvancedCommands.py | 1 | 13927 | """ These Flash commands allow more sophisticated operations, most of
which may not be needed by most users. Some operations are
specifically designed for testing and have little use in practice.
"""
import pyflag.pyflagsh as pyflagsh
import pyflag.Registry as Registry
import pyflag.DB as DB
import fnmatch
import pyflag.FileSystem as FileSystem
import pyflag.Scanner as Scanner
import time, types
import pyflag.pyflaglog as pyflaglog
import BasicCommands
import pyflag.ScannerUtils as ScannerUtils
import pyflag.conf
config=pyflag.conf.ConfObject()
class scan_path(pyflagsh.command):
""" This takes a path as an argument and runs the specified scanner on the path
this might be of more use than specifying inodes for the average user since if you load
two disk images, then you might have /disk1 and /disk2 and want to just run scans over
one of them, which is simpler to specify using /disk1. """
def help(self):
return "scan VFSPath [list of scanners]: Scans the VFS path with the scanners specified"
def complete(self, text,state):
if len(self.args)>2 or len(self.args)==2 and not text:
scanners = [ x for x in Registry.SCANNERS.scanners if x.startswith(text) ] +\
[ x for x in Registry.SCANNERS.get_groups() if x.startswith(text) ]
return scanners[state]
else:
dbh = DB.DBO(self.environment._CASE)
dbh.execute("select substr(path,1,%r) as abbrev,path from file where path like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state))
return dbh.fetch()['path']
def wait_for_scan(self, cookie):
""" Waits for scanners to complete """
pdbh = DB.DBO()
pdbh.check_index('jobs','cookie')
## Often this process owns a worker as well. In that case we can wake it up:
import pyflag.Farm as Farm
Farm.wake_workers()
## Wait until there are no more jobs left.
while 1:
pdbh.execute("select count(*) as total from jobs where cookie=%r and arg1=%r",
(cookie,
self.environment._CASE))
row = pdbh.fetch()
if row['total']==0: break
time.sleep(1)
def execute(self):
scanners=[]
if len(self.args)<2:
yield self.help()
return
elif type(self.args[1]) == types.ListType:
scanners = self.args[1]
else:
for i in range(1,len(self.args)):
scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))
## Assume that people always want recursive - I think this makes sense
path = self.args[0]
if not path.endswith("*"):
path = path + "*"
## FIXME For massive images this should be broken up, as in the old GUI method
dbh=DB.DBO(self.environment._CASE)
dbh.execute("select inode.inode from inode join file on file.inode = inode.inode where file.path rlike %r", fnmatch.translate(path))
pdbh = DB.DBO()
pdbh.mass_insert_start('jobs')
## This is a cookie used to identify our requests so that we
## can check they have been done later.
cookie = int(time.time())
for row in dbh:
inode = row['inode']
pdbh.mass_insert(
command = 'Scan',
arg1 = self.environment._CASE,
arg2 = row['inode'],
arg3 = ','.join(scanners),
cookie=cookie,
                )
pdbh.mass_insert_commit()
## Wait for the scanners to finish:
self.wait_for_scan(cookie)
yield "Scanning complete"
import pyflag.FlagFramework as FlagFramework
class init_flag_db(pyflagsh.command):
""" Creates the main flag db if needed """
def execute(self):
try:
dbh = DB.DBO()
except:
dbh = DB.DBO('mysql')
dbh.execute("create database `%s`" % config.FLAGDB)
dbh = DB.DBO()
FlagFramework.post_event("init_default_db", None)
yield "Done"
class delete_iosource(pyflagsh.command):
""" Deletes an iosource from the current case """
def complete(self, text, state):
dbh = DB.DBO(self.environment._CASE)
dbh.execute("select substr(name,1,%r) as abbrev,name from iosources where name like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state))
return dbh.fetch()['name']
def execute(self):
for iosource in self.args:
dbh = DB.DBO(self.environment._CASE)
dbh2 = dbh.clone()
dbh.delete('inode', where=DB.expand("inode like 'I%s|%%'", iosource))
dbh.execute("select * from filesystems where iosource = %r", iosource)
for row in dbh:
dbh2.delete('file', where=DB.expand("path like '%s%%'", iosource))
dbh.delete("iosources", where=DB.expand("name=%r", iosource))
yield "Removed IOSource %s" % iosource
class scan(pyflagsh.command):
""" Scan a glob of inodes with a glob of scanners """
def help(self):
return "scan inode [list of scanners]: Scans the inodes with the scanners specified"
def complete(self, text,state):
if len(self.args)>2 or len(self.args)==2 and not text:
scanners = [ x for x in Registry.SCANNERS.scanners if x.startswith(text) ] + \
[ x for x in Registry.SCANNERS.get_groups() if x.startswith(text) ]
return scanners[state]
else:
dbh = DB.DBO(self.environment._CASE)
dbh.execute("select substr(inode,1,%r) as abbrev,inode from inode where inode like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state))
return dbh.fetch()['inode']
def execute(self):
if len(self.args)<2:
yield self.help()
return
## Try to glob the inode list:
dbh=DB.DBO(self.environment._CASE)
dbh.execute("select inode from inode where inode rlike %r",fnmatch.translate(self.args[0]))
pdbh = DB.DBO()
pdbh.mass_insert_start('jobs')
## This is a cookie used to identify our requests so that we
## can check they have been done later.
cookie = int(time.time())
scanners = []
for i in range(1,len(self.args)):
scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))
scanners = ScannerUtils.fill_in_dependancies(scanners)
for row in dbh:
inode = row['inode']
pdbh.mass_insert(
command = 'Scan',
arg1 = self.environment._CASE,
arg2 = row['inode'],
arg3 = ','.join(scanners),
cookie=cookie,
)
pdbh.mass_insert_commit()
## Wait for the scanners to finish:
if self.environment.interactive:
self.wait_for_scan(cookie)
yield "Scanning complete"
def wait_for_scan(self, cookie):
""" Waits for scanners to complete """
pdbh = DB.DBO()
## Often this process owns a worker as well. In that case we can wake it up:
import pyflag.Farm as Farm
Farm.wake_workers()
## Wait until there are no more jobs left.
while 1:
pdbh.execute("select count(*) as total from jobs where cookie=%r and arg1=%r", (cookie,
self.environment._CASE))
row = pdbh.fetch()
if row and row['total']==0: break
time.sleep(1)
class scan_file(scan,BasicCommands.ls):
""" Scan a file in the VFS by name """
def help(self):
return "scan file [list of scanners]: Scan the file with the scanners specified "
def complete(self, text,state):
if len(self.args)>2 or len(self.args)==2 and not text:
scanners = [ x for x in Registry.SCANNERS.scanners if x.startswith(text) ] +\
[ x for x in Registry.SCANNERS.get_groups() if x.startswith(text) ]
return scanners[state]
else:
dbh = DB.DBO(self.environment._CASE)
dbh.execute("select substr(path,1,%r) as abbrev,path from file where path like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state))
return dbh.fetch()['path']
def execute(self):
if len(self.args)<2:
yield self.help()
return
pdbh = DB.DBO()
pdbh.mass_insert_start('jobs')
cookie = int(time.time())
scanners = []
for i in range(1,len(self.args)):
scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))
for path in self.glob_files(self.args[:1]):
path, inode, inode_id = self.environment._FS.lookup(path = path)
## This is a cookie used to identify our requests so that we
## can check they have been done later.
pdbh.mass_insert(
command = 'Scan',
arg1 = self.environment._CASE,
arg2 = inode,
arg3 = ','.join(scanners),
cookie=cookie,
)
pdbh.mass_insert_commit()
## Wait for the scanners to finish:
if 1 or self.environment.interactive:
self.wait_for_scan(cookie)
yield "Scanning complete"
##
## This allows people to reset based on the VFS path
##
class scanner_reset_path(scan):
""" Reset all files under a specified path """
def help(self):
return "scanner_reset_path path [list of scanners]: Resets the inodes under the path given with the scanners specified"
def execute(self):
if len(self.args)<2:
yield self.help()
return
scanners = []
if type(self.args[1]) == types.ListType:
scanners = self.args[1]
else:
for i in range(1,len(self.args)):
scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))
print "GETTING FACTORIES"
factories = Scanner.get_factories(self.environment._CASE, scanners)
print "OK NOW RESETING EM"
for f in factories:
f.reset_entire_path(self.args[0])
print "HOKAY"
yield "Reset Complete"
## There is little point in distributing this because its very quick anyway.
class scanner_reset(scan):
""" Reset multiple inodes as specified by a glob """
def help(self):
return "reset inode [list of scanners]: Resets the inodes with the scanners specified"
def execute(self):
if len(self.args)<2:
yield self.help()
return
scanners = []
for i in range(1,len(self.args)):
scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))
factories = Scanner.get_factories(self.environment._CASE, scanners)
for f in factories:
f.multiple_inode_reset(self.args[0])
yield "Resetting complete"
class load_and_scan(scan):
""" Load a filesystem and scan it at the same time """
def help(self):
return """load_and_scan iosource mount_point fstype [list of scanners]:
Loads the iosource into the right mount point and scans all
new inodes using the scanner list. This allows scanning to
start as soon as VFS inodes are produced and before the VFS is
fully populated.
"""
def complete(self, text,state):
if len(self.args)>4 or len(self.args)==4 and not text:
scanners = [ x for x in Registry.SCANNERS.scanners if x.startswith(text) ] + \
[ x for x in Registry.SCANNERS.get_groups() if x.startswith(text) ]
return scanners[state]
elif len(self.args)>3 or len(self.args)==3 and not text:
fstypes = [ x for x in Registry.FILESYSTEMS.class_names if x.startswith(text) ]
return fstypes[state]
elif len(self.args)>2 or len(self.args)==2 and not text:
return
elif len(self.args)>1 or len(self.args)==1 and not text:
dbh = DB.DBO(self.environment._CASE)
dbh.execute("select substr(value,1,%r) as abbrev,value from meta where property='iosource' and value like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state))
return dbh.fetch()['value']
def execute(self):
if len(self.args)<3:
yield self.help()
return
iosource=self.args[0]
mnt_point=self.args[1]
filesystem=self.args[2]
query = {}
dbh = DB.DBO()
dbh.mass_insert_start('jobs')
## This works out all the scanners that were specified:
tmp = []
for i in range(3,len(self.args)):
## Is it a parameter?
if "=" in self.args[i]:
prop,value = self.args[i].split("=",1)
query[prop] = value
else:
tmp.extend([x for x in fnmatch.filter(
Registry.SCANNERS.scanners, self.args[i]) ])
scanners = [ ]
for item in tmp:
if item not in scanners:
scanners.append(item)
## Load the filesystem:
try:
fs = Registry.FILESYSTEMS.dispatch(filesystem)
except KeyError:
yield "Unable to find a filesystem of %s" % filesystem
return
fs=fs(self.environment._CASE, query)
fs.cookie = int(time.time())
fs.load(mnt_point, iosource, scanners)
## Wait for all the scanners to finish
self.wait_for_scan(fs.cookie)
yield "Loading complete"
| gpl-2.0 | -4,366,271,114,927,967,700 | 36.438172 | 178 | 0.575644 | false | 3.877227 | false | false | false |
mobarski/sandbox | parallel/p7cat.py | 1 | 1316 | ## p7cat.py - parallel concatenation
## (c) 2017 by mobarski (at) gmail (dot) com
## licence: MIT
## version: x1
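## usage: python p7cat.py OUTPUT INPUT1 [INPUT2 ...]
##        concatenates the inputs into OUTPUT, writing every part in parallel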
from __future__ import print_function
import sys
import os
from multiprocessing import Process
from time import time
def write_part(path_in, path_out, offset, blocksize=4096):
fi = open(path_in,'rb')
fo = open(path_out,'r+b')
fo.seek(offset)
while True:
block = fi.read(blocksize)
fo.write(block)
if len(block)<blocksize: break
fi.close()
fo.close()
if __name__ == "__main__":
t0 = time()
print("\n\tP7 CONCAT START\n")
outpath = sys.argv[1]
filenames = sys.argv[2:]
#print('\tOUT',outpath)
#print('\tIN\n',filenames)
meta = {} # filename -> size, offset
offset = 0
for path in filenames:
size = os.path.getsize(path)
meta[path] = (size,offset)
offset += size
# allocate disk space
out = open(outpath,'wb')
out.seek(offset-1)
out.write(b'\x00')
out.close()
proc = {}
for path in filenames:
size,offset = meta[path]
p = Process(target=write_part, args=(path, outpath, offset))
p.start()
print("\tBEGIN pid:{0} size:{2} offset:{1}".format(p.pid,offset,size))
proc[path] = p
sys.stdout.flush()
for path in filenames:
p = proc[path]
p.join()
print("\tEND pid:{0}".format(p.pid))
print("\n\tRUN_TIME_TOTAL:{0:.1f}s\n".format(time()-t0))
| mit | -6,190,227,077,822,655,000 | 20.933333 | 75 | 0.647416 | false | 2.626747 | false | false | false |
stoq/stoqdrivers | docs/fiscal-driver-template.py | 1 | 5263 | #
# Stoqdrivers template driver
#
# Copyright (C) 2007 Async Open Source <http://www.async.com.br>
# All rights reserved
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
# USA.
#
import datetime
from decimal import Decimal
from zope.interface import implementer
from stoqdrivers.enum import TaxType
from stoqdrivers.interfaces import ICouponPrinter
from stoqdrivers.printers.capabilities import Capability
from stoqdrivers.printers.fiscal import SintegraData
from stoqdrivers.serialbase import SerialBase
from stoqdrivers.translation import stoqdrivers_gettext
_ = stoqdrivers_gettext
@implementer(ICouponPrinter)
class TemplateDriver(SerialBase):
supported = True
model_name = "Template Driver"
coupon_printer_charset = "ascii"
def __init__(self, port, consts=None):
SerialBase.__init__(self, port)
#
# This implements the ICouponPrinter Interface
#
# Coupon methods
def coupon_identify_customer(self, customer, address, document):
pass
def coupon_open(self):
pass
def coupon_cancel(self):
pass
def coupon_close(self, message):
coupon_id = 123
return coupon_id
def coupon_add_item(self, code, description, price, taxcode,
quantity, unit, discount, markup, unit_desc):
item_id = 123
return item_id
def coupon_cancel_item(self, item_id):
pass
def coupon_add_payment(self, payment_method, value, description):
return Decimal("123")
def coupon_totalize(self, discount, markup, taxcode):
return Decimal("123")
# Till / Daily flow
def summarize(self):
        # X reading (Leitura X)
pass
def close_till(self, previous_day):
        # Z reduction (Redução Z)
pass
def till_add_cash(self, value):
        # Cash in (suprimento)
pass
def till_remove_cash(self, value):
        # Cash out (sangria)
pass
def till_read_memory(self, start, end):
        # Read fiscal memory data (Leitura da Memória Fiscal)
pass
def till_read_memory_by_reductions(self, start, end):
        # Read fiscal memory by Z reductions (reduções)
pass
# Introspection
def get_capabilities(self):
return dict(
item_code=Capability(max_len=13),
item_id=Capability(digits=4),
items_quantity=Capability(min_size=1, digits=4, decimals=3),
item_price=Capability(digits=6, decimals=2),
item_description=Capability(max_len=29),
payment_value=Capability(digits=12, decimals=2),
promotional_message=Capability(max_len=320),
payment_description=Capability(max_len=48),
customer_name=Capability(max_len=30),
customer_id=Capability(max_len=28),
customer_address=Capability(max_len=80),
add_cash_value=Capability(min_size=0.1, digits=12, decimals=2),
remove_cash_value=Capability(min_size=0.1, digits=12, decimals=2),
)
def get_constants(self):
return self._consts
def get_tax_constants(self):
constants = []
constants.append((TaxType.CUSTOM,
'01',
Decimal('18.00')))
constants.append((TaxType.CUSTOM,
'02',
Decimal('25.00')))
constants.extend([
(TaxType.SUBSTITUTION, 'FF', None),
(TaxType.EXEMPTION, 'II', None),
(TaxType.NONE, 'NN', None),
])
return constants
def get_payment_constants(self):
methods = []
methods.append(('01', 'DINHEIRO'))
methods.append(('02', 'CHEQUE'))
return methods
def get_sintegra(self):
taxes = []
taxes.append(('2500', Decimal("0")))
taxes.append(('1800', Decimal("0")))
taxes.append(('CANC', Decimal("0")))
taxes.append(('DESC', Decimal("0")))
taxes.append(('I', Decimal("0")))
taxes.append(('N', Decimal("0")))
taxes.append(('F', Decimal("0")))
return SintegraData(
opening_date=datetime.date(2000, 1, 1),
            serial=self.get_serial(),
serial_id='001',
coupon_start=0,
coupon_end=100,
cro=230,
crz=1232,
coo=320,
period_total=Decimal("1123"),
total=Decimal("2311123"),
taxes=taxes)
# Device detection, asynchronous
def query_status(self):
return 'XXX'
def status_reply_complete(self, reply):
return len(reply) == 23
def get_serial(self):
return 'ABC12345678'
| lgpl-2.1 | -2,792,795,015,464,589,300 | 27.737705 | 78 | 0.610953 | false | 3.969057 | false | false | false |
waheedahmed/edx-platform | openedx/core/djangoapps/api_admin/views.py | 1 | 9646 | """Views for API management."""
import logging
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.urlresolvers import reverse_lazy, reverse
from django.http.response import JsonResponse
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from django.views.generic import View
from django.views.generic.base import TemplateView
from django.views.generic.edit import CreateView
from oauth2_provider.generators import generate_client_secret, generate_client_id
from oauth2_provider.models import get_application_model
from oauth2_provider.views import ApplicationRegistration
from slumber.exceptions import HttpNotFoundError
from edxmako.shortcuts import render_to_response
from openedx.core.djangoapps.api_admin.decorators import require_api_access
from openedx.core.djangoapps.api_admin.forms import ApiAccessRequestForm, CatalogForm
from openedx.core.djangoapps.api_admin.models import ApiAccessRequest, Catalog
from openedx.core.djangoapps.api_admin.utils import course_discovery_api_client
log = logging.getLogger(__name__)
Application = get_application_model() # pylint: disable=invalid-name
class ApiRequestView(CreateView):
"""Form view for requesting API access."""
form_class = ApiAccessRequestForm
template_name = 'api_admin/api_access_request_form.html'
success_url = reverse_lazy('api_admin:api-status')
def get(self, request):
"""
If the requesting user has already requested API access, redirect
them to the client creation page.
"""
if ApiAccessRequest.api_access_status(request.user) is not None:
return redirect(reverse('api_admin:api-status'))
return super(ApiRequestView, self).get(request)
def form_valid(self, form):
form.instance.user = self.request.user
form.instance.site = get_current_site(self.request)
return super(ApiRequestView, self).form_valid(form)
class ApiRequestStatusView(ApplicationRegistration):
"""View for confirming our receipt of an API request."""
success_url = reverse_lazy('api_admin:api-status')
def get(self, request, form=None): # pylint: disable=arguments-differ
"""
If the user has not created an API request, redirect them to the
request form. Otherwise, display the status of their API
request. We take `form` as an optional argument so that we can
display validation errors correctly on the page.
"""
if form is None:
form = self.get_form_class()()
user = request.user
try:
api_request = ApiAccessRequest.objects.get(user=user)
except ApiAccessRequest.DoesNotExist:
return redirect(reverse('api_admin:api-request'))
try:
application = Application.objects.get(user=user)
except Application.DoesNotExist:
application = None
# We want to fill in a few fields ourselves, so remove them
# from the form so that the user doesn't see them.
for field in ('client_type', 'client_secret', 'client_id', 'authorization_grant_type'):
form.fields.pop(field)
return render_to_response('api_admin/status.html', {
'status': api_request.status,
'api_support_link': settings.API_DOCUMENTATION_URL,
'api_support_email': settings.API_ACCESS_MANAGER_EMAIL,
'form': form,
'application': application,
})
def get_form(self, form_class=None):
form = super(ApiRequestStatusView, self).get_form(form_class)
# Copy the data, since it's an immutable QueryDict.
copied_data = form.data.copy()
# Now set the fields that were removed earlier. We give them
# confidential client credentials, and generate their client
# ID and secret.
copied_data.update({
'authorization_grant_type': Application.GRANT_CLIENT_CREDENTIALS,
'client_type': Application.CLIENT_CONFIDENTIAL,
'client_secret': generate_client_secret(),
'client_id': generate_client_id(),
})
form.data = copied_data
return form
def form_valid(self, form):
# Delete any existing applications if the user has decided to regenerate their credentials
Application.objects.filter(user=self.request.user).delete()
return super(ApiRequestStatusView, self).form_valid(form)
def form_invalid(self, form):
return self.get(self.request, form)
@require_api_access
def post(self, request):
return super(ApiRequestStatusView, self).post(request)
class ApiTosView(TemplateView):
"""View to show the API Terms of Service."""
template_name = 'api_admin/terms_of_service.html'
class CatalogSearchView(View):
"""View to search for catalogs belonging to a user."""
def get(self, request):
"""Display a form to search for catalogs belonging to a user."""
return render_to_response('api_admin/catalogs/search.html')
def post(self, request):
"""Redirect to the list view for the given user."""
username = request.POST.get('username')
# If no username is provided, bounce back to this page.
if not username:
return redirect(reverse('api_admin:catalog-search'))
return redirect(reverse('api_admin:catalog-list', kwargs={'username': username}))
class CatalogListView(View):
"""View to list existing catalogs and create new ones."""
template = 'api_admin/catalogs/list.html'
def _get_catalogs(self, client, username):
"""Retrieve catalogs for a user. Returns the empty list if none are found."""
try:
response = client.api.v1.catalogs.get(username=username)
return [Catalog(attributes=catalog) for catalog in response['results']]
except HttpNotFoundError:
return []
def get(self, request, username):
"""Display a list of a user's catalogs."""
client = course_discovery_api_client(request.user)
catalogs = self._get_catalogs(client, username)
return render_to_response(self.template, {
'username': username,
'catalogs': catalogs,
'form': CatalogForm(initial={'viewers': [username]}),
'preview_url': reverse('api_admin:catalog-preview'),
'catalog_api_url': client.api.v1.courses.url(),
})
def post(self, request, username):
"""Create a new catalog for a user."""
form = CatalogForm(request.POST)
client = course_discovery_api_client(request.user)
if not form.is_valid():
catalogs = self._get_catalogs(client, username)
return render_to_response(self.template, {
'form': form,
'catalogs': catalogs,
'username': username,
'preview_url': reverse('api_admin:catalog-preview'),
'catalog_api_url': client.api.v1.courses.url(),
}, status=400)
attrs = form.instance.attributes
catalog = client.api.v1.catalogs.post(attrs)
return redirect(reverse('api_admin:catalog-edit', kwargs={'catalog_id': catalog['id']}))
class CatalogEditView(View):
"""View to edit an individual catalog."""
def get(self, request, catalog_id):
"""Display a form to edit this catalog."""
client = course_discovery_api_client(request.user)
response = client.api.v1.catalogs(catalog_id).get()
catalog = Catalog(attributes=response)
form = CatalogForm(instance=catalog)
return render_to_response('api_admin/catalogs/edit.html', {
'catalog': catalog,
'form': form,
'preview_url': reverse('api_admin:catalog-preview'),
'catalog_api_url': client.api.v1.courses.url(),
})
def post(self, request, catalog_id):
"""Update or delete this catalog."""
client = course_discovery_api_client(request.user)
if request.POST.get('delete-catalog') == 'on':
client.api.v1.catalogs(catalog_id).delete()
return redirect(reverse('api_admin:catalog-search'))
form = CatalogForm(request.POST)
if not form.is_valid():
response = client.api.v1.catalogs(catalog_id).get()
catalog = Catalog(attributes=response)
return render_to_response('api_admin/catalogs/edit.html', {
'catalog': catalog,
'form': form,
'preview_url': reverse('api_admin:catalog-preview'),
'catalog_api_url': client.api.v1.courses.url(),
}, status=400)
catalog = client.api.v1.catalogs(catalog_id).patch(form.instance.attributes)
return redirect(reverse('api_admin:catalog-edit', kwargs={'catalog_id': catalog['id']}))
class CatalogPreviewView(View):
"""Endpoint to preview courses for a query."""
def get(self, request):
"""
Return the results of a query against the course catalog API. If no
query parameter is given, returns an empty result set.
"""
client = course_discovery_api_client(request.user)
# Just pass along the request params including limit/offset pagination
if 'q' in request.GET:
results = client.api.v1.courses.get(**request.GET)
# Ensure that we don't just return all the courses if no query is given
else:
results = {'count': 0, 'results': [], 'next': None, 'prev': None}
return JsonResponse(results)
| agpl-3.0 | -1,986,569,222,691,623,200 | 40.222222 | 98 | 0.651669 | false | 4.16494 | false | false | false |
chfoo/fogchamp | util/csv2json.py | 1 | 4170 | '''Convert CSV files into JSON files needed for the visualizer page.'''
import argparse
import json
import os
import functools
from util.readers.addarash1 import AddarashReader
from util.readers.bulbapedia import BulbapediaReader
from util.readers.chfoo import ChfooReader
from util.readers.editornotes import EditorNotesReader
from util.readers.nkekev import NkekevReader
from util.readers.pokedex import PokedexReader
def main():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--output-dir', default='./')
arg_parser.add_argument('--metadata-dir', default='metadata/')
args = arg_parser.parse_args()
nkekev_dir = os.path.join(args.metadata_dir, 'nkekev')
chfoo_dir = os.path.join(args.metadata_dir, 'chfoo')
addarash1_dir = os.path.join(args.metadata_dir, 'addarash1')
pokedex_dir = os.path.join(args.metadata_dir, 'pokedex', 'pokedex', 'data', 'csv')
bulbapedia_dir = os.path.join(args.metadata_dir, 'bulbapedia')
editor_notes_dir = os.path.join(args.metadata_dir, 'editor_notes')
output_dir = args.output_dir
pokedex_reader = PokedexReader(pokedex_dir)
nkekev_reader = NkekevReader(nkekev_dir)
chfoo_reader = ChfooReader(chfoo_dir)
addarash1_reader = AddarashReader(addarash1_dir)
bulbapedia_reader = BulbapediaReader(bulbapedia_dir)
editor_notes_reader = EditorNotesReader(editor_notes_dir)
# Build each Pokemon's stats
movesets_funcs = [
('pbr-2.0', functools.partial(
addarash1_reader.read_pbr_2_0, nkekev_reader, chfoo_reader)),
('pbr-gold-1.2-2015-11-07', functools.partial(
addarash1_reader.read_pbr_gold_1_2_2015_11_07,
nkekev_reader, chfoo_reader)),
('pbr-gold-1.2', functools.partial(addarash1_reader.read_pbr_gold_1_2,
nkekev_reader, chfoo_reader)),
('pbr-seel', functools.partial(chfoo_reader.read_pbr_seel, nkekev_reader)),
('pbr-platinum', nkekev_reader.read_pbr_platinum),
('pbr-gold', nkekev_reader.read_pbr_gold),
]
for move_slug, func in movesets_funcs:
pokemon_stats = {}
pokemon_slugs = []
pokemon_types = pokedex_reader.read_pokemon_types()
pokemon_weights = pokedex_reader.read_pokemon_weights()
for pokemon_stat in func():
slug = pokemon_stat.pop('slug')
pokemon_slugs.append(slug)
pokemon_stats[slug] = pokemon_stat
pokemon_stats[slug]['types'] = pokemon_types[pokemon_stat['number']]
pokemon_stats[slug]['weight'] = pokemon_weights[pokemon_stat['number']]
json_path = os.path.join(output_dir, '{}.json'.format(move_slug))
with open(json_path, 'w') as file:
file.write(json.dumps({
'stats': pokemon_stats,
'pokemon_slugs': pokemon_slugs
}, indent=2, sort_keys=True))
# Build all the moves
move_stats = {}
for move in pokedex_reader.read_moves():
slug = move.pop('slug')
move_stats[slug] = move
bulbapedia_reader.downgrade_move_changes(move_stats)
editor_notes_reader.add_move_notes(move_stats)
json_path = os.path.join(output_dir, 'moves.json')
with open(json_path, 'w') as file:
file.write(json.dumps(move_stats, indent=2, sort_keys=True))
# Build descriptions and misc
abilities = {}
for ability in pokedex_reader.read_abilities():
slug = ability.pop('slug')
abilities[slug] = ability
editor_notes_reader.add_ability_notes(abilities)
types_efficacy = pokedex_reader.read_type_efficacy()
items = {}
for item in pokedex_reader.read_items():
slug = item.pop('slug')
items[slug] = item
item_renames = bulbapedia_reader.get_item_renames_map()
json_path = os.path.join(output_dir, 'descriptions.json')
with open(json_path, 'w') as file:
file.write(json.dumps({
'abilities': abilities,
'types_efficacy': types_efficacy,
'items': items,
'item_renames': item_renames,
}, indent=2, sort_keys=True))
if __name__ == '__main__':
main()
| mit | -2,807,500,552,308,781,000 | 35.26087 | 86 | 0.642206 | false | 3.178354 | false | false | false |
vpelletier/neoppod | neo/lib/event.py | 1 | 9556 | #
# Copyright (C) 2006-2016 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, thread
from time import time
from select import epoll, EPOLLIN, EPOLLOUT, EPOLLERR, EPOLLHUP
from errno import EAGAIN, EEXIST, EINTR, ENOENT
from . import logging
from .locking import Lock
class EpollEventManager(object):
"""This class manages connections and events based on epoll(5)."""
_timeout = None
_trigger_exit = False
def __init__(self):
self.connection_dict = {}
# Initialize a dummy 'unregistered' for the very rare case a registered
        # connection is closed before the first call to poll. We don't mind
        # leaking a few integers for connections closed between 2 polls.
self.unregistered = []
self.reader_set = set()
self.writer_set = set()
self.epoll = epoll()
self._pending_processing = []
self._trigger_fd, w = os.pipe()
os.close(w)
self._trigger_lock = Lock()
def close(self):
os.close(self._trigger_fd)
for c in self.connection_dict.values():
c.close()
del self.__dict__
def getConnectionList(self):
# XXX: use index
return [x for x in self.connection_dict.itervalues()
if not x.isAborted()]
def getClientList(self):
# XXX: use index
return [c for c in self.getConnectionList() if c.isClient()]
def getServerList(self):
# XXX: use index
return [c for c in self.getConnectionList() if c.isServer()]
def getConnectionListByUUID(self, uuid):
""" Return the connection associated to the UUID, None if the UUID is
None, invalid or not found"""
# XXX: use index
# XXX: consider remove UUID from connection and thus this method
if uuid is None:
return None
result = []
append = result.append
for conn in self.getConnectionList():
if conn.getUUID() == uuid:
append(conn)
return result
# epoll_wait always waits for EPOLLERR & EPOLLHUP so we're forced
# to unregister when we want to ignore all events for a connection.
def register(self, conn, timeout_only=False):
fd = conn.getConnector().getDescriptor()
self.connection_dict[fd] = conn
if timeout_only:
self.wakeup()
else:
self.epoll.register(fd)
self.addReader(conn)
def unregister(self, conn):
new_pending_processing = [x for x in self._pending_processing
if x is not conn]
# Check that we removed at most one entry from
# self._pending_processing .
assert len(new_pending_processing) > len(self._pending_processing) - 2
self._pending_processing = new_pending_processing
fd = conn.getConnector().getDescriptor()
try:
del self.connection_dict[fd]
self.unregistered.append(fd)
self.epoll.unregister(fd)
except KeyError:
pass
except IOError, e:
if e.errno != ENOENT:
raise
else:
self.reader_set.discard(fd)
self.writer_set.discard(fd)
def isIdle(self):
return not (self._pending_processing or self.writer_set)
def _addPendingConnection(self, conn):
pending_processing = self._pending_processing
if conn not in pending_processing:
pending_processing.append(conn)
def poll(self, blocking=1):
if not self._pending_processing:
# Fetch messages from polled file descriptors
self._poll(blocking)
if not self._pending_processing:
return
to_process = self._pending_processing.pop(0)
try:
to_process.process()
finally:
# ...and requeue if there are pending messages
if to_process.hasPendingMessages():
self._addPendingConnection(to_process)
# Non-blocking call: as we handled a packet, we should just offer
# poll a chance to fetch & send already-available data, but it must
# not delay us.
self._poll(0)
def _poll(self, blocking):
if blocking:
timeout = self._timeout
timeout_object = self
for conn in self.connection_dict.itervalues():
t = conn.getTimeout()
if t and (timeout is None or t < timeout):
timeout = t
timeout_object = conn
# Make sure epoll_wait does not return too early, because it has a
# granularity of 1ms and Python 2.7 rounds the timeout towards zero.
# See also https://bugs.python.org/issue20452 (fixed in Python 3).
blocking = .001 + max(0, timeout - time()) if timeout else -1
try:
event_list = self.epoll.poll(blocking)
except IOError, exc:
if exc.errno in (0, EAGAIN):
logging.info('epoll.poll triggered undocumented error %r',
exc.errno)
elif exc.errno != EINTR:
raise
return
if event_list:
self.unregistered = unregistered = []
wlist = []
elist = []
for fd, event in event_list:
if event & EPOLLIN:
conn = self.connection_dict[fd]
if conn.readable():
self._addPendingConnection(conn)
if event & EPOLLOUT:
wlist.append(fd)
if event & (EPOLLERR | EPOLLHUP):
elist.append(fd)
for fd in wlist:
if fd not in unregistered:
self.connection_dict[fd].writable()
for fd in elist:
if fd in unregistered:
continue
try:
conn = self.connection_dict[fd]
except KeyError:
assert fd == self._trigger_fd, fd
with self._trigger_lock:
self.epoll.unregister(fd)
if self._trigger_exit:
del self._trigger_exit
thread.exit()
continue
if conn.readable():
self._addPendingConnection(conn)
elif blocking > 0:
logging.debug('timeout triggered for %r', timeout_object)
timeout_object.onTimeout()
def onTimeout(self):
on_timeout = self._on_timeout
del self._on_timeout
self._timeout = None
on_timeout()
def setTimeout(self, *args):
self._timeout, self._on_timeout = args
def wakeup(self, exit=False):
with self._trigger_lock:
self._trigger_exit |= exit
try:
self.epoll.register(self._trigger_fd)
except IOError, e:
# Ignore if 'wakeup' is called several times in a row.
if e.errno != EEXIST:
raise
def addReader(self, conn):
connector = conn.getConnector()
assert connector is not None, conn.whoSetConnector()
fd = connector.getDescriptor()
if fd not in self.reader_set:
self.reader_set.add(fd)
self.epoll.modify(fd, EPOLLIN | (
fd in self.writer_set and EPOLLOUT))
def removeReader(self, conn):
connector = conn.getConnector()
assert connector is not None, conn.whoSetConnector()
fd = connector.getDescriptor()
if fd in self.reader_set:
self.reader_set.remove(fd)
self.epoll.modify(fd, fd in self.writer_set and EPOLLOUT)
def addWriter(self, conn):
connector = conn.getConnector()
assert connector is not None, conn.whoSetConnector()
fd = connector.getDescriptor()
if fd not in self.writer_set:
self.writer_set.add(fd)
self.epoll.modify(fd, EPOLLOUT | (
fd in self.reader_set and EPOLLIN))
def removeWriter(self, conn):
connector = conn.getConnector()
assert connector is not None, conn.whoSetConnector()
fd = connector.getDescriptor()
if fd in self.writer_set:
self.writer_set.remove(fd)
self.epoll.modify(fd, fd in self.reader_set and EPOLLIN)
def log(self):
logging.info('Event Manager:')
logging.info(' Readers: %r', list(self.reader_set))
logging.info(' Writers: %r', list(self.writer_set))
logging.info(' Connections:')
pending_set = set(self._pending_processing)
for fd, conn in self.connection_dict.items():
logging.info(' %r: %r (pending=%r)', fd, conn,
conn in pending_set)
# Default to EpollEventManager.
EventManager = EpollEventManager
| gpl-2.0 | 4,390,888,818,905,951,000 | 36.03876 | 80 | 0.57409 | false | 4.287124 | false | false | false |
AndKyr/GETELEC | python/JFplot.py | 1 | 1648 | #! /usr/bin/python
import numpy as np
import getelec_mod as gt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib as mb
font = 30
# mb.rcParams["font.family"] = "Serif"
mb.rcParams["font.size"] = font
mb.rcParams["axes.labelsize"] = font
mb.rcParams["xtick.labelsize"] = font
mb.rcParams["ytick.labelsize"] = font
mb.rcParams["legend.fontsize"] = font
mb.rcParams["lines.linewidth"] = 2.5
fsize = (18,10)
Npoints = 256
Temps = [1.e-2, 300, 800, 1500]
Xfn = np.linspace(0.12, 0.35, Npoints)
F = 1./Xfn
Jem = np.copy(F)
this = gt.emission_create(W = 4.5, R = 5000., approx = 2)
fig1 = plt.figure(figsize=fsize)
ax1 = fig1.gca()
ax1.set_xlabel(r"$1/F$ [m GV$^{-1}$]")
ax1.set_ylabel(r"$J$ [A nm$^{-2}$]")
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
for i in range(len(Temps)):
this.Temp = Temps[i]
if (this.Temp < 10.):
this.approx = -1
else:
this.approx = 2
for j in range(len(F)):
this.F = F[j]
this.cur_dens()
Jem[j] = this.Jem
ax1.semilogy(Xfn,Jem, label = r'T = %d K'%this.Temp)
# for i in range(len(Temps)):
# this.Temp = Temps[i]
# if (this.Temp < 10.):
# this.approx = -1
# else:
# this.approx = -1
# for j in range(len(F)):
# this.F = F[j]
# this.cur_dens()
# Jem[j] = this.Jem
# ax1.semilogy(Xfn,Jem, '--', color = colors[i], label = r'T = %d K'%this.Temp)
# np.savetxt("J-F.dat", np.transpose(np.array([F,Jem])), delimiter = " ")
ax1.grid()
ax1.legend()
plt.savefig("JFplot_Tparam.svg")
plt.savefig("JFplot_Tparam.png")
plt.show()
| gpl-3.0 | -1,551,624,963,708,100,600 | 20.402597 | 83 | 0.586772 | false | 2.478195 | false | false | false |
goyal-sidd/BLT | website/models.py | 1 | 10857 | import os
from urlparse import urlparse
import requests
import tweepy
from PIL import Image
from annoying.fields import AutoOneToOneField
from colorthief import ColorThief
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.db import models
from django.db.models import Count
from django.db.models import signals
from django.db.models.signals import post_save
from unidecode import unidecode
class Domain(models.Model):
name = models.CharField(max_length=255, unique=True)
url = models.URLField()
logo = models.ImageField(upload_to="logos", null=True, blank=True)
webshot = models.ImageField(upload_to="webshots", null=True, blank=True)
clicks = models.IntegerField(null=True, blank=True)
email_event = models.CharField(max_length=255, default="", null=True, blank=True)
color = models.CharField(max_length=10, null=True, blank=True)
github = models.CharField(max_length=255, null=True, blank=True)
email = models.EmailField(null=True, blank=True)
twitter = models.CharField(max_length=30, null=True, blank=True)
facebook = models.URLField(null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.name
@property
def open_issues(self):
return Issue.objects.filter(domain=self).exclude(status="closed")
@property
def closed_issues(self):
return Issue.objects.filter(domain=self).filter(status="closed")
@property
def top_tester(self):
return User.objects.filter(issue__domain=self).annotate(total=Count('issue')).order_by('-total').first()
@property
def get_name(self):
parsed_url = urlparse(self.url)
return parsed_url.netloc.split(".")[-2:][0].title()
def get_logo(self):
if self.logo:
return self.logo.url
image_request = requests.get("https://logo.clearbit.com/" + self.name)
try:
if image_request.status_code == 200:
image_content = ContentFile(image_request.content)
self.logo.save(self.name + ".jpg", image_content)
return self.logo.url
except:
favicon_url = self.url + '/favicon.ico'
return favicon_url
@property
def get_color(self):
if self.color:
return self.color
else:
if not self.logo:
self.get_logo()
try:
color_thief = ColorThief(self.logo)
self.color = '#%02x%02x%02x' % color_thief.get_color(quality=1)
except:
self.color = "#0000ff"
self.save()
return self.color
@property
def hostname_domain(self):
parsed_url = urlparse(self.url)
return parsed_url.hostname
@property
def domain_name(self):
parsed_url = urlparse(self.url)
domain = parsed_url.hostname
temp = domain.rsplit('.')
if (len(temp) == 3):
domain = temp[1] + '.' + temp[2]
return domain
def get_absolute_url(self):
return "/domain/" + self.name
def validate_image(fieldfile_obj):
filesize = fieldfile_obj.file.size
megabyte_limit = 3.0
if filesize > megabyte_limit * 1024 * 1024:
raise ValidationError("Max file size is %sMB" % str(megabyte_limit))
class Issue(models.Model):
labels = (
(0, 'General'),
(1, 'Number Error'),
(2, 'Functional'),
(3, 'Performance'),
(4, 'Security'),
(5, 'Typo'),
(6, 'Design')
)
user = models.ForeignKey(User, null=True, blank=True)
domain = models.ForeignKey(Domain, null=True, blank=True)
url = models.URLField()
description = models.TextField()
label = models.PositiveSmallIntegerField(choices=labels, default=0)
views = models.IntegerField(null=True, blank=True)
status = models.CharField(max_length=10, default="open", null=True, blank=True)
user_agent = models.CharField(max_length=255, default="", null=True, blank=True)
ocr = models.TextField(default="", null=True, blank=True)
screenshot = models.ImageField(upload_to="screenshots", validators=[validate_image])
closed_by = models.ForeignKey(User, null=True, blank=True, related_name="closed_by")
closed_date = models.DateTimeField(default=None, null=True, blank=True)
github_url = models.URLField(default="", null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.description
@property
def domain_title(self):
parsed_url = urlparse(self.url)
return parsed_url.netloc.split(".")[-2:][0].title()
@property
def hostname_domain(self):
parsed_url = urlparse(self.url)
return parsed_url.hostname
@property
def domain_name(self):
parsed_url = urlparse(self.url)
domain = parsed_url.hostname
temp = domain.rsplit('.')
if (len(temp) == 3):
domain = temp[1] + '.' + temp[2]
return domain
def get_twitter_message(self):
issue_link = " bugheist.com/issue/" + str(self.id)
prefix = "Bug found on @"
spacer = " | "
msg = prefix + self.domain_title + spacer + self.description[:140 - (
len(prefix) + len(self.domain_title) + len(spacer) + len(issue_link))] + issue_link
return msg
def get_ocr(self):
if self.ocr:
return self.ocr
else:
try:
import pytesseract
self.ocr = pytesseract.image_to_string(Image.open(self.screenshot))
self.save()
return self.ocr
except:
return "OCR not installed"
@property
def get_absolute_url(self):
return "/issue/" + str(self.id)
class Meta:
ordering = ['-created']
TWITTER_MAXLENGTH = getattr(settings, 'TWITTER_MAXLENGTH', 140)
def post_to_twitter(sender, instance, *args, **kwargs):
if not kwargs.get('created'):
return False
try:
consumer_key = os.environ['TWITTER_CONSUMER_KEY']
consumer_secret = os.environ['TWITTER_CONSUMER_SECRET']
access_key = os.environ['TWITTER_ACCESS_KEY']
access_secret = os.environ['TWITTER_ACCESS_SECRET']
except KeyError:
print 'WARNING: Twitter account not configured.'
return False
try:
text = instance.get_twitter_message()
except AttributeError:
text = unicode(instance)
mesg = u'%s' % (text)
if len(mesg) > TWITTER_MAXLENGTH:
size = len(mesg + '...') - TWITTER_MAXLENGTH
mesg = u'%s...' % (text[:-size])
import logging
logger = logging.getLogger('testlogger')
if not settings.DEBUG:
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
file = default_storage.open(instance.screenshot.file.name, 'rb')
media_ids = api.media_upload(filename=unidecode(instance.screenshot.file.name), file=file)
params = dict(status=mesg, media_ids=[media_ids.media_id_string])
api.update_status(**params)
except Exception, ex:
print 'ERROR:', str(ex)
logger.debug('rem %s' % str(ex))
return False
signals.post_save.connect(post_to_twitter, sender=Issue)
class Hunt(models.Model):
user = models.ForeignKey(User)
url = models.URLField()
prize = models.IntegerField()
logo = models.ImageField(upload_to="logos", null=True, blank=True)
plan = models.CharField(max_length=10)
txn_id = models.CharField(max_length=50, null=True, blank=True)
color = models.CharField(max_length=10, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
@property
def domain_title(self):
parsed_url = urlparse(self.url)
return parsed_url.netloc.split(".")[-2:][0].title()
class Meta:
ordering = ['-id']
class Points(models.Model):
user = models.ForeignKey(User)
issue = models.ForeignKey(Issue, null=True, blank=True)
domain = models.ForeignKey(Domain, null=True, blank=True)
score = models.IntegerField()
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
# @receiver(user_logged_in, dispatch_uid="some.unique.string.id.for.allauth.user_logged_in")
# def user_logged_in_(request, user, **kwargs):
# if not settings.TESTING:
# action.send(user, verb='logged in')
class InviteFriend(models.Model):
sender = models.ForeignKey(User)
recipient = models.EmailField()
sent = models.DateTimeField(auto_now_add=True, db_index=True)
class Meta:
ordering = ('-sent',)
verbose_name = 'invitation'
verbose_name_plural = 'invitations'
def user_images_path(instance, filename):
from django.template.defaultfilters import slugify
filename, ext = os.path.splitext(filename)
return 'avatars/user_{0}/{1}{2}'.format(instance.user.id, slugify(filename), ext)
class UserProfile(models.Model):
title = (
(0, 'Unrated'),
(1, 'Bronze'),
(2, 'Silver'),
(3, 'Gold'),
(4, 'Platinum'),
)
follows = models.ManyToManyField('self', related_name='follower', symmetrical=False, blank=True)
user = AutoOneToOneField('auth.user', related_name="userprofile")
user_avatar = models.ImageField(upload_to=user_images_path, blank=True, null=True)
title = models.IntegerField(choices=title, default=0)
winnings = models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
issue_upvoted = models.ManyToManyField(Issue, blank=True, related_name="upvoted")
issue_saved = models.ManyToManyField(Issue, blank=True, related_name="saved")
def avatar(self, size=36):
if self.user_avatar:
return self.user_avatar.url
for account in self.user.socialaccount_set.all():
if 'avatar_url' in account.extra_data:
return account.extra_data['avatar_url']
elif 'picture' in account.extra_data:
return account.extra_data['picture']
def __unicode__(self):
return self.user.email
def create_profile(sender, **kwargs):
user = kwargs["instance"]
if kwargs["created"]:
profile = UserProfile(user=user)
profile.save()
post_save.connect(create_profile, sender=User)
| agpl-3.0 | 2,311,262,947,357,958,700 | 32.717391 | 112 | 0.634153 | false | 3.692857 | false | false | false |
mark-r-g/hydrus | tests/test_rapidclus.py | 1 | 1819 | # Mark Gatheman <[email protected]>
#
# This file is part of Hydrus.
#
# Hydrus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hydrus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hydrus. If not, see <http://www.gnu.org/licenses/>.
import random
from collections import Counter
import numpy as np
from hydrus.rapidclus import close_outer, close_inner, choose_initial_seeds
from hydrus.rapidclus import rapidclus
def test_close_inner():
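    # close_inner appears to return the closest adjacent pair in the data:
    # (left_index, right_index, left_value, right_value, gap) -- inferred
    # from the expected tuples below, not from upstream documentation.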
assert close_inner([1, 3, 6, 10, 11]) == (3, 4, 10, 11, 1)
assert close_inner(range(1, 100, 5)) == (0, 1, 1, 6, 5)
def test_close_outer():
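    # close_outer(data, x) appears to return the element of data closest
    # to x as (index, value, distance) -- inferred from the cases below.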
assert close_outer([1, 3, 6, 10, 11], 7) == (2, 6, 1)
assert close_outer([1, 3, 6, 10, 11], 0) == (0, 1, 1)
assert close_outer([1, 3, 6, 10, 11], 111) == (4, 11, 100)
def test_choose_initial_seeds():
assert choose_initial_seeds([1, 3, 6, 10, 11, 100], 3) == [1, 11, 100]
assert choose_initial_seeds([1, 3, 6, 10, 11, 100], 5) == [1, 3, 6, 11, 100]
random.seed(36261837)
data = [int(random.gauss(0,1000)) for _ in range(100)]
assert choose_initial_seeds(data, 5) == [-2376, -862, 521, 1948, 3239]
def test_rapidclus():
random.seed(12521184)
data = [random.gauss(0,1) for _ in range(1000)]
assert sorted(Counter(rapidclus(data)).values()) == [34, 41, 233, 251, 441]
assert rapidclus(data) == rapidclus(np.array(data))
| gpl-3.0 | -8,838,705,027,626,463,000 | 35.38 | 80 | 0.671798 | false | 2.943366 | false | false | false |
labase/surdonews | src/surdonews/leao/main.py | 1 | 5477 | from jqueryui import jq
from browser import document, html
from superpython.virgem.main import Sala, Labirinto, Cena, INVENTARIO  # imported from the "virgem" module
STYLE = dict(position="absolute", width=300, left=0, top=0, background="blue")  # the background color can be changed below
STYLE["min-height"] = "300px"
IMAGEM = "http://s16.postimg.org/k81hwi2n9/Desert.jpg"
class Leao:
SETOR = None
def __init__(self):
pass
def monta(self):
NONE = [None] * 4
imn = "https://upload.wikimedia.org/wikipedia/commons/1/1e/Est%C3%BAdio_-_TV_Cultura_Montenegro.jpg"
iml = "http://mochilaotrips.com/wp-content/uploads/2013/03/IMG_1447.jpg"
ims = "https://upload.wikimedia.org/wikipedia/commons/0/01/Morro_de_Castelo_Branco,_aspectos_1,_Castelo_Branco,_concelho_da_Horta,_ilha_do_Faial,_A%C3%A7ores,_Portugal.JPG"
imo = "http://www.unicos.cc/wp-content/uploads/2014/12/jornalismo-1-951x476.jpg"
irl = "http://www.vipcomm.com.br/site/upload/sbHulk_GN_150614026.jpg"
iro = "https://blogpontodeonibus.files.wordpress.com/2013/02/photodownload-php.jpg"
iro = "http://imagens.canaltech.com.br/38560.54878-Tirar-fotos.jpg"
irn = "http://7diasverdes.com.br/wp-content/uploads/2013/07/Bicicleta-de-passeio.jpg"
irs = "http://www.boulevardshopping.com.br/novo/wp-content/uploads/2012/02/Mcdonalds.jpg"
isn = "http://www.comercialvidoto.com.br/site/wgc_media/photos/Banco-pe-de-Ferro-Tamandua.png"
isl = "http://andif.com.br/imagens/noticias/Banco_Santander_mjg.jpg"
iso = "http://imguol.com/2013/01/08/fiat-mille-economy-1357657820399_956x500.jpg"
iss = "http://images.forwallpaper.com/files/images/a/a809/a809de18/32241/notepad.jpg"
desk = "https://blogpontodeonibus.files.wordpress.com/2012/07/expresso_brasileirold_chassiscania_1.jpg"
drawer = "http://s.glbimg.com/og/rg/f/original/2010/07/09/tiago606.jpg"
imageM = ""
        sala_norte = Sala([isn, desk, iss, iso], NONE)  # sea
        sala_leste = Sala([isn, isl, iss, iso], NONE)  # sea
        sala_sul = Sala([irn, irl, irs, iro], NONE)  # desert
        sala_oeste = Sala([isn, isl, iss, iso], NONE)  # sea
salas = [sala_norte.norte, sala_leste.leste, sala_sul.sul, sala_oeste.oeste]
sala_centro = Sala([imn, iml, ims, imo], salas)
labirinto = Leao.SETOR = Labirinto([
sala_centro, sala_norte, sala_leste, sala_sul, sala_oeste])
labirinto.norte.leste.meio = Cena(img=imageM)
        labirinto.sul.sul.meio = Cena(vai=self.help)  # changed
        labirinto.leste.sul.meio = Cena(vai=self.pega_invent)  # changed
        # NOTE: the original code rebound `labirinto` to a bare Cena here
        # (`labirinto = Cena(vai=self.objetivo)`); that discarded the maze and
        # crashed, since `objetivo` is commented out below, so it is disabled.
        return labirinto
def nao_monta(self):
pass
def vai(self):
labirinto = self.monta()
self.monta = self.nao_monta
labirinto.centro.norte.vai()
return labirinto
"""def pega_card(self):
riocard = "https://www.cartaoriocard.com.br/rcc/static/img/personal-1.png" #link da imagem
flag = None
def clicou(_):
#hipótese de flag
input("Você não está num meio de transporte.")
if not "card" in INVENTARIO.inventario: #Se o Rio Card não estiver no inventário significa que ele pegou
input("Você pegou o RioCard.")
INVENTARIO.bota("card", riocard, clicou)
else:
input("Atenção: o inventário está vazio!")"""
    def pega_invent(self):
        riocard = "https://www.cartaoriocard.com.br/rcc/static/img/personal-1.png"  # image link
        flag = None
        def clicou(_):
            # possible flag case
            input("You are not in a means of transport.")
        if not "card" in INVENTARIO.inventario:  # if the RioCard is not in the inventory, the player picks it up
            input("You picked up the RioCard.")
            INVENTARIO.bota("card", riocard, clicou)
        else:
            # the original message said the inventory was empty, but this
            # branch runs when the card has already been picked up
            input("You already picked up the RioCard.")
    def help(self):
        ajuda = "http://icons.iconarchive.com/icons/oxygen-icons.org/oxygen/256/Actions-help-hint-icon.png"
        flag = None
        def clicou(_):
            # in case the flag happens
            input("You need to go to the room east of the front desk.")
        if not "ajuda" in INVENTARIO.inventario:
            input("Do you want to know about my report on the flu? It is on the desk in the room east of the reception.")
            INVENTARIO.bota("ajuda", ajuda, clicou)
        else:
            input("Did you find the report? Did you look in the right room?")
"""
def objetivo(self):
ajuda = "http://www.iconsdownload.net/icons/256/11335-target-icon.png"
flag = None
def clicou(_):
input("Objetivo do programa: Você é um repórter e precisa achar o relatório com o resumo de todas as matérias que você vai conquistar nos diversos lugares do labirinto.")
"""
INSTANCIA = None
def leao():
def cria_leao():
global INSTANCIA
INSTANCIA = Leao()
if not INSTANCIA:
cria_leao()
return INSTANCIA
if __name__ == "__main__":
    change_bg = "Which color do you want for the background? blue/white"
    escolha = input(change_bg)
    if escolha == "blue":
        background = "blue"
lab = leao()
print(INSTANCIA)
INVENTARIO.inicia()
lab.vai()
# lab.centro.norte.vai()
# lab.sul.oeste.meio = metro.centro.norte
| gpl-3.0 | -6,861,895,350,810,168,000 | 39.288889 | 182 | 0.633388 | false | 2.627536 | false | false | false |
levilucio/SyVOLT | GM2AUTOSAR_MM/transformation/HMapPartition.py | 1 | 3685 | from core.himesis import Himesis
import uuid
class HMapPartition(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule MapPartition.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMapPartition, self).__init__(name='HMapPartition', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """MapPartition"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'MapPartition')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
# match class Partition() node
self.add_node()
self.vs[3]["mm__"] = """Partition"""
self.vs[3]["attr1"] = """+"""
# match_contains node for class Partition()
self.add_node()
self.vs[4]["mm__"] = """match_contains"""
# match class PhysicalNode() node
self.add_node()
self.vs[5]["mm__"] = """PhysicalNode"""
self.vs[5]["attr1"] = """1"""
# match_contains node for class PhysicalNode()
self.add_node()
self.vs[6]["mm__"] = """match_contains"""
# match class Module() node
self.add_node()
self.vs[7]["mm__"] = """Module"""
self.vs[7]["attr1"] = """1"""
# match_contains node for class Module()
self.add_node()
self.vs[8]["mm__"] = """match_contains"""
# apply class SwcToEcuMapping() node
self.add_node()
self.vs[9]["mm__"] = """SwcToEcuMapping"""
self.vs[9]["attr1"] = """1"""
# apply_contains node for class SwcToEcuMapping()
self.add_node()
self.vs[10]["mm__"] = """apply_contains"""
# match association PhysicalNode--partition-->Partition node
self.add_node()
self.vs[11]["attr1"] = """partition"""
self.vs[11]["mm__"] = """directLink_S"""
# match association Partition--module-->Module node
self.add_node()
self.vs[12]["attr1"] = """module"""
self.vs[12]["mm__"] = """directLink_S"""
# Add the edges
self.add_edges([
(0,4), # matchmodel -> match_contains
(4,3), # match_contains -> match_class Partition()
(0,6), # matchmodel -> match_contains
(6,5), # match_contains -> match_class PhysicalNode()
(0,8), # matchmodel -> match_contains
(8,7), # match_contains -> match_class Module()
(1,10), # applymodel -> apply_contains
(10,9), # apply_contains -> apply_class SwcToEcuMapping()
(5,11), # match_class PhysicalNode() -> association partition
(11,3), # association partition -> match_class Partition()
(3,12), # match_class Partition() -> association module
(12,7), # association module -> match_class Module()
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = [((9,'shortName'),('concat',(('constant','Swc2EcuMapping_'),(3,'name')))), ]
| mit | 4,636,470,346,832,838,000 | 32.5 | 104 | 0.493623 | false | 3.962366 | false | false | false |
benpetty/Code-Katas | katas/sort_cards/sort_cards.py | 1 | 1347 | """Sort Cards.
https://www.codewars.com/kata/56f399b59821793533000683
Write a function sort_cards() that sorts a shuffled list of cards,
so that any given list of cards is sorted by rank,
no matter the starting collection.
All cards in the list are represented as strings,
so that sorted list of cards looks like this:
['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K']
Example:
>>> sort_cards(['3', '9', 'A', '5', 'T', '8', '2', '4', 'Q', '7', 'J', '6', 'K'])
['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K']
Hint: Tests will have many occurrences of same rank cards,
as well as vary in length. You can assume though,
that input list is always going to have at least 1 element.
"""
def sort_cards(cards):
"""Input a list of strings representing cards and return them sorted."""
rank = {
"A": 0,
"2": 1,
"3": 2,
"4": 3,
"5": 4,
"6": 5,
"7": 6,
"8": 7,
"9": 8,
"T": 9,
"J": 10,
"Q": 11,
"K": 12,
}
ranked = []
for card in cards:
card = str(card).upper()
if card in rank:
card = (rank[card], card)
ranked.append(card)
ranked = sorted(ranked)
result = []
for card in ranked:
result.append(card[1])
return result
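# A more compact variant (editor's sketch, not part of the original kata
# solution): use the rank string itself as the source of the sort key.
def sort_cards_compact(cards):
    """Equivalent version of sort_cards using sorted() with a key function."""
    order = "A23456789TJQK"
    return sorted((str(card).upper() for card in cards), key=order.index)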
| mit | 3,330,309,443,274,831,400 | 23.944444 | 76 | 0.513734 | false | 2.986696 | false | false | false |
karlnapf/kameleon-mcmc | kameleon_mcmc/tools/Visualise.py | 1 | 5656 | """
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Written (W) 2013 Heiko Strathmann
Written (W) 2013 Dino Sejdinovic
"""
from kameleon_mcmc.distribution.Gaussian import Gaussian
from matplotlib.patches import Ellipse
from matplotlib.pyplot import imshow, ylim, xlim, contour, plot, hold, gca
from numpy import linspace
from numpy.linalg.linalg import eigh
from numpy import zeros, array, exp, arctan2, sqrt
import numpy
class Visualise(object):
def __init__(self):
pass
@staticmethod
def get_plotting_arrays(distribution):
bounds = distribution.get_plotting_bounds()
assert(len(bounds) == 2)
Xs = linspace(bounds[0][0], bounds[0][1])
Ys = linspace(bounds[1][0], bounds[1][1])
return Xs, Ys
@staticmethod
def visualise_distribution(distribution, Z=None, log_density=False, Xs=None, Ys=None):
"""
Plots the density of a given Distribution instance and plots some
samples on top.
"""
if Xs is None or Ys is None:
Xs, Ys = Visualise.get_plotting_arrays(distribution)
Visualise.plot_density(distribution, Xs, Ys)
if Z is not None:
hold(True)
Visualise.plot_data(Z)
hold(False)
@staticmethod
def plot_density(distribution, Xs, Ys, log_domain=False):
"""
Plots a 2D density
        density - distribution instance to plot
Xs - x values the density is evaluated at
Ys - y values the density is evaluated at
log_domain - if False, density will be put into exponential function
"""
assert(distribution.dimension == 2)
        D = zeros((len(Ys), len(Xs)))  # rows follow Ys so D[j, i] below stays in bounds for non-square grids
# compute log-density
for i in range(len(Xs)):
for j in range(len(Ys)):
x = array([[Xs[i], Ys[j]]])
D[j, i] = distribution.log_pdf(x)
if log_domain == False:
D = exp(D)
im = imshow(D, origin='lower')
im.set_extent([Xs.min(), Xs.max(), Ys.min(), Ys.max()])
im.set_interpolation('nearest')
im.set_cmap('gray')
ylim([Ys.min(), Ys.max()])
xlim([Xs.min(), Xs.max()])
@staticmethod
def contour_plot_density(distribution, Xs=None, Ys=None, log_domain=False):
"""
Contour-plots a 2D density. If Gaussian, plots 1.96 interval contour only
density - distribution instance to plot
Xs - x values the density is evaluated at
Ys - y values the density is evaluated at
log_domain - if False, density will be put into exponential function
"""
if isinstance(distribution, Gaussian) and log_domain == False:
gca().add_artist(Visualise.get_gaussian_ellipse_artist(distribution))
gca().plot(distribution.mu[0], distribution.mu[1], 'r*', \
markersize=3.0, markeredgewidth=.1)
return
assert(distribution.dimension == 2)
if Xs is None:
(xmin, xmax), _ = distribution.get_plotting_bounds()
Xs = linspace(xmin, xmax)
if Ys is None:
_, (ymin, ymax) = distribution.get_plotting_bounds()
Ys = linspace(ymin, ymax)
D = zeros((len(Ys), len(Xs)))
# compute log-density
for i in range(len(Xs)):
for j in range(len(Ys)):
x = array([[Xs[i], Ys[j]]])
D[j, i] = distribution.log_pdf(x)
if log_domain == False:
D = exp(D)
contour(Xs, Ys, D, origin='lower')
@staticmethod
def plot_array(Xs, Ys, D):
"""
Plots a 2D array
Xs - x values the density is evaluated at
Ys - y values the density is evaluated at
D - array to plot
"""
im = imshow(D, origin='lower')
im.set_extent([Xs.min(), Xs.max(), Ys.min(), Ys.max()])
im.set_interpolation('nearest')
im.set_cmap('gray')
ylim([Ys.min(), Ys.max()])
xlim([Xs.min(), Xs.max()])
@staticmethod
def plot_data(Z, y=None):
"""
Plots collection of 2D points and optionally adds a marker to one of them
Z - set of row-vectors points to plot
y - one point that is marked in red, might be None
"""
plot(Z[:, 0], Z[:, 1], '*', markersize=3.0, markeredgewidth=.1)
if y is not None:
plot(y[0, 0], y[0, 1], 'r*', markersize=10.0, markeredgewidth=.1)
@staticmethod
def get_gaussian_ellipse_artist(gaussian, nstd=1.96, linewidth=1):
"""
        Returns an ellipse artist for nstd times the standard deviation of this
Gaussian
"""
assert(isinstance(gaussian, Gaussian))
assert(gaussian.dimension == 2)
# compute eigenvalues (ordered)
vals, vecs = eigh(gaussian.L.dot(gaussian.L.T))
order = vals.argsort()[::-1]
vals, vecs = vals[order], vecs[:, order]
theta = numpy.degrees(arctan2(*vecs[:, 0][::-1]))
# width and height are "full" widths, not radius
width, height = 2 * nstd * sqrt(vals)
e = Ellipse(xy=gaussian.mu, width=width, height=height, angle=theta, \
edgecolor="red", fill=False, linewidth=linewidth)
return e
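# Minimal smoke test (editor's sketch, not part of the original module). To
# avoid assuming the Gaussian constructor's exact signature, it uses a tiny
# stand-in object exposing only what plot_density needs: a `dimension`
# attribute and a `log_pdf(x)` method.
if __name__ == "__main__":
    from matplotlib.pyplot import show
    class _StdNormal2D(object):
        dimension = 2
        def log_pdf(self, x):
            # unnormalised log-density of a standard 2-D Gaussian
            return -0.5 * (x ** 2).sum()
    Visualise.plot_density(_StdNormal2D(), linspace(-3, 3), linspace(-3, 3))
    show()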
| bsd-2-clause | 31,964,555,859,623,480 | 33.072289 | 90 | 0.563296 | false | 3.793427 | false | false | false |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/numpy-1.0.4-py2.5-linux-x86_64.egg/numpy/add_newdocs.py | 1 | 44163 | from lib import add_newdoc
add_newdoc('numpy.core','dtype',
[('fields', "Fields of the data-type or None if no fields"),
('names', "Names of fields or None if no fields"),
('alignment', "Needed alignment for this data-type"),
('byteorder',
"Little-endian (<), big-endian (>), native (=), or "\
"not-applicable (|)"),
('char', "Letter typecode for this data-type"),
('type', "Type object associated with this data-type"),
('kind', "Character giving type-family of this data-type"),
('itemsize', "Size of each item"),
('hasobject', "Non-zero if Python objects are in "\
"this data-type"),
('num', "Internally-used number for builtin base"),
('newbyteorder',
"""self.newbyteorder(<endian>)
returns a copy of the dtype object with altered byteorders.
If <endian> is not given all byteorders are swapped.
Otherwise endian can be '>', '<', or '=' to force a particular
byteorder. Data-types in all fields are also updated in the
new dtype object.
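       Example (illustrative):
         >>> dtype('<i4').newbyteorder('>').byteorder
         '>'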
"""),
("__reduce__", "self.__reduce__() for pickling"),
("__setstate__", "self.__setstate__() for pickling"),
("subdtype", "A tuple of (descr, shape) or None"),
("descr", "The array_interface data-type descriptor."),
("str", "The array interface typestring."),
("name", "The name of the true data-type"),
("base", "The base data-type or self if no subdtype"),
("shape", "The shape of the subdtype or (1,)"),
("isbuiltin", "Is this a built-in data-type?"),
("isnative", "Is the byte-order of this data-type native?")
]
)
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
# attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""documentation needed
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""An N-d tuple of current coordinates.
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""documentation needed
"""))
# functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""copy() Get a copy of the iterator as a 1-d array
"""))
###############################################################################
#
# broadcast
#
###############################################################################
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""current index in broadcasted result
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""tuple of individual iterators
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""number of dimensions of broadcasted result
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""number of iterators
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""shape of broadcasted result
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""total size of broadcasted result
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray','array',
"""array(object, dtype=None, copy=1,order=None, subok=0,ndmin=0)
Return an array from object with the specified date-type.
Inputs:
object - an array, any object exposing the array interface, any
object whose __array__ method returns an array, or any
(nested) sequence.
dtype - The desired data-type for the array. If not given, then
the type will be determined as the minimum type required
to hold the objects in the sequence. This argument can only
be used to 'upcast' the array. For downcasting, use the
.astype(t) method.
copy - If true, then force a copy. Otherwise a copy will only occur
if __array__ returns a copy, obj is a nested sequence, or
a copy is needed to satisfy any of the other requirements
order - Specify the order of the array. If order is 'C', then the
array will be in C-contiguous order (last-index varies the
fastest). If order is 'FORTRAN', then the returned array
will be in Fortran-contiguous order (first-index varies the
fastest). If order is None, then the returned array may
be in either C-, or Fortran-contiguous order or even
discontiguous.
subok - If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array
ndmin - Specifies the minimum number of dimensions that the resulting
array should have. 1's will be pre-pended to the shape as
needed to meet this requirement.
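    Examples (illustrative):
      >>> array([1, 2, 3.0])
      array([ 1.,  2.,  3.])
      >>> array([1, 2, 3], ndmin=2).shape
      (1, 3)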
""")
add_newdoc('numpy.core.multiarray','empty',
"""empty((d1,...,dn),dtype=float,order='C')
Return a new array of shape (d1,...,dn) and given type with all its
entries uninitialized. This can be faster than zeros.
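    Example (illustrative; the contents are whatever happened to be in memory):
      >>> empty((2, 3)).shape
      (2, 3)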
""")
add_newdoc('numpy.core.multiarray','scalar',
"""scalar(dtype,obj)
Return a new scalar array of the given type initialized with
obj. Mainly for pickle support. The dtype must be a valid data-type
descriptor. If dtype corresponds to an OBJECT descriptor, then obj
can be any object, otherwise obj must be a string. If obj is not given
it will be interpreted as None for object type and zeros for all other
types.
""")
add_newdoc('numpy.core.multiarray','zeros',
"""zeros((d1,...,dn),dtype=float,order='C')
    Return a new array of shape (d1,...,dn) and the given dtype with all
    its entries initialized to zero.
""")
add_newdoc('numpy.core.multiarray','set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
add_newdoc('numpy.core.multiarray','fromstring',
"""fromstring(string, dtype=float, count=-1, sep='')
Return a new 1d array initialized from the raw binary data in string.
If count is positive, the new array will have count elements, otherwise its
size is determined by the size of string. If sep is not empty then the
string is interpreted in ASCII mode and converted to the desired number type
using sep as the separator between elements (extra whitespace is ignored).
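    Example (illustrative):
      >>> fromstring('1 2 3', dtype=int, sep=' ')
      array([1, 2, 3])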
""")
add_newdoc('numpy.core.multiarray','fromiter',
"""fromiter(iterable, dtype, count=-1)
Return a new 1d array initialized from iterable. If count is
    nonnegative, the new array will have count elements, otherwise its
    size is determined by the iterable.
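    Example (illustrative):
      >>> fromiter((x * x for x in range(4)), dtype=int)
      array([0, 1, 4, 9])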
""")
add_newdoc('numpy.core.multiarray','fromfile',
"""fromfile(file=, dtype=float, count=-1, sep='') -> array.
Required arguments:
file -- open file object or string containing file name.
Keyword arguments:
dtype -- type and order of the returned array (default float)
count -- number of items to input (default all)
sep -- separater between items if file is a text file (default "")
Return an array of the given data type from a text or binary file. The
'file' argument can be an open file or a string with the name of a file to
read from. If 'count' == -1 the entire file is read, otherwise count is the
number of items of the given type to read in. If 'sep' is "" it means to
read binary data from the file using the specified dtype, otherwise it gives
the separator between elements in a text file. The 'dtype' value is also
used to determine the size and order of the items in binary files.
Data written using the tofile() method can be conveniently recovered using
this function.
WARNING: This function should be used sparingly as the binary files are not
platform independent. In particular, they contain no endianess or datatype
information. Nevertheless it can be useful for reading in simply formatted
or binary data quickly.
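    Example (illustrative round-trip; assumes the working directory is
    writable):
      >>> a = array([1., 2., 3.])
      >>> a.tofile('tmp.bin')
      >>> fromfile('tmp.bin', dtype=float)
      array([ 1.,  2.,  3.])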
""")
add_newdoc('numpy.core.multiarray','frombuffer',
"""frombuffer(buffer=, dtype=float, count=-1, offset=0)
Returns a 1-d array of data type dtype from buffer. The buffer
argument must be an object that exposes the buffer interface. If
count is -1 then the entire buffer is used, otherwise, count is the
size of the output. If offset is given then jump that far into the
buffer. If the buffer has data that is out not in machine byte-order,
than use a propert data type descriptor. The data will not be
byteswapped, but the array will manage it in future operations.
""")
add_newdoc('numpy.core.multiarray','concatenate',
"""concatenate((a1, a2, ...), axis=0)
Join arrays together.
The tuple of sequences (a1, a2, ...) are joined along the given axis
(default is the first one) into a single numpy array.
Example:
>>> concatenate( ([0,1,2], [5,6,7]) )
array([0, 1, 2, 5, 6, 7])
""")
add_newdoc('numpy.core.multiarray','inner',
"""inner(a,b)
    Returns the dot product of two arrays, which has shape a.shape[:-1] +
    b.shape[:-1] with elements computed by summing the products of the
    elements from the last dimensions of a and b.
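    Example (illustrative):
      >>> inner([1, 2, 3], [0, 1, 0])
      2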
""")
add_newdoc('numpy.core','fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
add_newdoc('numpy.core.multiarray','correlate',
"""cross_correlate(a,v, mode=0)""")
add_newdoc('numpy.core.multiarray','arange',
"""arange([start,] stop[, step,], dtype=None)
For integer arguments, just like range() except it returns an array
whose type can be specified by the keyword argument dtype. If dtype
is not specified, the type of the result is deduced from the type of
the arguments.
For floating point arguments, the length of the result is ceil((stop -
start)/step). This rule may result in the last element of the result
being greater than stop.
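    Example (illustrative):
      >>> arange(3)
      array([0, 1, 2])
      >>> arange(3.0)
      array([ 0.,  1.,  2.])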
""")
add_newdoc('numpy.core.multiarray','_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NDARRAY_VERSION number.
""")
add_newdoc('numpy.core.multiarray','_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by Pickles.
""")
add_newdoc('numpy.core.multiarray','set_string_function',
"""set_string_function(f, repr=1)
Set the python function f to be the function used to obtain a pretty
printable string version of an array whenever an array is printed.
f(M) should expect an array argument M, and should return a string
consisting of the desired representation of M for printing.
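    Example (illustrative):
      >>> set_string_function(lambda a: 'array of shape %s' % (a.shape,))
      >>> set_string_function(None)   # passing None restores the default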
""")
add_newdoc('numpy.core.multiarray','set_numeric_ops',
"""set_numeric_ops(op=func, ...)
Set some or all of the number methods for all array objects. Do not
forget **dict can be used as the argument list. Return the functions
that were replaced, which can be stored and set later.
""")
add_newdoc('numpy.core.multiarray','where',
"""where(condition, x, y) or where(condition)
Return elements from `x` or `y`, depending on `condition`.
*Parameters*:
condition : array of bool
When True, yield x, otherwise yield y.
x,y : 1-dimensional arrays
Values from which to choose.
*Notes*
This is equivalent to
[xv if c else yv for (c,xv,yv) in zip(condition,x,y)]
The result is shaped like `condition` and has elements of `x`
or `y` where `condition` is respectively True or False.
In the special case, where only `condition` is given, the
tuple condition.nonzero() is returned, instead.
*Examples*
>>> where([True,False,True],[1,2,3],[4,5,6])
array([1, 5, 3])
""")
add_newdoc('numpy.core.multiarray','lexsort',
"""lexsort(keys=, axis=-1) -> array of indices. Argsort with list of keys.
Perform an indirect sort using a list of keys. The first key is sorted,
then the second, and so on through the list of keys. At each step the
previous order is preserved when equal keys are encountered. The result is
a sort on multiple keys. If the keys represented columns of a spreadsheet,
for example, this would sort using multiple columns (the last key being
used for the primary sort order, the second-to-last key for the secondary
sort order, and so on). The keys argument must be a sequence of things
that can be converted to arrays of the same shape.
Parameters:
      keys : sequence of arrays
          Sequence of arrays (or objects convertible to arrays) of the
          same shape; their values are the sort keys.
axis : integer
Axis to be indirectly sorted. None indicates that the flattened
array should be used. Default is -1.
Returns:
indices : integer array
Array of indices that sort the keys along the specified axis. The
array has the same shape as the keys.
SeeAlso:
argsort : indirect sort
sort : inplace sort
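    Example (illustrative; the last key is the primary sort key):
      >>> surnames = ('Hertz', 'Galilei', 'Hertz')
      >>> first_names = ('Heinrich', 'Galileo', 'Gustav')
      >>> lexsort((first_names, surnames))
      array([1, 2, 0])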
""")
add_newdoc('numpy.core.multiarray','can_cast',
"""can_cast(from=d1, to=d2)
Returns True if data type d1 can be cast to data type d2 without
losing precision.
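    Example (illustrative):
      >>> can_cast(int32, float64)
      True
      >>> can_cast(float64, int32)
      False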
""")
add_newdoc('numpy.core.multiarray','newbuffer',
"""newbuffer(size)
Return a new uninitialized buffer object of size bytes
""")
add_newdoc('numpy.core.multiarray','getbuffer',
"""getbuffer(obj [,offset[, size]])
Create a buffer object from the given object referencing a slice of
length size starting at offset. Default is the entire buffer. A
read-write buffer is attempted followed by a read-only buffer.
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray',
"""An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type-descriptor object
details the data-type in an array (including byteorder and any
fields). An array can be constructed using the numpy.array
command. Arrays are sequence, mapping and numeric objects.
More information is available in the numpy module and by looking
at the methods and attributes of an array.
ndarray.__new__(subtype, shape=, dtype=float, buffer=None,
offset=0, strides=None, order=None)
There are two modes of creating an array using __new__:
1) If buffer is None, then only shape, dtype, and order
are used
2) If buffer is an object exporting the buffer interface, then
all keywords are interpreted.
The dtype parameter can be any object that can be interpreted
as a numpy.dtype object.
No __init__ method is needed because the array is fully
initialized after the __new__ method.
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""None."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_',
"""Allow the array to be interpreted as a ctypes object by returning the
data-memory location as an integer
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""Base object if memory is from some other object.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""A ctypes interface object.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Buffer object pointing to the start of the data.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""Data-type for the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""Imaginary part of the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""Length of one element in bytes.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""Special object providing array flags.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""A 1-d flat iterator.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""Number of bytes in the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""Number of array dimensions.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""Real part of the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""Tuple of array dimensions.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""Number of elements in the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""Tuple of bytes to step in each dimension.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""Same as self.transpose() except self is returned for self.ndim < 2.
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__(|dtype) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(obj) -> Object of same type as a from ndarray obj.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__(|order) -> copy, possibly with different order.
Return a copy of the array.
Argument:
order -- Order of returned copy (default 'C')
If order is 'C' (False) then the result is contiguous (default).
If order is 'Fortran' (True) then the result has fortran order.
If order is 'Any' (None) then the result has fortran order
    only if m is already in fortran order.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__() -> Deep copy of array.
Used if copy.deepcopy is called on an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(version, shape, typecode, isfortran, rawdata)
For unpickling.
Arguments:
version -- optional pickle version. If omitted defaults to 0.
shape -- a tuple giving the shape
typecode -- a typecode
        isFortran -- a bool stating whether the data is in Fortran order
rawdata -- a binary string with the data (or a list if Object array)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
""" a.all(axis=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
""" a.any(axis=None, out=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
""" a.argmax(axis=None, out=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
""" a.argmin(axis=None, out=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""a.argsort(axis=-1, kind='quicksort', order=None) -> indices
Perform an indirect sort along the given axis using the algorithm specified
by the kind keyword. It returns an array of indices of the same shape as
'a' that index data along the given axis in sorted order.
:Parameters:
axis : integer
Axis to be indirectly sorted. None indicates that the flattened
array should be used. Default is -1.
kind : string
Sorting algorithm to use. Possible values are 'quicksort',
'mergesort', or 'heapsort'. Default is 'quicksort'.
order : list type or None
When a is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
:Returns:
indices : integer array
Array of indices that sort 'a' along the specified axis.
:SeeAlso:
- lexsort : indirect stable sort with multiple keys
- sort : inplace sort
:Notes:
------
The various sorts are characterized by average speed, worst case
performance, need for work space, and whether they are stable. A stable
sort keeps items with the same key in the same relative order. The three
available algorithms have the following properties:
|------------------------------------------------------|
| kind | speed | worst case | work space | stable|
|------------------------------------------------------|
|'quicksort'| 1 | O(n^2) | 0 | no |
|'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes |
|'heapsort' | 3 | O(n*log(n)) | 0 | no |
|------------------------------------------------------|
    All the sort algorithms make temporary copies of the data when the sort
    is not along the last axis. Consequently, sorts along the last axis are
    faster and use less space than sorts along other axes.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""a.astype(t) -> Copy of array cast to type t.
Cast array m to type t. t can be either a string representing a typecode,
or a python type object of type int, float, or complex.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""a.byteswap(False) -> View or copy. Swap the bytes in the array.
    Return the byteswapped array. If the first argument is True, byteswap
    in-place and return a reference to self.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
""" a.choose(b0, b1, ..., bn, out=None, mode='raise')
Return an array that merges the b_i arrays together using 'a' as
the index The b_i arrays and 'a' must all be broadcastable to the
same shape. The output at a particular position is the input
array b_i at that position depending on the value of 'a' at that
    position. Therefore, 'a' must be an integer array with entries
    from 0 to n.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""a.clip(min=, max=, out=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""a.compress(condition=, axis=None, out=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""a.conj()
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""a.conjugate()
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""a.copy(|order) -> copy, possibly with different order.
Return a copy of the array.
Argument:
order -- Order of returned copy (default 'C')
If order is 'C' (False) then the result is contiguous (default).
If order is 'Fortran' (True) then the result has fortran order.
If order is 'Any' (None) then the result has fortran order
    only if m is already in fortran order.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""a.cumprod(axis=None, dtype=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""a.cumsum(axis=None, dtype=None, out=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""a.diagonal(offset=0, axis1=0, axis2=1) -> diagonals
If a is 2-d, return the diagonal of self with the given offset, i.e., the
collection of elements of the form a[i,i+offset]. If a is n-d with n > 2,
then the axes specified by axis1 and axis2 are used to determine the 2-d
subarray whose diagonal is returned. The shape of the resulting array can
be determined by removing axis1 and axis2 and appending an index to the
right equal to the size of the resulting diagonals.
:Parameters:
offset : integer
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to main diagonal.
axis1 : integer
Axis to be used as the first axis of the 2-d subarrays from which
the diagonals should be taken. Defaults to first index.
axis2 : integer
Axis to be used as the second axis of the 2-d subarrays from which
the diagonals should be taken. Defaults to second index.
:Returns:
array_of_diagonals : same type as original array
If a is 2-d, then a 1-d array containing the diagonal is returned.
If a is n-d, n > 2, then an array of diagonals is returned.
:SeeAlso:
- diag : matlab workalike for 1-d and 2-d arrays.
- diagflat : creates diagonal arrays
- trace : sum along diagonals
Examples
--------
>>> a = arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> a.diagonal()
array([0, 3])
>>> a.diagonal(1)
array([1])
>>> a = arange(8).reshape(2,2,2)
>>> a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> a.diagonal(0,-2,-1)
array([[0, 3],
[4, 7]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file) Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load
Arguments:
file -- string naming the dump file.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""a.dumps() returns the pickle of the array as a string.
pickle.loads or numpy.loads will convert the string back to an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""a.fill(value) -> None. Fill the array with the scalar value.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""a.flatten([fortran]) return a 1-d array (always copy)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""a.getfield(dtype, offset) -> field of array as given type.
Returns a field of the given array as a certain type. A field is a view of
the array data with each itemsize determined by the given type and the
offset into the current array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""a.item() -> copy of first array item as Python scalar.
Copy the first element of array to a standard Python scalar and return
it. The array must be of size one.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""a.max(axis=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""a.mean(axis=None, dtype=None, out=None) -> mean
Returns the average of the array elements. The average is taken over the
flattened array by default, otherwise over the specified axis.
:Parameters:
axis : integer
Axis along which the means are computed. The default is
to compute the standard deviation of the flattened array.
dtype : type
Type to use in computing the means. For arrays of
integer type the default is float32, for arrays of float types it
is the same as the array type.
out : ndarray
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
:Returns:
mean : The return type varies, see above.
A new array holding the result is returned unless out is specified,
in which case a reference to out is returned.
:SeeAlso:
- var : variance
- std : standard deviation
Notes
-----
The mean is the sum of the elements along the axis divided by the
number of elements.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""a.min(axis=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""a.newbyteorder(<byteorder>) is equivalent to
    a.view(a.dtype.newbyteorder(<byteorder>))
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""a.nonzero() returns a tuple of arrays
Returns a tuple of arrays, one for each dimension of a,
containing the indices of the non-zero elements in that
dimension. The corresponding non-zero values can be obtained
with
a[a.nonzero()].
To group the indices by element, rather than dimension, use
transpose(a.nonzero())
instead. The result of this is always a 2d array, with a row for
    each non-zero element.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""a.prod(axis=None, dtype=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""a.ptp(axis=None) a.max(axis)-a.min(axis)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""a.put(indices, values, mode) sets a.flat[n] = values[n] for
each n in indices. If values is shorter than indices then it
will repeat.
"""))
add_newdoc('numpy.core.multiarray', 'putmask',
"""putmask(a, mask, values) sets a.flat[n] = values[n] for each n where
    mask.flat[n] is true. If values is not the same size as a and mask then
it will repeat. This gives different behavior than a[mask] = values.
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""a.ravel([fortran]) return a 1-d array (copy only if needed)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""a.repeat(repeats=, axis=none)
copy elements of a, repeats times. the repeats argument must be a sequence
of length a.shape[axis] or a scalar.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""a.reshape(d1, d2, ..., dn, order='c')
    Return a new array from this one. The new array must have the same number
    of elements as self. A view is returned whenever possible; otherwise the
    data is copied.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""a.resize(new_shape, refcheck=True, order=False) -> None. Change array shape.
Change size and shape of self inplace. Array must own its own memory and
not be referenced by other arrays. Returns None.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""a.round(decimals=0, out=None) -> out (a). Rounds to 'decimals' places.
Keyword arguments:
decimals -- number of decimals to round to (default 0). May be negative.
out -- existing array to use for output (default a).
    Return:
        A reference to out; if out is None, a new rounded array is returned.
Round to the specified number of decimals. When 'decimals' is negative it
specifies the number of positions to the left of the decimal point. The
real and imaginary parts of complex numbers are rounded separately. Nothing
is done if the array is not of float type and 'decimals' is >= 0.
The keyword 'out' may be used to specify a different array to hold the
result rather than the default 'a'. If the type of the array specified by
'out' differs from that of 'a', the result is cast to the new type,
otherwise the original type is kept. Floats round to floats by default.
Numpy rounds to even. Thus 1.5 and 2.5 round to 2.0, -0.5 and 0.5 round to
0.0, etc. Results may also be surprising due to the inexact representation
of decimal fractions in IEEE floating point and the errors introduced in
scaling the numbers when 'decimals' is something other than 0.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""a.searchsorted(v, side='left') -> index array.
Find the indices into a sorted array such that if the corresponding keys in
v were inserted before the indices the order of a would be preserved. If
side='left', then the first such index is returned. If side='right', then
the last such index is returned. If there is no such index because the key
is out of bounds, then the length of a is returned, i.e., the key would
need to be appended. The returned index array has the same shape as v.
:Parameters:
v : array or list type
Array of keys to be searched for in a.
side : string
Possible values are : 'left', 'right'. Default is 'left'. Return
the first or last index where the key could be inserted.
:Returns:
indices : integer array
The returned array has the same shape as v.
:SeeAlso:
- sort
- histogram
:Notes:
-------
The array a must be 1-d and is assumed to be sorted in ascending order.
Searchsorted uses binary search to find the required insertion points.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""m.setfield(value, dtype, offset) -> None.
places val into field of the given array defined by the data type and offset.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""a.setflags(write=None, align=None, uic=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""a.sort(axis=-1, kind='quicksort', order=None) -> None.
Perform an inplace sort along the given axis using the algorithm specified
by the kind keyword.
:Parameters:
axis : integer
Axis to be sorted along. None indicates that the flattened array
should be used. Default is -1.
kind : string
Sorting algorithm to use. Possible values are 'quicksort',
'mergesort', or 'heapsort'. Default is 'quicksort'.
order : list type or None
When a is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
:Returns:
None
:SeeAlso:
- argsort : indirect sort
- lexsort : indirect stable sort on multiple keys
- searchsorted : find keys in sorted array
:Notes:
------
The various sorts are characterized by average speed, worst case
performance, need for work space, and whether they are stable. A stable
sort keeps items with the same key in the same relative order. The three
available algorithms have the following properties:
|------------------------------------------------------|
| kind | speed | worst case | work space | stable|
|------------------------------------------------------|
|'quicksort'| 1 | O(n^2) | 0 | no |
|'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes |
|'heapsort' | 3 | O(n*log(n)) | 0 | no |
|------------------------------------------------------|
    All the sort algorithms make temporary copies of the data when the sort
    is not along the last axis. Consequently, sorts along the last axis are
    faster and use less space than sorts along other axes.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""m.squeeze() eliminate all length-1 dimensions
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""a.std(axis=None, dtype=None, out=None) -> standard deviation.
Returns the standard deviation of the array elements, a measure of the
spread of a distribution. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
:Parameters:
axis : integer
Axis along which the standard deviation is computed. The default is
to compute the standard deviation of the flattened array.
dtype : type
Type to use in computing the standard deviation. For arrays of
integer type the default is float32, for arrays of float types it
is the same as the array type.
out : ndarray
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
:Returns:
standard deviation : The return type varies, see above.
A new array holding the result is returned unless out is specified,
in which case a reference to out is returned.
:SeeAlso:
- var : variance
- mean : average
Notes
-----
The standard deviation is the square root of the average of the squared
    deviations from the mean, i.e. std = sqrt(mean((x - x.mean())**2)). The
computed standard deviation is biased, i.e., the mean is computed by
dividing by the number of elements, N, rather than by N-1.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""a.sum(axis=None, dtype=None) -> Sum of array over given axis.
Sum the array over the given axis. If the axis is None, sum over
all dimensions of the array.
The optional dtype argument is the data type for the returned
value and intermediate calculations. The default is to upcast
(promote) smaller integer types to the platform-dependent int.
For example, on 32-bit platforms:
a.dtype default sum dtype
---------------------------------------------------
bool, int8, int16, int32 int32
Warning: The arithmetic is modular and no error is raised on overflow.
Examples:
>>> array([0.5, 1.5]).sum()
2.0
>>> array([0.5, 1.5]).sum(dtype=int32)
1
>>> array([[0, 1], [0, 5]]).sum(axis=0)
array([0, 6])
>>> array([[0, 1], [0, 5]]).sum(axis=1)
array([1, 5])
>>> ones(128, dtype=int8).sum(dtype=int8) # overflow!
-128
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""a.swapaxes(axis1, axis2) -> new view with axes swapped.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""a.take(indices, axis=None, out=None, mode='raise') -> new array.
The new array is formed from the elements of a indexed by indices along the
given axis.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""a.tofile(fid, sep="", format="%s") -> None. Write the data to a file.
Required arguments:
file -- an open file object or a string containing a filename
Keyword arguments:
sep -- separator for text output. Write binary if empty (default "")
format -- format string for text file output (default "%s")
A convenience function for quick storage of array data. Information on
endianess and precision is lost, so this method is not a good choice for
files intended to archive data or transport data between machines with
different endianess. Some of these problems can be overcome by outputting
the data as text files at the expense of speed and file size.
If 'sep' is empty this method is equivalent to file.write(a.tostring()). If
'sep' is not empty each data item is converted to the nearest Python type
and formatted using "format"%item. The resulting strings are written to the
file separated by the contents of 'sep'. The data is always written in "C"
(row major) order independent of the order of 'a'.
The data produced by this method can be recovered by using the function
fromfile().
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""a.tolist() -> Array as hierarchical list.
Copy the data portion of the array to a hierarchical python list and return
that list. Data items are converted to the nearest compatible Python type.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring',
"""a.tostring(order='C') -> raw copy of array data as a Python string.
Keyword arguments:
order -- order of the data item in the copy {"C","F","A"} (default "C")
Construct a Python string containing the raw bytes in the array. The order
of the data in arrays with ndim > 1 is specified by the 'order' keyword and
this keyword overrides the order of the array. The
choices are:
"C" -- C order (row major)
"Fortran" -- Fortran order (column major)
"Any" -- Current order of array.
None -- Same as "Any"
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
return the sum along the offset diagonal of the array's indicated
axis1 and axis2.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""a.transpose(*axes)
    Returns a view of 'a' with axes transposed. If no axes are given,
    or None is passed, the order of the axes is reversed. For a 2-d
array, this is the usual matrix transpose. If axes are given,
they describe how the axes are permuted.
Example:
>>> a = array([[1,2],[3,4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1,0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1,0)
array([[1, 3],
[2, 4]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""a.var(axis=None, dtype=None, out=None) -> variance
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by default,
otherwise over the specified axis.
:Parameters:
axis : integer
Axis along which the variance is computed. The default is to
compute the variance of the flattened array.
dtype : type
Type to use in computing the variance. For arrays of integer type
the default is float32, for arrays of float types it is the same as
the array type.
out : ndarray
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
:Returns:
variance : The return type varies, see above.
A new array holding the result is returned unless out is specified,
in which case a reference to out is returned.
:SeeAlso:
- std : standard deviation
- mean: average
Notes
-----
The variance is the average of the squared deviations from the mean, i.e.
var = mean((x - x.mean())**2). The computed variance is biased, i.e.,
the mean is computed by dividing by the number of elements, N, rather
than by N-1.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""a.view(<type>) -> new view of array with same data.
Type can be either a new sub-type object or a data-descriptor object
"""))
| bsd-3-clause | -6,906,141,212,455,126,000 | 29.797071 | 83 | 0.613206 | false | 4.158475 | false | false | false |
italomaia/turtle-linux | games/BubbleKing/lib/menu.py | 1 | 13774 | import os
import pygame
from pygame.locals import *
from pgu import engine
import data
from cnst import *
import levels
class Menu(engine.State):
def __init__(self,game):
self.game = game
def init(self):
self.font = self.game.font
self.bkgr = pygame.image.load(data.filepath(os.path.join('bkgr','2.png')))
self.cur = 0
self.game.lcur = 0
self.levels = []
#for fname in os.listdir(data.filepath('levels')):
#if fname[0]=='.': continue
#self.levels.append((fname,fname.replace('.tga','')))
#self.levels.sort()
for fname,title in levels.LEVELS:
self.levels.append((fname,title))
self.items = [
('play the game!','start'),
('select <L>','play'),
('help','help'),
('credits','credits'),
('quit','quit'),
]
self.rects = []
self.frame = 0
def paint(self,screen):
x = self.frame%(self.bkgr.get_width())
screen.blit(self.bkgr,(-x,0))
screen.blit(self.bkgr,(-x+self.bkgr.get_width(),0))
x,y = 0,4
fnt = self.game.fonts['title']
c =(0,0,0)
text = TITLE
img = fnt.render(text,1,c)
screen.blit(img,((SW-img.get_width())/2,y))
y += 48
fnt = self.font
text = 'high: %05d'%self.game.high
c = (0x00,0x00,0x00)
img = fnt.render(text,1,c)
x = (SW-img.get_width())/2
screen.blit(img,(x+1,y+1))
c = (0xff,0xff,0xff)
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 36
x = 90
for n in xrange(0,len(self.items)):
text,value = self.items[n]
text = text.replace('L',self.levels[self.game.lcur][1])
c = (0x00,0x00,0x00)
img = fnt.render(text,1,c)
x = (SW-img.get_width())/2
screen.blit(img,(x+1,y+1))
c = (0xff,0xff,0xff)
if n == self.cur: c = (0xaa,0xaa,0xaa)
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 24
text = 'www.imitationpickles.org'
c = (0x00,0x00,0x00)
img = fnt.render(text,1,c)
x = (SW-img.get_width())/2
y = SH-(img.get_height()+4)
screen.blit(img,(x+1,y+1))
c = (0xff,0xff,0xff)
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
self.game.flip()
def update(self,screen):
return self.paint(screen)
def loop(self):
self.game.music_play('title')
self.frame += 1
def event(self,e):
if e.type is USEREVENT and e.action == 'down':
self.cur = (self.cur+1)%len(self.items)
self.repaint()
elif e.type is USEREVENT and e.action == 'up':
self.cur = (self.cur-1+len(self.items))%len(self.items)
self.repaint()
elif e.type is USEREVENT and e.action == 'left':
self.game.lcur = (self.game.lcur-1+len(self.levels))%len(self.levels)
self.repaint()
elif e.type is USEREVENT and e.action == 'right':
self.game.lcur = (self.game.lcur+1)%len(self.levels)
self.repaint()
elif e.type is USEREVENT and e.action == 'exit':
return engine.Quit(self.game)
elif e.type is USEREVENT and (e.action == 'menu' or e.action == 'jump'):
text,value = self.items[self.cur]
if value == 'start':
self.game.init_play()
self.game.lcur = 0
import level
l = level.Level(self.game,None,self)
return Transition(self.game,l)
elif value == 'play':
self.game.init_play()
import level
l = level.Level(self.game,None,self)
return Transition(self.game,l)
elif value == 'quit':
return engine.Quit(self.game)
elif value == 'credits':
return Transition(self.game,Credits(self.game,self))
elif value == 'help':
return Transition(self.game,Help(self.game,self))
class Transition(engine.State):
def __init__(self,game,next):
self.game,self.next = game,next
def init(self):
self.s1 = self.game.screen.convert()
self.init2()
self.frame = 0
self.total = FPS
self.inc = 0
def init2(self):
if hasattr(self.next,'init') and not hasattr(self.next,'_init'):
self.next._init = 0
self.next.init()
self.s2 = self.game.screen.convert()
self.next.paint(self.s2)
def loop(self):
#self.frame += 1
self.inc += 1
#if (self.inc%2) == 0: self.frame += 1
self.frame += 1
if self.frame == self.total:
self.game.screen.blit(self.s2,(0,0))
self.game.flip()
return self.next
def update(self,screen):
return self.paint(screen)
def paint(self,screen):
f = self.frame
t = self.total
t2 = t/2
if f < t2:
i = self.s1
w = max(2,SW * (t2-f) / t2)
i = pygame.transform.scale(i,(w,SH*w/SW))
else:
f = t2-(f-t2)
i = self.s2
w = max(2,SW * (t2-f) / t2)
i = pygame.transform.scale(i,(w,SH*w/SW))
i = pygame.transform.scale(i,(SW,SH))
screen.blit(i,(0,0))
self.game.flip()
class Intro(engine.State):
def __init__(self,game,next):
self.game = game
self.next = next
def init(self):
self.frame = FPS
self.moon = pygame.image.load(data.filepath(os.path.join('intro','moon2.png'))).convert()
self.black = self.moon.convert()
self.black.fill((0,0,0))
def update(self,screen):
return self.paint(screen)
def loop(self):
self.frame += 1
if self.frame == FPS*7:
return Transition(self.game,Intro2(self.game,self.next))
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return Transition(self.game,self.next)
def paint(self,screen):
screen.fill((0,0,0))
f = self.frame
inc = FPS
if 0 < f < inc:
pass
f -= inc
inc = FPS*7
if 0 < f < inc:
a = 255
if f > FPS*2:
screen.blit(self.moon,(0,0))
a = 255- ((f-FPS*2)*255/(FPS*2))
self.black.set_alpha(a)
screen.blit(self.black,(0,0))
fnt = self.game.fonts['intro']
x,y = 8,0
for text in ['... July 20, 1969','man first','walked on','the moon.']:
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 36
if f < FPS:
a = 255-(f*255/FPS)
self.black.set_alpha(a)
screen.blit(self.black,(0,0))
self.game.flip()
class Intro2(engine.State):
def __init__(self,game,next):
self.game = game
self.next = next
def init(self):
self.moon = pygame.image.load(data.filepath(os.path.join('intro','moon2.png'))).convert()
img = pygame.image.load(data.filepath(os.path.join('images','player','right.png')))
w = 160
self.player = pygame.transform.scale(img,(w,img.get_height()*w/img.get_width()))
self.bkgr = pygame.image.load(data.filepath(os.path.join('bkgr','2.png')))
self.frame = 0
def loop(self):
self.frame += 1
if self.frame == FPS*2:
return Transition(self.game,self.next)
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return Transition(self.game,self.next)
def paint(self,screen):
#screen.fill((0,0,0))
screen.blit(self.bkgr,(0,0))
fnt = self.game.fonts['intro']
x,y = 8,0
for text in ['This is','the year','of the','seahorse!']:
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 36
screen.blit(self.player,(130,0))
self.game.flip()
class Prompt(engine.State):
def __init__(self,game,text,yes,no):
self.game = game
self.text = text
self.yes = yes
self.no = no
def init(self):
self.font = self.game.fonts['pause']
self.bkgr = self.game.screen.convert()
def event(self,e):
if e.type is KEYDOWN and e.key == K_y:
return self.yes
if e.type is KEYDOWN and e.key == K_n:
return self.no
def paint(self,screen):
screen.blit(self.bkgr,(0,0))
text = self.text
fnt = self.font
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
x,y = (SW-img.get_width())/2,(SH-img.get_height())/2
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
self.game.flip()
class Pause(engine.State):
def __init__(self,game,text,next):
self.game = game
self.text = text
self.next = next
def init(self):
self.font = self.game.fonts['pause']
self.bkgr = self.game.screen.convert()
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return self.next
def paint(self,screen):
screen.blit(self.bkgr,(0,0))
text = self.text
fnt = self.font
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
x,y = (SW-img.get_width())/2,(SH-img.get_height())/2
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
self.game.flip()
class Credits(engine.State):
def __init__(self,game,next):
self.game = game
self.next = next
def init(self):
self.frame = 0
self.bkgr = pygame.image.load(data.filepath(os.path.join('bkgr',"5.png"))).convert()
    def update(self,screen):
        return self.paint(screen)
def loop(self):
self.frame += 1
#if self.frame == FPS*7:
#return Transition(self.game,Intro2(self.game,self.next))
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return Transition(self.game,self.next)
def paint(self,screen):
x = self.frame%(self.bkgr.get_width())
screen.blit(self.bkgr,(-x,0))
screen.blit(self.bkgr,(-x+self.bkgr.get_width(),0))
fnt = self.game.fonts['help']
x,y = 8,10
for text in [
'Core Team',
'',
'philhassey - director, code, levels',
'trick - tiles, sprites',
'pekuja - code, levels',
'tim - music, levels',
'DrPetter - backgrounds, sfx',
'',
'Also thanks to:',
'fydo (level), Lerc (gfx), Tee (level)',
]:
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
x = (SW-img.get_width())/2
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 20
self.game.flip()
class Help(engine.State):
def __init__(self,game,next):
self.game = game
self.next = next
def init(self):
self.frame = 0
self.bkgr = pygame.image.load(data.filepath(os.path.join('bkgr',"5.png"))).convert()
    def update(self,screen):
        return self.paint(screen)
def loop(self):
self.frame += 1
#if self.frame == FPS*7:
#return Transition(self.game,Intro2(self.game,self.next))
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return Transition(self.game,self.next)
def paint(self,screen):
x = self.frame%(self.bkgr.get_width())
screen.blit(self.bkgr,(-x,0))
screen.blit(self.bkgr,(-x+self.bkgr.get_width(),0))
fnt = self.game.fonts['help']
x,y = 8,10
for text in [
'Help',
'',
'Use your arrow keys to',
'move the seahorse.',
'Button 1 - Jump',
'Button 2 - Shoot',
'',
'Enemies take 3 shots unless',
'you are powered up! You can',
'ride enemy bubbles.',
]:
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
x = (SW-img.get_width())/2
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 20
self.game.flip()
| gpl-3.0 | 2,693,359,161,638,019,000 | 29.074236 | 102 | 0.483665 | false | 3.370198 | false | false | false |
mathiasertl/fabric | fabric/context_managers.py | 1 | 20926 | """
Context managers for use with the ``with`` statement.
.. note:: If you are using multiple directly nested ``with`` statements, it can
be convenient to use multiple context expressions in one single with
statement. Instead of writing::
with cd('/path/to/app'):
with prefix('workon myvenv'):
run('./manage.py syncdb')
run('./manage.py loaddata myfixture')
you can write::
with cd('/path/to/app'), prefix('workon myvenv'):
run('./manage.py syncdb')
run('./manage.py loaddata myfixture')
"""
from contextlib import contextmanager
import six
import socket
import select
from fabric.thread_handling import ThreadHandler
from fabric.state import output, win32, connections, env
from fabric import state
from fabric.utils import isatty
if six.PY2:
from contextlib import nested
else:
from contextlib import ExitStack
class nested(ExitStack):
def __init__(self, *managers):
super(nested, self).__init__()
for manager in managers:
self.enter_context(manager)
if not win32:
import termios
import tty
def _set_output(groups, which):
"""
Refactored subroutine used by ``hide`` and ``show``.
"""
previous = {}
try:
# Preserve original values, pull in new given value to use
for group in output.expand_aliases(groups):
previous[group] = output[group]
output[group] = which
# Yield control
yield
finally:
# Restore original values
output.update(previous)
def documented_contextmanager(func):
wrapper = contextmanager(func)
wrapper.undecorated = func
return wrapper
@documented_contextmanager
def show(*groups):
"""
Context manager for setting the given output ``groups`` to True.
``groups`` must be one or more strings naming the output groups defined in
`~fabric.state.output`. The given groups will be set to True for the
duration of the enclosed block, and restored to their previous value
afterwards.
For example, to turn on debug output (which is typically off by default)::
def my_task():
with show('debug'):
run('ls /var/www')
As almost all output groups are displayed by default, `show` is most useful
for turning on the normally-hidden ``debug`` group, or when you know or
suspect that code calling your own code is trying to hide output with
`hide`.
"""
return _set_output(groups, True)
@documented_contextmanager
def hide(*groups):
"""
Context manager for setting the given output ``groups`` to False.
``groups`` must be one or more strings naming the output groups defined in
`~fabric.state.output`. The given groups will be set to False for the
duration of the enclosed block, and restored to their previous value
afterwards.
For example, to hide the "[hostname] run:" status lines, as well as
preventing printout of stdout and stderr, one might use `hide` as follows::
def my_task():
with hide('running', 'stdout', 'stderr'):
run('ls /var/www')
"""
return _set_output(groups, False)
@documented_contextmanager
def _setenv(variables):
"""
Context manager temporarily overriding ``env`` with given key/value pairs.
A callable that returns a dict can also be passed. This is necessary when
new values are being calculated from current values, in order to ensure that
the "current" value is current at the time that the context is entered, not
when the context manager is initialized. (See Issue #736.)
This context manager is used internally by `settings` and is not intended
to be used directly.
"""
if callable(variables):
variables = variables()
clean_revert = variables.pop('clean_revert', False)
previous = {}
new = []
for key, value in six.iteritems(variables):
if key in state.env:
previous[key] = state.env[key]
else:
new.append(key)
state.env[key] = value
try:
yield
finally:
if clean_revert:
for key, value in six.iteritems(variables):
# If the current env value for this key still matches the
# value we set it to beforehand, we are OK to revert it to the
# pre-block value.
if key in state.env and value == state.env[key]:
if key in previous:
state.env[key] = previous[key]
else:
del state.env[key]
else:
state.env.update(previous)
for key in new:
del state.env[key]
def settings(*args, **kwargs):
"""
Nest context managers and/or override ``env`` variables.
`settings` serves two purposes:
* Most usefully, it allows temporary overriding/updating of ``env`` with
any provided keyword arguments, e.g. ``with settings(user='foo'):``.
Original values, if any, will be restored once the ``with`` block closes.
* The keyword argument ``clean_revert`` has special meaning for
``settings`` itself (see below) and will be stripped out before
execution.
* In addition, it will use `contextlib.nested`_ to nest any given
non-keyword arguments, which should be other context managers, e.g.
``with settings(hide('stderr'), show('stdout')):``.
.. _contextlib.nested: http://docs.python.org/library/contextlib.html#contextlib.nested
These behaviors may be specified at the same time if desired. An example
will hopefully illustrate why this is considered useful::
def my_task():
with settings(
hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True
):
if run('ls /etc/lsb-release'):
return 'Ubuntu'
elif run('ls /etc/redhat-release'):
return 'RedHat'
The above task executes a `run` statement, but will warn instead of
aborting if the ``ls`` fails, and all output -- including the warning
itself -- is prevented from printing to the user. The end result, in this
scenario, is a completely silent task that allows the caller to figure out
what type of system the remote host is, without incurring the handful of
output that would normally occur.
Thus, `settings` may be used to set any combination of environment
variables in tandem with hiding (or showing) specific levels of output, or
in tandem with any other piece of Fabric functionality implemented as a
context manager.
If ``clean_revert`` is set to ``True``, ``settings`` will **not** revert
keys which are altered within the nested block, instead only reverting keys
whose values remain the same as those given. More examples will make this
clear; below is how ``settings`` operates normally::
# Before the block, env.parallel defaults to False, host_string to None
with settings(parallel=True, host_string='myhost'):
# env.parallel is True
# env.host_string is 'myhost'
env.host_string = 'otherhost'
# env.host_string is now 'otherhost'
# Outside the block:
# * env.parallel is False again
# * env.host_string is None again
The internal modification of ``env.host_string`` is nullified -- not always
desirable. That's where ``clean_revert`` comes in::
# Before the block, env.parallel defaults to False, host_string to None
with settings(parallel=True, host_string='myhost', clean_revert=True):
# env.parallel is True
# env.host_string is 'myhost'
env.host_string = 'otherhost'
# env.host_string is now 'otherhost'
# Outside the block:
# * env.parallel is False again
# * env.host_string remains 'otherhost'
Brand new keys which did not exist in ``env`` prior to using ``settings``
are also preserved if ``clean_revert`` is active. When ``False``, such keys
are removed when the block exits.
.. versionadded:: 1.4.1
The ``clean_revert`` kwarg.
"""
managers = list(args)
if kwargs:
managers.append(_setenv(kwargs))
return nested(*managers)
def cd(path):
"""
Context manager that keeps directory state when calling remote operations.
Any calls to `run`, `sudo`, `get`, or `put` within the wrapped block will
implicitly have a string similar to ``"cd <path> && "`` prefixed in order
to give the sense that there is actually statefulness involved.
.. note::
`cd` only affects *remote* paths -- to modify *local* paths, use
`~fabric.context_managers.lcd`.
Because use of `cd` affects all such invocations, any code making use of
those operations, such as much of the ``contrib`` section, will also be
affected by use of `cd`.
Like the actual 'cd' shell builtin, `cd` may be called with relative paths
(keep in mind that your default starting directory is your remote user's
``$HOME``) and may be nested as well.
Below is a "normal" attempt at using the shell 'cd', which doesn't work due
to how shell-less SSH connections are implemented -- state is **not** kept
between invocations of `run` or `sudo`::
run('cd /var/www')
run('ls')
The above snippet will list the contents of the remote user's ``$HOME``
instead of ``/var/www``. With `cd`, however, it will work as expected::
with cd('/var/www'):
run('ls') # Turns into "cd /var/www && ls"
Finally, a demonstration (see inline comments) of nesting::
with cd('/var/www'):
run('ls') # cd /var/www && ls
with cd('website1'):
run('ls') # cd /var/www/website1 && ls
.. note::
This context manager is currently implemented by appending to (and, as
always, restoring afterwards) the current value of an environment
variable, ``env.cwd``. However, this implementation may change in the
future, so we do not recommend manually altering ``env.cwd`` -- only
the *behavior* of `cd` will have any guarantee of backwards
compatibility.
.. note::
Space characters will be escaped automatically to make dealing with
such directory names easier.
.. versionchanged:: 1.0
Applies to `get` and `put` in addition to the command-running
operations.
.. seealso:: `~fabric.context_managers.lcd`
"""
return _change_cwd('cwd', path)
def lcd(path):
"""
Context manager for updating local current working directory.
This context manager is identical to `~fabric.context_managers.cd`, except
that it changes a different env var (`lcwd`, instead of `cwd`) and thus
only affects the invocation of `~fabric.operations.local` and the local
arguments to `~fabric.operations.get`/`~fabric.operations.put`.
Relative path arguments are relative to the local user's current working
directory, which will vary depending on where Fabric (or Fabric-using code)
was invoked. You can check what this is with `os.getcwd
<http://docs.python.org/release/2.6/library/os.html#os.getcwd>`_. It may be
useful to pin things relative to the location of the fabfile in use, which
may be found in :ref:`env.real_fabfile <real-fabfile>`
.. versionadded:: 1.0
"""
return _change_cwd('lcwd', path)
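# Illustrative sketch (an assumption-laden example, not from the original
# source): using lcd() to pin local commands to the fabfile's own directory
# via env.real_fabfile, as suggested above. The task name and make target
# are hypothetical.
#
#     import os.path
#     from fabric.api import env, lcd, local
#
#     def build_docs():
#         with lcd(os.path.dirname(env.real_fabfile)):
#             local('make -C docs html')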
def _change_cwd(which, path):
path = path.replace(' ', r'\ ')
if state.env.get(which) and not path.startswith('/') and not path.startswith('~'):
new_cwd = state.env.get(which) + '/' + path
else:
new_cwd = path
return _setenv({which: new_cwd})
def path(path, behavior='append'):
"""
Append the given ``path`` to the PATH used to execute any wrapped commands.
Any calls to `run` or `sudo` within the wrapped block will implicitly have
a string similar to ``"PATH=$PATH:<path> "`` prepended before the given
command.
You may customize the behavior of `path` by specifying the optional
``behavior`` keyword argument, as follows:
* ``'append'``: append given path to the current ``$PATH``, e.g.
``PATH=$PATH:<path>``. This is the default behavior.
* ``'prepend'``: prepend given path to the current ``$PATH``, e.g.
``PATH=<path>:$PATH``.
* ``'replace'``: ignore previous value of ``$PATH`` altogether, e.g.
``PATH=<path>``.
.. note::
This context manager is currently implemented by modifying (and, as
always, restoring afterwards) the current value of environment
variables, ``env.path`` and ``env.path_behavior``. However, this
implementation may change in the future, so we do not recommend
manually altering them directly.
.. versionadded:: 1.0
"""
return _setenv({'path': path, 'path_behavior': behavior})
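# Illustrative sketch (an assumption-laden example, not from the original
# source): prepending a custom tool directory for a remote command. The
# directory and command names are hypothetical.
#
#     from fabric.api import path, run
#
#     def check_tool():
#         with path('/opt/mytools/bin', behavior='prepend'):
#             # effectively: PATH=/opt/mytools/bin:$PATH mytool --version
#             run('mytool --version')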
def prefix(command):
"""
Prefix all wrapped `run`/`sudo` commands with given command plus ``&&``.
This is nearly identical to `~fabric.operations.cd`, except that nested
invocations append to a list of command strings instead of modifying a
single string.
Most of the time, you'll want to be using this alongside a shell script
which alters shell state, such as ones which export or alter shell
environment variables.
For example, one of the most common uses of this tool is with the
``workon`` command from `virtualenvwrapper
<http://www.doughellmann.com/projects/virtualenvwrapper/>`_::
with prefix('workon myvenv'):
run('./manage.py syncdb')
In the above snippet, the actual shell command run would be this::
$ workon myvenv && ./manage.py syncdb
This context manager is compatible with `~fabric.context_managers.cd`, so
if your virtualenv doesn't ``cd`` in its ``postactivate`` script, you could
do the following::
with cd('/path/to/app'):
with prefix('workon myvenv'):
run('./manage.py syncdb')
run('./manage.py loaddata myfixture')
Which would result in executions like so::
$ cd /path/to/app && workon myvenv && ./manage.py syncdb
$ cd /path/to/app && workon myvenv && ./manage.py loaddata myfixture
Finally, as alluded to near the beginning,
`~fabric.context_managers.prefix` may be nested if desired, e.g.::
with prefix('workon myenv'):
run('ls')
with prefix('source /some/script'):
run('touch a_file')
The result::
$ workon myenv && ls
$ workon myenv && source /some/script && touch a_file
Contrived, but hopefully illustrative.
"""
return _setenv(lambda: {'command_prefixes': state.env.command_prefixes + [command]})
@documented_contextmanager
def char_buffered(pipe):
"""
Force local terminal ``pipe`` be character, not line, buffered.
Only applies on Unix-based systems; on Windows this is a no-op.
"""
if win32 or not isatty(pipe):
yield
else:
old_settings = termios.tcgetattr(pipe)
tty.setcbreak(pipe)
try:
yield
finally:
termios.tcsetattr(pipe, termios.TCSADRAIN, old_settings)
def shell_env(**kw):
"""
Set shell environment variables for wrapped commands.
For example, the below shows how you might set a ZeroMQ related environment
variable when installing a Python ZMQ library::
with shell_env(ZMQ_DIR='/home/user/local'):
run('pip install pyzmq')
As with `~fabric.context_managers.prefix`, this effectively turns the
``run`` command into::
$ export ZMQ_DIR='/home/user/local' && pip install pyzmq
Multiple key-value pairs may be given simultaneously.
.. note::
If used to affect the behavior of `~fabric.operations.local` when
running from a Windows localhost, ``SET`` commands will be used to
implement this feature.
"""
return _setenv({'shell_env': kw})
def _forwarder(chan, sock):
# Bidirectionally forward data between a socket and a Paramiko channel.
while True:
r, w, x = select.select([sock, chan], [], [])
if sock in r:
data = sock.recv(1024)
if len(data) == 0:
break
chan.send(data)
if chan in r:
data = chan.recv(1024)
if len(data) == 0:
break
sock.send(data)
chan.close()
sock.close()
@documented_contextmanager
def remote_tunnel(remote_port, local_port=None, local_host="localhost",
remote_bind_address="127.0.0.1"):
"""
Create a tunnel forwarding a locally-visible port to the remote target.
For example, you can let the remote host access a database that is
installed on the client host::
# Map localhost:6379 on the server to localhost:6379 on the client,
# so that the remote 'redis-cli' program ends up speaking to the local
# redis-server.
with remote_tunnel(6379):
run("redis-cli -i")
    The database might be installed on a machine that is only reachable from
    the client host (as opposed to *on* the client itself)::
# Map localhost:6379 on the server to redis.internal:6379 on the client
with remote_tunnel(6379, local_host="redis.internal")
run("redis-cli -i")
``remote_tunnel`` accepts up to four arguments:
* ``remote_port`` (mandatory) is the remote port to listen to.
* ``local_port`` (optional) is the local port to connect to; the default is
the same port as the remote one.
* ``local_host`` (optional) is the locally-reachable computer (DNS name or
IP address) to connect to; the default is ``localhost`` (that is, the
same computer Fabric is running on).
* ``remote_bind_address`` (optional) is the remote IP address to bind to
for listening, on the current target. It should be an IP address assigned
to an interface on the target (or a DNS name that resolves to such IP).
You can use "0.0.0.0" to bind to all interfaces.
.. note::
By default, most SSH servers only allow remote tunnels to listen to the
localhost interface (127.0.0.1). In these cases, `remote_bind_address`
is ignored by the server, and the tunnel will listen only to 127.0.0.1.
    .. versionadded:: 1.6
"""
if local_port is None:
local_port = remote_port
sockets = []
channels = []
threads = []
def accept(channel, src, dest):
src_addr, src_port = src
dest_addr, dest_port = dest
channels.append(channel)
sock = socket.socket()
sockets.append(sock)
try:
sock.connect((local_host, local_port))
except Exception:
print("[%s] rtunnel: cannot connect to %s:%d (from local)" %
(env.host_string, local_host, local_port))
channel.close()
return
print("[%s] rtunnel: opened reverse tunnel: %r -> %r -> %r"
% (env.host_string, channel.origin_addr,
channel.getpeername(), (local_host, local_port)))
th = ThreadHandler('fwd', _forwarder, channel, sock)
threads.append(th)
transport = connections[env.host_string].get_transport()
transport.request_port_forward(remote_bind_address, remote_port, handler=accept)
try:
yield
finally:
for sock, chan, th in zip(sockets, channels, threads):
sock.close()
chan.close()
th.thread.join()
th.raise_if_needed()
transport.cancel_port_forward(remote_bind_address, remote_port)
quiet = lambda: settings(hide('everything'), warn_only=True)
quiet.__doc__ = """
Alias to ``settings(hide('everything'), warn_only=True)``.
Useful for wrapping remote interrogative commands which you expect to fail
occasionally, and/or which you want to silence.
Example::
with quiet():
have_build_dir = run("test -e /tmp/build").succeeded
When used in a task, the above snippet will not produce any ``run: test -e
/tmp/build`` line, nor will any stdout/stderr display, and command failure
is ignored.
.. seealso::
:ref:`env.warn_only <warn_only>`,
`~fabric.context_managers.settings`,
`~fabric.context_managers.hide`
.. versionadded:: 1.5
"""
warn_only = lambda: settings(warn_only=True)
warn_only.__doc__ = """
Alias to ``settings(warn_only=True)``.
.. seealso::
:ref:`env.warn_only <warn_only>`,
`~fabric.context_managers.settings`,
`~fabric.context_managers.quiet`
"""
| bsd-2-clause | -7,542,093,378,251,034,000 | 33.760797 | 91 | 0.632132 | false | 4.195269 | false | false | false |
LokiW/extendable-cards | extendable_cards/view/game_view.py | 1 | 6424 | from extendable_cards.view.graphics import Rectangle, Point, Text
from tkinter import Button
class GameOutline(object):
def __init__(self, window, dx, dy, w, h):
self.top_y = dy
self.bottom_y = dy+h
self.right_x = dx+w
self.left_x = dx
self.discard_end_x = dx + (w/6.0)
self.discard_top_y = self.bottom_y - (h/3.0)
discard_p_b = Point(dx+1, self.bottom_y-1)
discard_p_t = Point(self.discard_end_x, self.discard_top_y)
discard_text_p = Point((2*dx + (w/6.0))/2, (self.bottom_y - (h / 6.0)))
self.discard = Rectangle(discard_p_b, discard_p_t)
self.discard.setFill("grey")
self.discard_text = Text(discard_text_p, "DISCARD PILE")
self.deck_begin_x = self.right_x - (w/6.0)
deck_p_b = Point(self.right_x-1, self.bottom_y-1)
deck_p_t = Point(self.deck_begin_x, self.bottom_y - (h / 3.0))
deck_text_p = Point(self.right_x - (w / 12.0), self.bottom_y - (h / 6.0))
self.deck = Rectangle(deck_p_b, deck_p_t)
self.deck.setFill("grey")
self.deck_text = Text(deck_text_p, "DECK")
self.hand = []
self.in_play = []
self.selected = []
self.win = window
def display_outline(self):
self.discard.draw(self.win)
self.deck.draw(self.win)
def display_outline_with_labels(self):
self.display_outline()
self.deck_text.draw(self.win)
self.discard_text.draw(self.win)
def undisplay_labels(self):
self.deck_text.undraw()
self.discard_text.undraw()
def display_hand_area(self):
self._display_card_list(self.hand, PlayArea.HAND)
def undisplay_hand_area(self):
for card in self.hand:
card.undisplay()
def display_play_area(self):
self._display_card_list(self.in_play, PlayArea.IN_PLAY)
def _display_card_list(self, cards, play_area):
card_num = len(cards)
if card_num == 0:
return False
cur_card = 0
lx, by, rx, ty = self.get_area_points(play_area)
y_unit = (by - ty) / 50.0
card_height = by - ty - 2*y_unit
card_width = card_height * (5.0/7.0)
x_unit = ((rx - card_width) - lx)/card_num
for card in cards:
card.display_card(lx + (cur_card*x_unit), ty + y_unit, w=card_width, h=card_height)
cur_card += 1
def undisplay_play_area(self):
for card in self.in_play:
card.undisplay()
    def select_cards(self, cards, play_area):
        # Select several cards at once by delegating to select_card.
        for card in cards:
            self.select_card(card, play_area)
    def select_card(self, card, play_area):
        if play_area == PlayArea.HAND:
            for hc in self.hand[:]:
                if hc.card.name == card.card.name:
                    self.selected.append({'card': hc, 'origin': play_area})
                    self.hand.remove(hc)
                    return
        elif play_area == PlayArea.IN_PLAY:
            for ipc in self.in_play[:]:
                if ipc.card.name == card.card.name:
                    self.selected.append({'card': ipc, 'origin': play_area})
                    self.in_play.remove(ipc)
                    return
        elif play_area == PlayArea.DECK or play_area == PlayArea.DISCARD:
            # Deck and discard cards are not tracked in a local list, so the
            # given card view is recorded directly.
            self.selected.append({'card': card, 'origin': play_area})
        elif play_area == PlayArea.SELECTION:
            # Re-selecting an already selected card keeps it and returns all
            # other selections to their origin areas.
            for entry in self.selected[:]:
                if entry['card'].card.name == card.card.name:
                    self.selected.remove(entry)
                    self.return_selections()
                    self.selected.append(entry)
                    return
def return_selections(self):
self.undisplay_selection()
for card in self.selected[:]:
if card['origin'] == PlayArea.HAND:
self.hand.append(card)
self.selected.remove(card)
elif card['origin'] == PlayArea.IN_PLAY:
self.in_play.append(card)
self.selected.remove(card)
else:
self.selected.remove(card)
def display_selection(self):
self._display_card_list([item['card'] for item in self.selected], PlayArea.SELECTION)
def undisplay_selection(self):
for card in self.selected:
card.undisplay()
def add_to_hand_area(self, card_view):
self.hand.append(card_view)
def add_to_play_area(self, card_view):
self.in_play.append(card_view)
def get_card_at_point(self, point, area):
x = point.getX()
y = point.getY()
if area == PlayArea.HAND:
last_seen = None
for card in self.hand:
lx = min(card.card.getP1().getX(), card.card.getP2().getX())
if lx < x:
last_seen = card
else:
return last_seen
return last_seen
def get_area(self, point):
x = point.getX()
y = point.getY()
if y < self.discard_top_y:
return PlayArea.IN_PLAY
elif x < self.discard_end_x:
return PlayArea.DISCARD
elif x > self.deck_begin_x:
return PlayArea.DECK
elif len(self.selected) > 0:
return PlayArea.HAND
else:
return PlayArea.HAND
def get_area_points(self, area):
if area == PlayArea.IN_PLAY:
return (self.left_x, self.discard_top_y, self.right_x, self.top_y)
elif area == PlayArea.DISCARD:
return (self.left_x, self.bottom_y, self.discard_end_x, self.discard_top_y)
elif area == PlayArea.HAND:
return (self.discard_end_x, self.bottom_y, self.deck_begin_x, self.discard_top_y)
elif area == PlayArea.DECK:
return (self.deck_begin_x, self.bottom_y, self.right_x, self.discard_top_y)
elif area == PlayArea.SELECTION:
return (self.discard_end_x, self.bottom_y - (self.bottom_y - self.discard_top_y)*(2.0/3.0),
self.deck_begin_x, self.bottom_y - (self.bottom_y - self.discard_top_y)*(5.0/3.0))
class PlayArea(object):
IN_PLAY = "play"
DISCARD = "discard"
DECK = "deck"
HAND = "hand"
SELECTION = "selection"
| bsd-2-clause | 7,205,527,144,953,349,000 | 30.960199 | 103 | 0.538294 | false | 3.411577 | false | false | false |
google/tf-quant-finance | tf_quant_finance/experimental/pricing_platform/framework/market_data/rate_curve.py | 1 | 13760 | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of RateCurve object."""
from typing import Optional, Tuple
import tensorflow.compat.v2 as tf
from tf_quant_finance import datetime as dateslib
from tf_quant_finance import math
from tf_quant_finance import rates as rates_lib
from tf_quant_finance.experimental.pricing_platform.framework.core import curve_types
from tf_quant_finance.experimental.pricing_platform.framework.core import daycount_conventions
from tf_quant_finance.experimental.pricing_platform.framework.core import interpolation_method
from tf_quant_finance.experimental.pricing_platform.framework.core import processed_market_data as pmd
from tf_quant_finance.experimental.pricing_platform.framework.core import types
from tf_quant_finance.experimental.pricing_platform.framework.market_data import utils
_DayCountConventions = daycount_conventions.DayCountConventions
_InterpolationMethod = interpolation_method.InterpolationMethod
_DayCountConventionsProtoType = types.DayCountConventionsProtoType
class RateCurve(pmd.RateCurve):
"""Represents an interest rate curve."""
def __init__(
self,
maturity_dates: types.DateTensor,
discount_factors: tf.Tensor,
valuation_date: types.DateTensor,
interpolator: Optional[_InterpolationMethod] = None,
interpolate_rates: Optional[bool] = True,
daycount_convention: Optional[_DayCountConventionsProtoType] = None,
curve_type: Optional[curve_types.CurveType] = None,
dtype: Optional[tf.DType] = None,
name: Optional[str] = None):
"""Initializes the interest rate curve.
Args:
maturity_dates: A `DateTensor` containing the maturity dates on which the
curve is specified.
discount_factors: A `Tensor` of real dtype specifying the discount factors
corresponding to the input maturities. The shape of this input should
match the shape of `maturity_dates`.
valuation_date: A scalar `DateTensor` specifying the valuation (or
settlement) date for the curve.
interpolator: An instance of `InterpolationMethod`.
Default value: `None` in which case cubic interpolation is used.
interpolate_rates: A boolean specifying whether the interpolation should
be done in discount rates or discount factors space.
Default value: `True`, i.e., interpolation is done in the discount
factors space.
daycount_convention: `DayCountConventions` to use for the interpolation
purpose.
Default value: `None` which maps to actual/365 day count convention.
curve_type: An instance of `CurveTypes` to mark the rate curve.
Default value: `None` which means that the curve does not have the
marker.
dtype: `tf.Dtype`. Optional input specifying the dtype of the `rates`
input.
name: Python str. The name to give to the ops created by this function.
Default value: `None` which maps to 'rate_curve'.
"""
self._name = name or "rate_curve"
with tf.compat.v1.name_scope(self._name):
self._discount_factor_nodes = tf.convert_to_tensor(
discount_factors, dtype=dtype,
name="curve_discount_factors")
self._dtype = dtype or self._discount_factor_nodes.dtype
if interpolator is None or interpolator == _InterpolationMethod.CUBIC:
def cubic_interpolator(xi, x, y):
spline_coeffs = math.interpolation.cubic.build_spline(x, y)
return math.interpolation.cubic.interpolate(xi, spline_coeffs,
dtype=dtype)
interpolator = cubic_interpolator
self._interpolation_method = _InterpolationMethod.CUBIC
elif interpolator == _InterpolationMethod.LINEAR:
def linear_interpolator(xi, x, y):
return math.interpolation.linear.interpolate(xi, x, y,
dtype=dtype)
interpolator = linear_interpolator
self._interpolation_method = _InterpolationMethod.LINEAR
elif interpolator == _InterpolationMethod.CONSTANT_FORWARD:
def constant_fwd(xi, x, y):
return rates_lib.constant_fwd.interpolate(xi, x, y, dtype=dtype)
interpolator = constant_fwd
self._interpolation_method = _InterpolationMethod.CONSTANT_FORWARD
else:
raise ValueError(f"Unknown interpolation method {interpolator}.")
self._dates = dateslib.convert_to_date_tensor(maturity_dates)
self._valuation_date = dateslib.convert_to_date_tensor(
valuation_date)
self._daycount_convention = (
daycount_convention or _DayCountConventions.ACTUAL_365)
self._day_count_fn = utils.get_daycount_fn(self._daycount_convention)
self._times = self._get_time(self._dates)
self._interpolator = interpolator
self._interpolate_rates = interpolate_rates
# Precompute discount rates:
self._curve_type = curve_type
@property
def daycount_convention(self) -> types.DayCountConventionsProtoType:
"""Daycount convention."""
return self._daycount_convention
def daycount_fn(self):
"""Daycount function."""
return self._day_count_fn
@property
def discount_factor_nodes(self) -> types.FloatTensor:
"""Discount factors at the interpolation nodes."""
return self._discount_factor_nodes
@property
def node_dates(self) -> types.DateTensor:
"""Dates at which the discount factors and rates are specified."""
return self._dates
@property
def discount_rate_nodes(self) -> types.FloatTensor:
"""Discount rates at the interpolation nodes."""
discount_rates = tf.math.divide_no_nan(
-tf.math.log(self.discount_factor_nodes), self._times,
name="discount_rate_nodes")
return discount_rates
def set_discount_factor_nodes(self, values: types.FloatTensor):
"""Update discount factors at the interpolation nodes with new values."""
values = tf.convert_to_tensor(values, dtype=self._dtype)
values_shape = values.shape.as_list()
nodes_shape = self.discount_factor_nodes.shape.as_list()
if values_shape != nodes_shape:
raise ValueError("New values should have shape {0} but are of "
"shape {1}".format(nodes_shape, values_shape))
self._discount_factor_nodes = values
def discount_rate(self,
interpolation_dates: Optional[types.DateTensor] = None,
interpolation_times: Optional[types.FloatTensor] = None,
name: Optional[str] = None):
"""Returns interpolated rates at `interpolation_dates`."""
if interpolation_dates is None and interpolation_times is None:
raise ValueError("Either interpolation_dates or interpolation times "
"must be supplied.")
if interpolation_dates is not None:
interpolation_dates = dateslib.convert_to_date_tensor(
interpolation_dates)
times = self._get_time(interpolation_dates)
else:
times = tf.convert_to_tensor(interpolation_times, self._dtype)
rates = self._interpolator(times, self._times,
self.discount_rate_nodes)
if self._interpolate_rates:
rates = self._interpolator(times, self._times,
self.discount_rate_nodes)
else:
discount_factor = self._interpolator(
times, self._times, self.discount_factor_nodes)
rates = -tf.math.divide_no_nan(
tf.math.log(discount_factor), times)
return tf.identity(rates, name=name or "discount_rate")
def discount_factor(self,
interpolation_dates: Optional[types.DateTensor] = None,
interpolation_times: Optional[types.FloatTensor] = None,
name: Optional[str] = None):
"""Returns discount factors at `interpolation_dates`."""
if interpolation_dates is None and interpolation_times is None:
raise ValueError("Either interpolation_dates or interpolation times "
"must be supplied.")
if interpolation_dates is not None:
interpolation_dates = dateslib.convert_to_date_tensor(
interpolation_dates)
times = self._get_time(interpolation_dates)
else:
times = tf.convert_to_tensor(interpolation_times, self._dtype)
if self._interpolate_rates:
rates = self._interpolator(times, self._times,
self.discount_rate_nodes)
discount_factor = tf.math.exp(-rates * times)
else:
discount_factor = self._interpolator(
times, self._times, self.discount_factor_nodes)
return tf.identity(discount_factor, name=name or "discount_factor")
def forward_rate(
self,
start_date: Optional[types.DateTensor] = None,
maturity_date: Optional[types.DateTensor] = None,
start_time: Optional[types.FloatTensor] = None,
maturity_time: Optional[types.FloatTensor] = None,
day_count_fraction: Optional[tf.Tensor] = None):
"""Returns the simply accrued forward rate between [start_dt, maturity_dt].
Args:
start_date: A `DateTensor` specifying the start of the accrual period
for the forward rate. The function expects either `start_date` or
`start_time` to be specified.
maturity_date: A `DateTensor` specifying the end of the accrual period
for the forward rate. The shape of `end_date` must be broadcastable
with the shape of `start_date`. The function expects either `end_date`
or `end_time` to be specified.
start_time: A real `Tensor` specifying the start of the accrual period
for the forward rate. The function expects either `start_date` or
`start_time` to be specified.
maturity_time: A real `Tensor` specifying the end of the accrual period
for the forward rate. The shape of `end_date` must be broadcastable
with the shape of `start_date`. The function expects either `end_date`
or `end_time` to be specified.
day_count_fraction: An optional `Tensor` of real dtype specifying the
time between `start_date` and `maturity_date` in years computed using
the forward rate's day count basis. The shape of the input should be
the same as that of `start_date` and `maturity_date`.
Default value: `None`, in which case the daycount fraction is computed
using `daycount_convention`.
Returns:
A real `Tensor` of same shape as the inputs containing the simply
compounded forward rate.
"""
if start_date is None and start_time is None:
raise ValueError("Either start_date or start_times "
"must be supplied.")
if maturity_date is None and maturity_time is None:
raise ValueError("Either maturity_date or maturity_time must be "
"supplied.")
if start_date is not None and maturity_date is not None:
start_date = dateslib.convert_to_date_tensor(start_date)
maturity_date = dateslib.convert_to_date_tensor(maturity_date)
if day_count_fraction is None:
day_count_fn = self._day_count_fn
day_count_fraction = day_count_fn(
start_date=start_date, end_date=maturity_date, dtype=self._dtype)
else:
day_count_fraction = tf.convert_to_tensor(day_count_fraction,
self._dtype,
name="day_count_fraction")
start_time = self._get_time(start_date)
maturity_time = self._get_time(maturity_date)
else:
start_time = tf.convert_to_tensor(start_time, dtype=self._dtype)
maturity_time = tf.convert_to_tensor(maturity_time, dtype=self._dtype)
day_count_fraction = maturity_time - start_time
dfstart = self.discount_factor(interpolation_times=start_time)
dfmaturity = self.discount_factor(interpolation_times=maturity_time)
return tf.math.divide_no_nan(
tf.math.divide_no_nan(dfstart, dfmaturity) - 1., day_count_fraction)
@property
def valuation_date(self) -> types.DateTensor:
return self._valuation_date
@property
def interpolation_method(self) -> _InterpolationMethod:
return self._interpolation_method
def _get_time(self,
dates: types.DateTensor) -> types.FloatTensor:
"""Computes the year fraction from the curve's valuation date."""
return self._day_count_fn(start_date=self._valuation_date,
end_date=dates,
dtype=self._dtype)
@property
def curve_type(self) -> curve_types.CurveType:
return self._curve_type
def discount_factors_and_dates(self) -> Tuple[types.FloatTensor,
types.DateTensor]:
"""Returns discount factors and dates at which the discount curve is fitted.
"""
return (self._discount_factor_nodes, self._dates)
@property
def dtype(self) -> types.Dtype:
return self._dtype
@property
def interpolate_rates(self) -> bool:
"""Returns `True` if the interpolation is on rates and not on discounts."""
return self._interpolate_rates
__all__ = ["RateCurve"]
| apache-2.0 | -6,346,764,848,289,983,000 | 44.114754 | 102 | 0.672456 | false | 4.101341 | false | false | false |
Davasny/CCAS | ccas/models/exchanges/__init__.py | 1 | 1783 | from . import poloniex, btc_e, bittrex, bitfinex
from ccas.models import database, coinmarketcap
def get_balances(exchange, public_key, secret_key):
if exchange == "poloniex":
return poloniex.get_balances(public_key, secret_key)
if exchange == "btc-e":
return btc_e.get_balances(public_key, secret_key)
if exchange == "bittrex":
return bittrex.get_balances(public_key, secret_key)
if exchange == "bitfinex":
return bitfinex.get_balances(public_key, secret_key)
def get_exchanges():
response = database.new_query("SELECT id, exchange FROM exchanges_api_keys;")
return list(response)
def get_btc_price():
exchange = database.new_query("SELECT `exchange` FROM `coins_prices` WHERE `name`='btc';")
if exchange:
exchange = exchange[0][0]
if exchange == "poloniex":
return poloniex.get_btc_price()
if exchange == "btc-e":
return btc_e.get_btc_price()
if exchange == "bittrex":
return bittrex.get_btc_price()
if exchange == "bitfinex":
return bitfinex.get_btc_price()
else:
return -1
def get_price(currency):
exchange = database.new_query("SELECT `exchange` FROM `coins_prices` WHERE `name`='"+ currency.lower() +"';")
if exchange:
exchange = exchange[0][0]
if exchange == "poloniex":
return poloniex.get_price(currency)
if exchange == "btc-e":
return btc_e.get_price(currency)
if exchange == "bittrex":
return bittrex.get_price(currency)
if exchange == "bitfinex":
return bitfinex.get_price(currency)
if exchange == "coinmarketcap":
return coinmarketcap.get_price(currency)
else:
return -1
| mit | 4,158,025,492,351,887,000 | 33.960784 | 113 | 0.615816 | false | 3.402672 | false | false | false |
veryhappythings/discord-gather | gather/discord_gather.py | 1 | 2123 | import asyncio
import logging
import discord
from .gatherbot import GatherBot
from .organiser import Organiser
from . import commands
logger = logging.getLogger(__name__)
class DiscordGather:
def __init__(self, token):
self.token = token
self.bot = None
self.client = discord.Client()
self.client.on_ready = self.on_ready
asyncio.get_event_loop().call_soon(self._report_loop)
def run(self):
self.client.run(self.token)
async def on_ready(self):
self.bot = GatherBot(self.client.user.name)
self.bot.register_message_handler(self.client.send_message)
self.bot.register_action('^!help$', commands.bot_help)
self.bot.register_action('^!(?:add|join|s)$', commands.add)
self.bot.register_action('^!(?:remove|rem|so)$', commands.remove)
self.bot.register_action('^!(?:game|status)$', commands.game_status)
self.bot.register_action('^!(?:reset)$', commands.reset)
self.client.on_member_update = self.on_member_update
self.client.on_message = self.bot.on_message
logger.info('Logged in as')
logger.info(self.bot.username)
logger.info('------')
async def on_member_update(self, before, after):
# Handle players going offline
if (before.status == discord.Status.online and
after.status == discord.Status.offline):
await self.bot.member_went_offline(before)
# Handle players going AFK
elif (before.status == discord.Status.online and
after.status == discord.Status.idle):
await self.bot.member_went_afk(before)
def _report_loop(self):
if self.bot:
logger.info(report(self.bot.organiser))
asyncio.get_event_loop().call_later(60 * 10, self._report_loop)
def report(organiser: Organiser) -> str:
report = ["Report:"]
for key, queue in organiser.queues.items():
report.append("{}-{}: {} current players - {} games to date".format(
key.server, key, len(queue), organiser.games_count[key]))
return "\n".join(report)
| mit | -7,811,874,497,295,730,000 | 33.803279 | 76 | 0.628356 | false | 3.647766 | false | false | false |
GoogleCloudPlatform/cloudml-samples | chainer/containers/quickstart/mnist/trainer/mnist.py | 1 | 6554 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the \"License\");
# you may not use this file except in compliance with the License.\n",
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an \"AS IS\" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import six
import subprocess
import hypertune
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
from chainer import serializers
MODEL_FILE_NAME = 'chainer.model'
class Net(chainer.Chain):
def __init__(self):
super(Net, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(1, 10, ksize=5)
self.conv2 = L.Convolution2D(10, 20, ksize=5)
self.fc1 = L.Linear(None, 50)
self.fc2 = L.Linear(None, 10)
def forward(self, x):
x = F.relu(F.max_pooling_2d(self.conv1(x), 2))
x = F.relu(F.max_pooling_2d(F.dropout(self.conv2(x)), 2))
x = F.reshape(F.flatten(x), (-1, 320))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return x
class HpReport(chainer.training.Extension):
"""Trainer extension for hyper parameter tuning with CMLE.
Args:
log_report (str or LogReport): Log report to accumulate the
observations. This is either the name of a LogReport extensions
registered to the trainer, or a LogReport instance to use
internally.
global_step: key to epoch
hyperparameter_metric_tag: user-defined
metric_value: key to metric
"""
def __init__(self,
log_report='LogReport',
hp_global_step='epoch',
hp_metric_val='validation/main/loss',
hp_metric_tag='loss'):
self._log_report = log_report
self._log_len = 0 # number of observations already done
self._hp_global_step = hp_global_step
self._hp_metric_val = hp_metric_val
self._hp_metric_tag = hp_metric_tag
def __call__(self, trainer):
log_report = self._log_report
if isinstance(log_report, str):
log_report = trainer.get_extension(log_report)
elif isinstance(log_report, log_report_module.LogReport):
log_report(trainer) # update the log report
else:
raise TypeError('log report has a wrong type %s' %
type(log_report))
log = log_report.log
log_len = self._log_len
hpt = hypertune.HyperTune()
while len(log) > log_len:
target_log = log[log_len]
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag=self._hp_metric_tag,
metric_value=target_log[self._hp_metric_val],
global_step=target_log[self._hp_global_step])
log_len += 1
self.log_len = log_len
def get_args():
"""Argument parser.
Returns:
Dictionary of arguments.
"""
parser = argparse.ArgumentParser(description='Chainer MNIST Example')
parser.add_argument(
'--batch-size',
type=int,
default=100,
metavar='N',
help='input batch size for training (default: 100)')
parser.add_argument(
'--test-batch-size',
type=int,
default=1000,
metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument(
'--epochs',
type=int,
default=10,
metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument(
'--lr',
type=float,
default=0.01,
metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument(
'--momentum',
type=float,
default=0.5,
metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument(
'--model-dir',
default=None,
help='The directory to store the model')
parser.add_argument(
'--gpu',
type=int,
default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument(
'--resume',
action='store_true',
help='Resume training')
args = parser.parse_args()
return args
def main():
# Training settings
args = get_args()
# Set up a neural network to train
model = L.Classifier(Net())
if args.gpu >= 0:
# Make a specified GPU current
chainer.backends.cuda.get_device_from_id(args.gpu).use()
model.to_gpu() # Copy the model to the GPU
# Setup an optimizer
optimizer = chainer.optimizers.MomentumSGD(lr=args.lr, momentum=args.momentum)
optimizer.setup(model)
# Load the MNIST dataset
train, test = chainer.datasets.get_mnist(ndim=3)
train_iter = chainer.iterators.SerialIterator(train, args.batch_size)
test_iter = chainer.iterators.SerialIterator(test, args.test_batch_size,
repeat=False, shuffle=False)
# Set up a trainer
updater = training.updaters.StandardUpdater(
train_iter, optimizer, device=args.gpu)
trainer = training.Trainer(updater, (args.epochs, 'epoch'))
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())
# Print selected entries of the log to stdout
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
# Send selected entries of the log to CMLE HP tuning system
trainer.extend(
HpReport(hp_metric_val='validation/main/loss', hp_metric_tag='my_loss'))
if args.resume:
# Resume from a snapshot
tmp_model_file = os.path.join('/tmp', MODEL_FILE_NAME)
if not os.path.exists(tmp_model_file):
subprocess.check_call([
'gsutil', 'cp', os.path.join(args.model_dir, MODEL_FILE_NAME),
tmp_model_file])
if os.path.exists(tmp_model_file):
chainer.serializers.load_npz(tmp_model_file, trainer)
trainer.run()
if args.model_dir:
tmp_model_file = os.path.join('/tmp', MODEL_FILE_NAME)
serializers.save_npz(tmp_model_file, model)
subprocess.check_call([
'gsutil', 'cp', tmp_model_file,
os.path.join(args.model_dir, MODEL_FILE_NAME)])
if __name__ == '__main__':
main()
| apache-2.0 | 508,161,105,582,383,800 | 30.209524 | 80 | 0.652121 | false | 3.529348 | true | false | false |
yasserglez/pytiger2c | packages/pytiger2c/ast/integerliteralexpressionnode.py | 1 | 2204 | # -*- coding: utf-8 -*-
"""
Clase C{IntegerLiteralExpressionNode} del árbol de sintáxis abstracta.
"""
from pytiger2c.ast.valuedexpressionnode import ValuedExpressionNode
from pytiger2c.types.integertype import IntegerType
class IntegerLiteralExpressionNode(ValuedExpressionNode):
"""
Clase C{IntegerLiteralExpressionNode} del árbol de sintáxis abstracta.
Representa un literal de un número entero en el lenguaje Tiger. El valor
de retorno de esta expresión siempre será C{IntegerType}.
"""
def _get_integer(self):
"""
Método para obtener el valor de la propiedad C{integer}.
"""
return self._integer
integer = property(_get_integer)
def __init__(self, integer):
"""
Inicializa la clase C{IntegerLiteralExpressionNode}.
@type integer: C{int}
@param integer: Valor del número entero literal.
"""
super(IntegerLiteralExpressionNode, self).__init__()
self._integer = integer
def check_semantics(self, scope, errors):
"""
Para obtener información acerca de los parámetros recibidos por
el método consulte la documentación del método C{check_semantics}
en la clase C{LanguageNode}.
Este nodo del árbol de sintáxis abstracta no requiere comprobación
semántica, solamente se da valor al tipo de retorno del nodo que
siempre será C{IntegerType}.
"""
self._scope = scope
self._return_type = IntegerType()
def generate_code(self, generator):
"""
Genera el código correspondiente a la estructura del lenguaje Tiger
representada por el nodo.
Para obtener información acerca de los parámetros recibidos por
este método consulte la documentación del método C{generate_code}
de la clase C{LanguageNode}.
"""
self.scope.generate_code(generator)
int_code_type = IntegerType().code_type
local_var = generator.define_local(int_code_type)
generator.add_statement('{0} = {1};'.format(local_var, self.integer))
self._code_name = local_var
| mit | 7,899,778,051,832,931,000 | 33.587302 | 77 | 0.652134 | false | 3.54886 | false | false | false |
jgmanzanas/CMNT_004_15 | project-addons/sale_display_stock/report/sale_order_line_report.py | 1 | 4447 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Comunitea All Rights Reserved
# $Jesús Ventosinos Mayor <[email protected]>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, tools
class sale_order_line_report(models.Model):
_name = 'sale.order.line.report'
_auto = False
name = fields.Char('Name', readonly=True)
partner_id = fields.Many2one('res.partner', 'Partner', readonly=True)
product_qty = fields.Float('Quantity', readonly=True)
uom = fields.Many2one('product.uom', 'UoM', readonly=True)
price_unit = fields.Float('Price unit', readonly=True)
discount = fields.Float('Discount', readonly=True)
salesman_id = fields.Many2one('res.users', 'Salesperson', readonly=True)
state = fields.Char('State', readonly=True)
product_id = fields.Many2one('product.product', 'Product', readonly=True)
order_id = fields.Many2one('sale.order', 'Order', readonly=True)
qty_kitchen = fields.Float('Qty in kitchen', group_operator="avg",
readonly=True)
qty_stock = fields.Float('Stock qty', group_operator="avg", readonly=True)
company_id = fields.Many2one("res.company", "Company", readonly=True)
def init(self, cr):
tools.drop_view_if_exists(cr, self._table)
cr.execute("""
CREATE or REPLACE VIEW sale_order_line_report as (SELECT sol.id as id,
sol.name as name,
sol.order_partner_id as partner_id,
sol.product_uom_qty as product_qty,
sol.product_uom as uom,
sol.price_unit as price_unit,
sol.discount as discount,
sol.salesman_id as salesman_id,
sol.state as state,
sol.order_id as order_id,
sol.company_id as company_id,
q_kt.product_id,
q_kt.qty AS qty_kitchen,
stck.qty AS qty_stock
FROM sale_order_line sol
LEFT JOIN (SELECT product_id,
Sum(qty) AS qty
FROM stock_quant
WHERE location_id IN (SELECT res_id
FROM ir_model_data
WHERE module = 'location_moves' AND name IN ('stock_location_kitchen','stock_location_pantry')
)
GROUP BY product_id) q_kt
ON sol.product_id = q_kt.product_id
LEFT JOIN (SELECT product_id,
Sum(qty) AS qty
FROM stock_quant
WHERE location_id IN (SELECT loc.id
FROM stock_location loc
INNER JOIN (SELECT parent_left,
parent_right
FROM stock_location
WHERE
id IN (select view_location_id from stock_warehouse))
stock
ON loc.parent_left >=
stock.parent_left
AND loc.parent_right <=
stock.parent_right)
GROUP BY product_id) stck
ON sol.product_id = stck.product_id
WHERE q_kt.qty > 0 and sol.id in (select sale_line_id from procurement_order po where po.state not in ('done', 'cancel'))
GROUP BY sol.id, sol.name, sol.order_partner_id, sol.product_uom_qty,
sol.product_uom, sol.price_unit, sol.discount, sol.company_id,
sol.salesman_id, sol.state, sol.order_id, q_kt.product_id, q_kt.qty, stck.qty)
""")
| agpl-3.0 | 516,351,656,280,464,100 | 47.326087 | 137 | 0.550157 | false | 4.143523 | false | false | false |
gevannmullins/linux_server | add_items.py | 1 | 4498 | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Category, Base, Item, User
engine = create_engine('postgresql://catalog:password@localhost/catalog')
# engine = create_engine('sqlite:///catalog.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Create dummy user
User1 = User(name="Caron Mullins", email="[email protected]", picture='https://pbs.twimg.com/profile_images/2671170543/18debd694829ed78203a5a36dd364160_400x400.png')
session.add(User1)
session.commit()
# Category 1 with 3 items
category1 = Category(user_id=1, name="Soccer", image="http://neobasketball.com/img/bballcourt.jpg")
session.add(category1)
session.commit()
item1 = Item(user_id=1, name="Soccer Ball", description="Soccer balls for practicing and match games.", category=category1)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Soccer Boots", description="Soccer boots to maxumise gameplay", category=category1)
session.add(item2)
session.commit()
item3 = Item(user_id=1, name="Whistles", description="Whistles for training sessions.", category=category1)
session.add(item3)
session.commit()
# Next Categories and its items
category2 = Category(user_id=1, name="Basketball", image="http://neobasketball.com/img/bballcourt.jpg")
session.add(category2)
session.commit()
item1 = Item(user_id=1, name="Crew Socks", description="Stretchy ribbed socks extending to mid calf", category_id = category2.id)
session.add(item1)
session.commit()
# Categories 3
category3 = Category(user_id=1, name="Baseball", image="http://totalsportscomplex.com/wp-content/uploads/2014/09/baseball-pic.jpg")
session.add(category3)
session.commit()
item1 = Item(user_id=1, name="Crew Socks", description="Stretchy ribbed socks extending to mid calf", category_id = category3.id)
session.add(item1)
session.commit()
# Categories 4
category4 = Category(user_id=1, name="Frisbee", image="http://uvmbored.com/wp-content/uploads/2015/10/how_the_frisbee_took_flight.jpg")
session.add(category4)
session.commit()
item1 = Item(user_id=1, name="Flying Disc", description="A Flying disc or a Flying Saucer", category_id = category4.id)
session.add(item1)
session.commit()
# Categories 5
category5 = Category(user_id=1, name="Snowboarding", image="https://pantherfile.uwm.edu/collins9/www/finalproject5/Project_5/snowboarding3.jpg")
session.add(category5)
session.commit()
item1 = Item(user_id=1, name="Snowboard", description="Wooden board suitable to glide on snow", category_id = category5.id)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Goggles", description="Anit-glare protective safety glasses",category_id = category5.id)
session.add(item2)
session.commit()
# Categories 6
category6 = Category(user_id=1, name="Rock Climbing", image="http://asme.berkeley.edu/wordpress/wp-content/uploads/2013/11/Rock-Climbing-Wallpaper-HD.jpg")
session.add(category6)
session.commit()
item1 = Item(user_id=1, name="Shoes", description="Superior performance shoew wtih excellent grip", category_id = category6.id)
session.add(item1)
session.commit()
# Categories 7
category7 = Category(user_id=1, name="Skating", image="http://www.ocasia.org/Images-OCA/During-the-Roller-Skating-XXX-contest-between-XXX-_53834132011574.jpg")
session.add(category7)
session.commit()
item1 = Item(user_id=1, name="Skates", description="Roller skates with bearing suitable for beginner and advanced skater", category_id = category7.id)
session.add(item1)
session.commit()
# Categories 8
category8 = Category(user_id=1, name="Hockey", image="http://www.picture-newsletter.com/street-hockey/street-hockey-39.jpg")
session.add(category8)
session.commit()
item1 = Item(user_id=1, name="Stick", description="Composite Stick favorable for both ice and street hockey", category_id = category8.id)
session.add(item1)
session.commit()
print "added menu items!"
| mit | 530,226,847,194,362,200 | 32.318519 | 174 | 0.765229 | false | 3.026918 | false | false | false |
alphagov/backdrop | tests/read/test_parse_request_args.py | 1 | 5035 | from datetime import datetime
import re
import unittest
from hamcrest import assert_that, is_, has_item
import pytz
from werkzeug.datastructures import MultiDict
from backdrop.read.query import parse_request_args
class Test_parse_request_args(unittest.TestCase):
def test_start_at_is_parsed(self):
request_args = MultiDict([
("start_at", "2012-12-12T08:12:43+00:00")])
args = parse_request_args(request_args)
assert_that(args['start_at'], is_(
datetime(2012, 12, 12, 8, 12, 43, tzinfo=pytz.UTC)))
def test_first_start_at_is_used(self):
request_args = MultiDict([
("start_at", "2012-12-12T08:12:43+00:00"),
("start_at", "2012-12-13T08:12:43+00:00"),
])
args = parse_request_args(request_args)
assert_that(args['start_at'], is_(
datetime(2012, 12, 12, 8, 12, 43, tzinfo=pytz.UTC)))
def test_end_at_is_parsed(self):
request_args = MultiDict([
("end_at", "2012-12-12T08:12:43+00:00")])
args = parse_request_args(request_args)
assert_that(args['end_at'], is_(
datetime(2012, 12, 12, 8, 12, 43, tzinfo=pytz.UTC)))
def test_first_end_at_is_used(self):
request_args = MultiDict([
("end_at", "2012-12-12T08:12:43+00:00"),
("end_at", "2012-12-13T08:12:43+00:00"),
])
args = parse_request_args(request_args)
assert_that(args['end_at'], is_(
datetime(2012, 12, 12, 8, 12, 43, tzinfo=pytz.UTC)))
def test_one_filter_by_is_parsed(self):
request_args = MultiDict([
("filter_by", "foo:bar")])
args = parse_request_args(request_args)
assert_that(args['filter_by'], has_item(["foo", "bar"]))
def test_many_filter_by_are_parsed(self):
request_args = MultiDict([
("filter_by", "foo:bar"),
("filter_by", "bar:foo")
])
args = parse_request_args(request_args)
assert_that(args['filter_by'], has_item(["foo", "bar"]))
assert_that(args['filter_by'], has_item(["bar", "foo"]))
def test_build_query_with_boolean_value(self):
request_args = MultiDict([
("filter_by", "planet:true"),
("filter_by", "star:false"),
])
args = parse_request_args(request_args)
assert_that(args['filter_by'], has_item([ "planet", True ]))
assert_that(args['filter_by'], has_item([ "star", False ]))
def test_one_filter_by_prefix_is_parsed(self):
request_args = MultiDict([
("filter_by_prefix", "foo:/hello/world")])
args = parse_request_args(request_args)
assert_that(args['filter_by_prefix'],
has_item(["foo", '/hello/world']))
def test_many_filter_by_are_parsed(self):
request_args = MultiDict([
("filter_by_prefix", "foo:bar"),
("filter_by_prefix", "bar:foo")
])
args = parse_request_args(request_args)
assert_that(args['filter_by_prefix'], has_item(["foo", 'bar']))
assert_that(args['filter_by_prefix'], has_item(["bar", 'foo']))
def test_group_by_is_passed_through_untouched(self):
request_args = MultiDict([("group_by", "foobar")])
args = parse_request_args(request_args)
assert_that(args['group_by'], is_(['foobar']))
def test_sort_is_parsed(self):
request_args = MultiDict([
("sort_by", "foo:ascending")])
args = parse_request_args(request_args)
assert_that(args['sort_by'], is_(["foo", "ascending"]))
def test_sort_will_use_first_argument_only(self):
request_args = MultiDict([
("sort_by", "foo:descending"),
("sort_by", "foo:ascending"),
])
args = parse_request_args(request_args)
assert_that(args['sort_by'], is_(["foo", "descending"]))
def test_limit_is_parsed(self):
request_args = MultiDict([
("limit", "123")
])
args = parse_request_args(request_args)
assert_that(args['limit'], is_(123))
def test_one_collect_is_parsed_with_default_method(self):
request_args = MultiDict([
("collect", "some_key")
])
args = parse_request_args(request_args)
assert_that(args['collect'], is_([("some_key", "default")]))
def test_two_collects_are_parsed_with_default_methods(self):
request_args = MultiDict([
("collect", "some_key"),
("collect", "some_other_key")
])
args = parse_request_args(request_args)
assert_that(args['collect'], is_([("some_key", "default"),
("some_other_key", "default")]))
def test_one_collect_is_parsed_with_custom_method(self):
request_args = MultiDict([
("collect", "some_key:mean")
])
args = parse_request_args(request_args)
assert_that(args['collect'], is_([("some_key", "mean")]))
| mit | -5,928,915,904,435,119,000 | 29.70122 | 74 | 0.555511 | false | 3.498958 | true | false | false |
imgrant/fit2tcx | fit2tcx.py | 1 | 40690 | #!/usr/bin/env python
#
# fit2tcx - convert a FIT file to a TCX file
#
# Copyright (c) 2012, Gustav Tiger <[email protected]> [https://github.com/Tigge/FIT-to-TCX/]
# Copyright (c) 2014-2016, Ian Grant <[email protected]> [https://github.com/imgrant/fit2tcx]
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
__version__ = "1.6"
import sys
import copy
import contextlib
import argparse
import lxml.etree
from datetime import datetime, timedelta
from pytz import timezone, utc
from tzwhere import tzwhere
from geopy.distance import GreatCircleDistance
from fitparse import FitFile, FitParseError
"""
Limit values for error checking on speed & distance calculations
"""
# Speed and distance calculated from GPS will be ignored
# for trackpoints where the acceleration from the last
# point is above this threshold (in m/s^2)
MAX_ACCELERATION = 3.0
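# For example, with trackpoints one second apart, a GPS-derived speed of
# 4 m/s at the current point gives 4 m/s^2 under the speed/timedelta test
# used below, which exceeds the 3 m/s^2 threshold, so the distance and
# speed values already stored in the FIT file are used for that point.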
"""
FIT to TCX values mapping
"""
LAP_TRIGGER_MAP = {
"manual": "Manual",
"time": "Time",
"distance": "Distance",
"position_start": "Location",
"position_lap": "Location",
"position_waypoint": "Location",
"position_marked": "Location",
"session_end": "Manual",
"fitness_equipment": "Manual"}
INTENSITY_MAP = {
"active": "Active",
"warmup": "Active",
"cooldown": "Active",
"rest": "Resting",
None: "Active"}
PRODUCT_MAP = {
0: "Unknown",
255: "Run Trainer 2.0", # Timex
# Garmin products:
1: "Garmin Connect API", # Also HRM1
2: "AXH01",
2: "AXH01",
4: "AXB02",
5: "HRM2SS",
6: "DSI_ALF02",
473: "Forerunner 301",
474: "Forerunner 301",
475: "Forerunner 301",
494: "Forerunner 301",
717: "Forerunner 405",
987: "Forerunner 405",
782: "Forerunner 50",
988: "Forerunner 60",
1011: "DSI_ALF01",
1018: "Forerunner 310XT",
1446: "Forerunner 310XT",
1036: "Edge 500",
1199: "Edge 500",
1213: "Edge 500",
1387: "Edge 500",
1422: "Edge 500",
1124: "Forerunner 110",
1274: "Forerunner 110",
1169: "Edge 800",
1333: "Edge 800",
1334: "Edge 800",
1497: "Edge 800",
1386: "Edge 800",
1253: "Chirp",
1325: "Edge 200",
1555: "Edge 200",
1328: "Forerunner 910XT",
1537: "Forerunner 910XT",
1600: "Forerunner 910XT",
1664: "Forerunner 910XT",
1765: "Forerunner 920XT",
1341: "ALF04",
1345: "Forerunner 610",
1410: "Forerunner 610",
1360: "Forerunner 210",
1436: "Forerunner 70",
1461: "AMX",
1482: "Forerunner 10",
1688: "Forerunner 10",
1499: "Swim",
1551: "Fenix",
1967: "Fenix 2",
1561: "Edge 510",
1742: "Edge 510",
1821: "Edge 510",
1567: "Edge 810",
1721: "Edge 810",
1822: "Edge 810",
1823: "Edge 810",
1836: "Edge 1000",
1570: "Tempe",
1735: "VIRB Elite",
1736: "Edge Touring",
1752: "HRM Run",
10007: "SDM4",
20119: "Training Center",
1623: "Forerunner 620",
2431: "Forerunner 235"}
"""
TCX schema and namespace values
"""
TCD_NAMESPACE = "http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2"
TCD = "{%s}" % TCD_NAMESPACE
XML_SCHEMA_NAMESPACE = "http://www.w3.org/2001/XMLSchema-instance"
XML_SCHEMA = "{%s}" % XML_SCHEMA_NAMESPACE
SCHEMA_LOCATION = \
"http://www.garmin.com/xmlschemas/ActivityExtension/v2 " + \
"http://www.garmin.com/xmlschemas/ActivityExtensionv2.xsd " + \
"http://www.garmin.com/xmlschemas/FatCalories/v1 " + \
"http://www.garmin.com/xmlschemas/fatcalorieextensionv1.xsd " + \
"http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2 " + \
"http://www.garmin.com/xmlschemas/TrainingCenterDatabasev2.xsd"
NSMAP = {
None: TCD_NAMESPACE,
"xsi": XML_SCHEMA_NAMESPACE}
# Class and context manager to suppress stdout for use with tzwhere.
class DummyFile(object):
def write(self, x): pass
@contextlib.contextmanager
def nostdout():
save_stdout = sys.stdout
sys.stdout = DummyFile()
yield
sys.stdout = save_stdout
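# Usage sketch: tzwhere prints progress messages while loading its polygon
# data, so lookups are wrapped like this (as done in TZDataProcessor below):
#
#   with nostdout():
#       w = tzwhere.tzwhere()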
class MyDataProcessor(object):
"""
Custom units data processor for FIT object
"""
def process_type_bool(self, field_data):
if field_data.value is not None:
field_data.value = bool(field_data.value)
def process_type_date_time(self, field_data):
value = field_data.value
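        # 631065600 is the Unix timestamp of the FIT epoch
        # (1989-12-31 00:00:00 UTC). Per the FIT spec, values below
        # 0x10000000 are seconds since device power-on rather than
        # absolute timestamps, so they are left untouched here.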
if value is not None and value >= 0x10000000:
dt = datetime.utcfromtimestamp(631065600 + value)
field_data.value = utc.normalize(dt.replace(tzinfo=utc))
field_data.units = None # Units were 's', set to None
def process_type_local_date_time(self, field_data):
if field_data.value is not None:
dt = datetime.fromtimestamp(631065600 + field_data.value)
field_data.value = utc.normalize(dt.replace(tzinfo=utc))
field_data.units = None
def process_units_semicircles(self, field_data):
if field_data.value is not None:
field_data.value *= 180.0 / (2**31)
field_data.units = 'deg'
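# Worked example for the semicircle conversion above:
# 0x20000000 semicircles * 180 / 2**31 = 45.0 degrees.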
class TZDataProcessor(MyDataProcessor):
"""
Extra data processor layer for working with timezones.
    For the Timex Run Trainer 2.0, date-times claim to be UTC (as per the FIT
    format spec), but are actually in an (unknown) local timezone.
If the data processor is called with a lat,lon point, we look up the true
timezone and re-normalize date-times to UTC.
Otherwise, if the data processor is called with a timezone name (defaults
to UTC, i.e. no difference), we use that and re-normalize.
"""
def __init__(self, lat=None, lon=None, tzname="UTC"):
if lat is not None and lon is not None:
with nostdout():
w = tzwhere.tzwhere()
self.tz = timezone(w.tzNameAt(lat, lon))
else:
self.tz = timezone(tzname)
def process_type_date_time(self, field_data):
value = field_data.value
if value is not None and value >= 0x10000000:
dt = datetime.utcfromtimestamp(631065600 + value)
dt = self.tz.localize(dt)
field_data.value = utc.normalize(dt)
field_data.units = None # Units were 's', set to None
def process_type_local_date_time(self, field_data):
if field_data.value is not None:
dt = datetime.fromtimestamp(631065600 + field_data.value)
dt = self.tz.localize(dt)
field_data.value = utc.normalize(dt)
field_data.units = None # Units were 's', set to None
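# Usage sketch (coordinates and zone name are illustrative): either name the
# timezone directly, or let tzwhere look it up from a lat/lon point.
#
#   processor = TZDataProcessor(tzname="Europe/London")
#   processor = TZDataProcessor(lat=51.5, lon=-0.1)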
def iso_Z_format(dt):
    """Format a UTC datetime as an ISO 8601 string with a 'Z' suffix"""
    iso = dt.isoformat()
    return iso.replace("+00:00", "Z")
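# e.g. iso_Z_format(datetime(2016, 1, 1, tzinfo=utc)) -> "2016-01-01T00:00:00Z"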
def sum_distance(activity,
start_time=datetime(1899, 1, 1, 0, 0, 1, tzinfo=utc),
end_time=datetime(2189, 12, 31, 23, 59, 59, tzinfo=utc)):
"""
Calculate distance from GPS data for an activity
"""
# First build tps array (using timestamp as the index)
# in order to coalesce values at the same timepoint
# under a single trackpoint element
tps = {}
fit_epoch = datetime(1989, 12, 31, 0, 0, 0, tzinfo=utc)
for trackpoint in activity.get_messages('record'):
tts = trackpoint.get_value("timestamp")
tsi = int((tts - fit_epoch).total_seconds())
if tps.get(tsi) is None:
tps[tsi] = {
'timestamp': tts,
'distance': None,
'position_lat': None,
'position_long': None}
for var in ['distance',
'position_lat',
'position_long']:
if trackpoint.get_value(var) is not None:
tps[tsi][var] = trackpoint.get_value(var)
    # For mid-activity laps, iterate through trackpoints to
    # grab the first point before the start of the lap, and
    # delete points that are not part of the lap
prev = None
for timestamp in sorted(tps, reverse=True):
tp = tps[timestamp]
if tp['timestamp'] < start_time and prev is None:
prev = copy.copy(tp)
if tp['timestamp'] < start_time or tp['timestamp'] > end_time:
del tps[timestamp]
# Then loop over tps array to calculate cumulative point-to-point
# distance from GPS data. Existing distance data (e.g. from footpod)
# is used when there is no GPS position available or it is bad.
distance = 0.0
for timestamp in sorted(tps):
tp = tps[timestamp]
if prev is not None:
if prev['distance'] is None:
prev_dist = 0
else:
prev_dist = prev['distance']
            if None not in (tp['position_lat'],
tp['position_long'],
prev['position_lat'],
prev['position_long']):
try:
tp_timedelta = (tp['timestamp'] -
prev['timestamp']).total_seconds()
gps_dist = GreatCircleDistance(
(tp['position_lat'],
tp['position_long']),
(prev['position_lat'],
prev['position_long'])
).meters
gps_speed = (gps_dist / tp_timedelta)
# Fallback to existing distance/speed stream data
# if the GPS data looks erroneous (acceleration test)
if (gps_speed / tp_timedelta) > MAX_ACCELERATION:
gps_dist = tp['distance'] - prev_dist
except:
# Fallback to existing distance stream data on error
gps_dist = tp['distance'] - prev_dist
else:
# Fallback to existing distance stream data if no GPS coords
gps_dist = tp['distance'] - prev_dist
distance += gps_dist
prev = tp
return distance
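# Usage sketch (file name is illustrative): total GPS/footpod distance for a
# whole activity, or for one lap by passing the lap's start/end timestamps.
#
#   fitfile = FitFile("activity.fit", data_processor=MyDataProcessor())
#   fitfile.parse()
#   total_metres = sum_distance(fitfile)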
def create_element(tag, text=None, namespace=None):
"""Create a free element"""
namespace = NSMAP[namespace]
tag = "{%s}%s" % (namespace, tag)
element = lxml.etree.Element(tag, nsmap=NSMAP)
if text is not None:
element.text = text
return element
def create_sub_element(parent, tag, text=None, namespace=None):
"""Create an element as a child of an existing given element"""
element = create_element(tag, text, namespace)
parent.append(element)
return element
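# e.g. create_element("Id", "2016-01-01T00:00:00Z") yields an element that
# serializes as <Id>2016-01-01T00:00:00Z</Id> in the default TCX namespace.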
def create_document():
"""Create a TCX XML document"""
document = create_element("TrainingCenterDatabase")
document.set(XML_SCHEMA + "schemaLocation", SCHEMA_LOCATION)
document = lxml.etree.ElementTree(document)
return document
def add_author(document):
"""Add author element (TCX writer) to TCX"""
author = create_sub_element(document.getroot(), "Author")
author.set(XML_SCHEMA + "type", "Application_t")
create_sub_element(author, "Name", "fit2tcx Converter")
build = create_sub_element(author, "Build")
version = create_sub_element(build, "Version")
vMajor, vMinor = tuple(map(int, (__version__.split("."))))
create_sub_element(version, "VersionMajor", str(vMajor))
create_sub_element(version, "VersionMinor", str(vMinor))
create_sub_element(version, "BuildMajor", "0")
create_sub_element(version, "BuildMinor", "0")
create_sub_element(author, "LangID", "en")
create_sub_element(author, "PartNumber", "000-00000-00")
def add_creator(element, manufacturer, product_name, product_id, serial):
"""Add creator element (recording device) to TCX activity"""
creator = create_sub_element(element, "Creator")
creator.set(XML_SCHEMA + "type", "Device_t")
create_sub_element(creator, "Name", manufacturer + " " + product_name)
unitID = int(serial or 0)
create_sub_element(creator, "UnitId", str(unitID))
# Set ProductID to 0 for non-Garmin devices
if manufacturer != "Garmin":
product_id = 0
create_sub_element(creator, "ProductID", str(product_id))
version = create_sub_element(creator, "Version")
create_sub_element(version, "VersionMajor", "0")
create_sub_element(version, "VersionMinor", "0")
create_sub_element(version, "BuildMajor", "0")
create_sub_element(version, "BuildMinor", "0")
def add_notes(element, text):
"""Add notes element to TCX activity"""
create_sub_element(element, "Notes", text)
def add_trackpoint(element, trackpoint, sport):
"""Create a trackpoint element"""
timestamp = trackpoint['timestamp']
pos_lat = trackpoint['position_lat']
pos_long = trackpoint['position_long']
distance = trackpoint['distance']
altitude = trackpoint['altitude']
speed = trackpoint['speed']
heart_rate = trackpoint['heart_rate']
cadence = trackpoint['cadence']
create_sub_element(element, "Time", iso_Z_format(timestamp))
if pos_lat is not None and pos_long is not None:
pos = create_sub_element(element, "Position")
create_sub_element(pos, "LatitudeDegrees", "{:.6f}".format(pos_lat))
create_sub_element(pos, "LongitudeDegrees", "{:.6f}".format(pos_long))
if altitude is not None:
create_sub_element(element, "AltitudeMeters", str(altitude))
if distance is not None:
create_sub_element(element, "DistanceMeters", str(distance))
if heart_rate is not None:
heartrateelem = create_sub_element(element, "HeartRateBpm")
heartrateelem.set(XML_SCHEMA + "type", "HeartRateInBeatsPerMinute_t")
create_sub_element(heartrateelem, "Value", str(heart_rate))
if speed is not None or cadence is not None:
if cadence is not None and sport == "Biking":
# Bike cadence is stored in main trackpoint element,
# not an extension, unlike running cadence (below)
create_sub_element(element, "Cadence", str(cadence))
exelem = create_sub_element(element, "Extensions")
tpx = create_sub_element(exelem, "TPX")
tpx.set("xmlns",
"http://www.garmin.com/xmlschemas/ActivityExtension/v2")
if speed is not None:
create_sub_element(tpx, "Speed", str(speed))
if cadence is not None:
if sport == "Running":
tpx.set("CadenceSensor", "Footpod")
create_sub_element(tpx, "RunCadence", str(cadence))
elif sport == "Biking":
tpx.set("CadenceSensor", "Bike")
def add_lap(element,
activity,
lap,
sport,
dist_recalc,
speed_recalc,
calibrate,
current_cal_factor,
per_lap_cal,
fixed_distance,
activity_scaling_factor,
total_cumulative_distance):
"""Add a lap element to a TCX document"""
# Only process laps with timestamps - this serves as a workaround for
# extra fake/empty laps in FIT files from the Timex Run Trainer 2.0
if lap.get_value('timestamp') is not None:
lap_num = lap.get_value("message_index") + 1
start_time = lap.get_value("start_time")
end_time = lap.get_value("timestamp")
totaltime = lap.get_value("total_elapsed_time")
stored_distance = lap.get_value("total_distance")
calculated_distance = sum_distance(activity, start_time, end_time)
if fixed_distance is not None:
reference_distance = fixed_distance
else:
reference_distance = calculated_distance
try:
lap_scaling_factor = reference_distance / stored_distance
except ZeroDivisionError:
lap_scaling_factor = 1.00
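        # e.g. a lap stored as 1.000 km whose reference distance (from GPS,
        # or a known fixed lap distance) is 1.050 km gives a factor of 1.05.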
if calibrate and per_lap_cal:
scaling_factor = lap_scaling_factor
else:
scaling_factor = activity_scaling_factor
max_speed = lap.get_value("max_speed")
avg_speed = lap.get_value("avg_speed")
calories = lap.get_value("total_calories")
avg_heart = lap.get_value("avg_heart_rate")
max_heart = lap.get_value("max_heart_rate")
intensity = INTENSITY_MAP[lap.get_value("intensity")]
avg_cadence = lap.get_value("avg_cadence")
max_cadence = lap.get_value("max_cadence")
if lap.get_value("lap_trigger"):
triggermet = LAP_TRIGGER_MAP[lap.get_value("lap_trigger")]
else:
triggermet = LAP_TRIGGER_MAP["manual"]
lapelem = create_sub_element(element, "Lap")
lapelem.set("StartTime", iso_Z_format(start_time))
#
# TotalTimeSeconds
#
create_sub_element(lapelem, "TotalTimeSeconds", str("%d" % totaltime))
#
# DistanceMeters
#
lap_dist_elem = create_sub_element(lapelem,
"DistanceMeters",
str("%d" % stored_distance)
)
#
# MaximumSpeed
#
lap_max_spd_elem = create_sub_element(lapelem,
"MaximumSpeed",
str("%.3f" % max_speed))
#
# Calories
#
create_sub_element(lapelem, "Calories", str("%d" % calories))
#
# AverageHeartRateBpm
#
if avg_heart is not None:
heartrateelem = create_sub_element(lapelem, "AverageHeartRateBpm")
heartrateelem.set(
XML_SCHEMA + "type", "HeartRateInBeatsPerMinute_t")
create_sub_element(heartrateelem, "Value", str("%d" % avg_heart))
#
# MaximumHeartRateBpm
#
if max_heart is not None:
heartrateelem = create_sub_element(lapelem, "MaximumHeartRateBpm")
heartrateelem.set(
XML_SCHEMA + "type", "HeartRateInBeatsPerMinute_t")
create_sub_element(heartrateelem, "Value", str("%d" % max_heart))
#
# Intensity
#
create_sub_element(lapelem, "Intensity", intensity)
#
# Cadence (bike)
#
if avg_speed or avg_cadence or max_cadence:
if sport == "Biking" and avg_cadence is not None:
# Average bike cadence is stored in main lap element,
# not as an extension, unlike average running cadence (below)
create_sub_element(lapelem, "Cadence", str("%d" % avg_cadence))
#
# TriggerMethod
#
create_sub_element(lapelem, "TriggerMethod", triggermet)
if dist_recalc:
distance_used = calculated_distance
elif calibrate:
if fixed_distance is not None:
distance_used = fixed_distance
else:
distance_used = stored_distance * scaling_factor
else:
distance_used = stored_distance
#
# Track
#
trackelem = create_sub_element(lapelem, "Track")
# First build tps array (using timestamp as the index)
# in order to coalesce values at the same timepoint
# under a single trackpoint element
tps = {}
        fit_epoch = datetime(1989, 12, 31, 0, 0, 0, tzinfo=utc)
for trackpoint in activity.get_messages('record'):
tts = trackpoint.get_value("timestamp")
tsi = int((tts - fit_epoch).total_seconds())
if tps.get(tsi) is None:
tps[tsi] = {
'timestamp': tts,
'cadence': None,
'distance': None,
'position_lat': None,
'position_long': None,
'heart_rate': None,
'altitude': None,
'speed': None}
for var in ['cadence',
'distance',
'position_lat',
'position_long',
'heart_rate',
'altitude',
'speed']:
if trackpoint.get_value(var) is not None:
tps[tsi][var] = trackpoint.get_value(var)
# Iterate through all trackpoints to grab the first point before the
# start of the lap, then delete points that are not part of the lap
prev = None
for timestamp in sorted(tps, reverse=True):
tp = tps[timestamp]
if tp['timestamp'] < start_time and prev is None:
prev = copy.copy(tp)
if tp['timestamp'] < start_time or tp['timestamp'] > end_time:
del tps[timestamp]
        # Then process all trackpoints for this lap, recalculating speed &
        # distance from GPS and adjusting if requested, before adding each
        # trackpoint element
stored_avg_speed = copy.copy(avg_speed)
stored_max_speed = copy.copy(max_speed)
distance = 0.0
max_speed = 0.0
tp_speed = None
for timestamp in sorted(tps):
tp = tps[timestamp]
trackpointelem = create_sub_element(trackelem, "Trackpoint")
if prev is not None:
if prev['distance'] is None:
prev['distance'] = 0
try:
tp_timedelta = (tp['timestamp'] -
prev['timestamp']).total_seconds()
gps_dist = GreatCircleDistance(
(tp['position_lat'],
tp['position_long']),
(prev['position_lat'],
prev['position_long'])
).meters
gps_speed = (gps_dist / tp_timedelta)
# Fallback to existing distance/speed stream data
# if the GPS data looks erroneous (acceleration test)
if (gps_speed / tp_timedelta) > MAX_ACCELERATION:
gps_speed = tp['speed']
gps_dist = tp['distance'] - prev['distance']
except:
gps_speed = tp['speed']
gps_dist = tp['distance'] - prev['distance']
if dist_recalc:
tp_dist = gps_dist
elif calibrate:
tp_dist = (
tp['distance'] - prev['distance']) * scaling_factor
else:
tp_dist = tp['distance'] - prev['distance']
try:
if speed_recalc:
tp_speed = gps_speed
elif calibrate:
tp_speed = tp['speed'] * scaling_factor
else:
tp_speed = tp['speed']
total_cumulative_distance += tp_dist
distance += tp_dist
if tp_speed > max_speed:
max_speed = tp_speed
except TypeError:
tp_speed = None
# Store previous trackpoint before changing the current one
prev = copy.copy(tp)
# Adjust trackpoint distance & speed values if requested
if ((dist_recalc or calibrate)
and tp['distance'] is not None
and total_cumulative_distance is not None):
tp['distance'] = "{:.1f}".format(total_cumulative_distance)
if ((speed_recalc or calibrate)
and tp['speed'] is not None
and tp_speed is not None):
tp['speed'] = "{:.3f}".format(tp_speed)
# Add trackpoint element
add_trackpoint(trackpointelem, tp, sport)
#
# Notes
#
if fixed_distance is not None:
precision_str = ("; known distance: {ref_dist:.3f} km "
"(FIT precision: {fit_precision:.1f}%; "
"GPS/footpod precision: {gps_precision:.1f}%)")
reference = "known distance"
else:
precision_str = " (precision: {precision:.1f}%)"
reference = "GPS/footpod"
try:
fit_precision_calc = (1 - (abs(reference_distance -
stored_distance) /
reference_distance)) * 100
gps_precision_calc = (1 - (abs(reference_distance -
calculated_distance) /
reference_distance)) * 100
precision_calc = (1 - (abs(calculated_distance -
stored_distance) /
calculated_distance)) * 100
except ZeroDivisionError:
fit_precision_calc = 100
gps_precision_calc = 100
precision_calc = 100
notes = ("Lap {lap_number:d}: {distance_used:.3f} km in {total_time!s}\n"
"Distance in FIT file: {fit_dist:.3f} km; "
"calculated via GPS/footpod: {gps_dist:.3f} km"
+ precision_str + "\n"
"Footpod calibration factor setting: {old_cf:.1f}%; "
"new factor based on {reference} for this lap: {new_cf:.1f}%"
).format(lap_number=lap_num,
distance_used=distance_used / 1000,
total_time=timedelta(seconds=int(totaltime)),
fit_dist=stored_distance / 1000,
gps_dist=calculated_distance / 1000,
ref_dist=reference_distance / 1000,
fit_precision=fit_precision_calc,
gps_precision=gps_precision_calc,
precision=precision_calc,
old_cf=current_cal_factor,
reference=reference,
new_cf=lap_scaling_factor * current_cal_factor)
add_notes(lapelem, notes)
#
# Extensions (AvgSpeed, AvgRunCadence, MaxRunCadence, MaxBikeCadence)
#
if not all(var is None for var in (avg_speed, avg_cadence, max_cadence)):
exelem = create_sub_element(lapelem, "Extensions")
lx = create_sub_element(exelem, "LX")
lx.set("xmlns",
"http://www.garmin.com/xmlschemas/ActivityExtension/v2")
if avg_speed is not None:
lap_avg_spd_elem = create_sub_element(lx,
"AvgSpeed",
str("%.3f" % avg_speed))
if avg_cadence is not None and sport == "Running":
create_sub_element(lx,
"AvgRunCadence",
str("%d" % avg_cadence))
if max_cadence is not None:
if sport == "Running":
create_sub_element(lx,
"MaxRunCadence",
str("%d" % max_cadence))
elif sport == "Biking":
create_sub_element(lx,
"MaxBikeCadence",
str("%d" % max_cadence))
# Adjust overall lap distance & speed values if required
if calibrate:
# Manual distance:
if fixed_distance is not None:
lap_dist_elem.text = "{:d}".format(int(fixed_distance))
lap_avg_spd_elem.text = "{:.3f}".format(
fixed_distance / totaltime)
else:
lap_dist_elem.text = "{:d}".format(
int(stored_distance * scaling_factor))
lap_avg_spd_elem.text = "{:.3f}".format(
stored_avg_speed * scaling_factor)
lap_max_spd_elem.text = "{:.3f}".format(
stored_max_speed * scaling_factor)
# GPS recalculation options override calibration:
if dist_recalc:
lap_dist_elem.text = "{:d}".format(int(distance))
if speed_recalc:
lap_avg_spd_elem.text = "{:.3f}".format(distance / totaltime)
lap_max_spd_elem.text = "{:.3f}".format(max_speed)
return distance
else:
return 0
def add_activity(element,
session,
activity,
dist_recalc,
speed_recalc,
calibrate,
current_cal_factor,
per_lap_cal,
manual_lap_distance,
activity_scaling_factor):
"""Add an activity to a TCX document"""
# Sport type
sport = session.get_value("sport")
sport_mapping = {"running": "Running", "cycling": "Biking"}
sport = sport_mapping[sport] if sport in sport_mapping else "Other"
actelem = create_sub_element(element, "Activity")
actelem.set("Sport", sport)
create_sub_element(actelem,
"Id",
iso_Z_format(session.get_value("start_time")))
total_cumulative_distance = 0.0
lap_num = 0
for lap in activity.get_messages('lap'):
if lap.get_value("start_time") == lap.get_value("timestamp"):
continue # skip very short laps that won't have any data
if manual_lap_distance is not None:
try:
fixed_dist = manual_lap_distance[lap_num]
except IndexError:
fixed_dist = None
else:
fixed_dist = None
lap_dist = add_lap(actelem,
activity,
lap,
sport,
dist_recalc,
speed_recalc,
calibrate,
current_cal_factor,
per_lap_cal,
fixed_dist,
activity_scaling_factor,
total_cumulative_distance)
total_cumulative_distance += lap_dist
lap_num += 1
return (actelem, total_cumulative_distance)
def convert(filename,
time_zone="auto",
dist_recalc=False,
speed_recalc=False,
calibrate=False,
per_lap_cal=False,
manual_lap_distance=None,
current_cal_factor=100.0):
"""Convert a FIT file to TCX format"""
# Calibration requires either GPS recalculation or manual lap distance(s):
if calibrate and not dist_recalc and manual_lap_distance is None:
sys.stderr.write("Calibration requested, enabling distance recalculation from GPS/footpod.\n")
dist_recalc = True
# Calibration with manual lap distances implies
# per-lap calibration:
if calibrate and manual_lap_distance is not None:
per_lap_cal = True
document = create_document()
element = create_sub_element(document.getroot(), "Activities")
try:
if time_zone == "auto":
# We need activity object to be able to get trackpoints,
# before re-creating activity again with timezone info
activity = FitFile(filename,
check_crc=False,
data_processor=MyDataProcessor())
activity.parse()
lat = None
lon = None
for trackpoint in activity.get_messages('record'):
if lat is not None and lon is not None:
break
lat = trackpoint.get_value("position_lat")
lon = trackpoint.get_value("position_long")
if lat is not None and lon is not None:
activity = FitFile(filename,
check_crc=False,
data_processor=TZDataProcessor(lat=lat,
lon=lon))
else:
activity = FitFile(filename,
check_crc=False,
data_processor=TZDataProcessor(tzname=time_zone))
activity.parse()
session = next(activity.get_messages('session'))
total_activity_distance = session.get_value('total_distance')
total_calculated_distance = sum_distance(activity)
activity_scaling_factor = (total_calculated_distance /
total_activity_distance)
new_cal_factor = activity_scaling_factor * current_cal_factor
actelem, total_distance = add_activity(element,
session,
activity,
dist_recalc,
speed_recalc,
calibrate,
current_cal_factor,
per_lap_cal,
manual_lap_distance,
activity_scaling_factor)
except FitParseError as e:
sys.stderr.write(str("Error while parsing .FIT file: %s" % e) + "\n")
sys.exit(1)
if dist_recalc:
distance_used = total_calculated_distance
elif calibrate:
distance_used = total_distance
else:
distance_used = total_activity_distance
method = ""
if dist_recalc or speed_recalc or calibrate:
parts = []
if calibrate:
if per_lap_cal:
parts.append("calibration applied per lap")
else:
parts.append("calibration applied")
if dist_recalc and speed_recalc:
parts.append("speed and distance recalculated")
elif dist_recalc:
parts.append("distance recalculated")
elif speed_recalc:
parts.append("speed recalculated")
if calibrate and manual_lap_distance is not None:
reference = " from known distance (with GPS fill-in)"
elif dist_recalc or speed_recalc:
reference = " from GPS/footpod"
method = "(" + ", ".join(parts) + reference + ")"
notes = ("{total_laps:d} laps: {distance_used:.3f} km in {total_time!s} {dist_method:s}\n"
"Distance in FIT file: {fit_dist:.3f} km; "
"calculated via GPS/footpod: {gps_dist:.3f} km "
"(precision: {precision:.1f}%)\n"
"Footpod calibration factor setting: {old_cf:.1f}%; "
"new factor based on recomputed distance: {new_cf:.1f}%"
).format(total_laps=session.get_value('num_laps'),
distance_used=distance_used / 1000,
total_time=timedelta(seconds=int(session.get_value(
'total_timer_time'))),
fit_dist=total_activity_distance / 1000,
gps_dist=total_calculated_distance / 1000,
precision=(1 - (abs(total_calculated_distance -
total_activity_distance) /
total_calculated_distance)) * 100,
old_cf=current_cal_factor,
new_cf=new_cal_factor,
dist_method=method)
add_notes(actelem, notes)
try:
dinfo = next(activity.get_messages('device_info'))
manufacturer = dinfo.get_value('manufacturer').title().replace('_', ' ')
product_name = dinfo.get_value('descriptor').replace('_', ' ')
product_id = dinfo.get_value('product')
serial_number = dinfo.get_value('serial_number')
except: # if no device_info message, StopIteration is thrown
fid = next(activity.get_messages('file_id'))
manufacturer = fid.get_value('manufacturer').title().replace('_', ' ')
product_id = fid.get_value('product')
product_name = PRODUCT_MAP[product_id] if product_id in PRODUCT_MAP else product_id
serial_number = fid.get_value('serial_number')
add_creator(actelem,
manufacturer,
product_name,
product_id,
serial_number
)
add_author(document)
return document
def main():
"""Read arguments from command line to convert FIT file to TCX"""
parser = argparse.ArgumentParser(prog="fit2tcx")
parser.add_argument("FitFile", help="Input FIT file")
parser.add_argument("TcxFile", help="Output TCX file")
parser.add_argument(
"-v",
"--version",
action='version',
version='%(prog)s {version}'.format(version=__version__))
parser.add_argument(
"-z",
"--timezone",
action="store",
type=str,
default="auto",
help="Specify the timezone for FIT file timestamps (default, 'auto', uses GPS data to lookup the local timezone)")
parser.add_argument(
"-d",
"--recalculate-distance-from-gps",
action="store_true",
help="Recalculate distance from GPS data")
parser.add_argument(
"-s",
"--recalculate-speed-from-gps",
action="store_true",
help="Recalculate speed from GPS data")
parser.add_argument(
"-c",
"--calibrate-footpod",
action="store_true",
help="Use GPS-measured and/or known distance to calibrate footpod data")
parser.add_argument(
"-p",
"--per-lap-calibration",
action="store_true",
help="Apply footpod calibration on a per lap basis")
parser.add_argument(
"-l",
"--manual-lap-distance",
action="append",
default=None,
type=float,
help="Manually specify known lap distance(s) (in metres, use calibration to apply)")
parser.add_argument(
"-f",
"--calibration-factor",
action="store",
default=100.0,
type=float,
help="Existing calibration factor (defaults to 100.0)")
args = parser.parse_args()
if (args.calibrate_footpod and
not args.recalculate_distance_from_gps and
not args.manual_lap_distance):
parser.error("-c (--calibrate-footpod) requires either -d (--recalculate-distance-from-gps) or -l (--manual-lap-distance)")
return 1
try:
document = convert(args.FitFile,
args.timezone,
args.recalculate_distance_from_gps,
args.recalculate_speed_from_gps,
args.calibrate_footpod,
args.per_lap_calibration,
args.manual_lap_distance,
args.calibration_factor)
activity_notes = document.getroot().findtext(".//{*}Activity/{*}Notes")
if activity_notes is not None:
sys.stdout.write(str(activity_notes) + "\n")
tcx = open(args.TcxFile, 'wb')
tcx.write(lxml.etree.tostring(document.getroot(),
pretty_print=True,
xml_declaration=True,
encoding="UTF-8"))
return 0
except FitParseError as exception:
sys.stderr.write(str(exception) + "\n")
return 1
if __name__ == "__main__":
sys.exit(main())
| mit | 1,875,265,608,820,062,500 | 36.641073 | 131 | 0.541239 | false | 4.032706 | false | false | false |
gkabbe/cMDLMC | mdlmc/IO/converters.py | 1 | 1764 | # coding=utf-8
import logging
import os
import pathlib
import tables
import h5py
import daiquiri
import fire
import numpy as np
from typing import Union, Iterable
from ..atoms.numpy_atom import dtype_xyz
from ..atoms import numpy_atom as npa
from ..IO.trajectory_parser import XYZTrajectory
logger = logging.getLogger(__name__)
def save_xyz_to_hdf5(xyz_fname, hdf5_fname=None, *, remove_com_movement=False,
dataset_name="trajectory", selection=None):
"""
Note: HDF5 with Blosc compression currently only works if h5py and pytables are installed via
conda!"""
xyz = XYZTrajectory(xyz_fname, selection=selection)
logger.info("Determine length of xyz trajectory.")
trajectory_length = len(xyz)
first_frame = next(iter(xyz))
frame_shape = first_frame.atom_positions.shape
atom_names = first_frame.atom_names.astype("S")
logger.info("Names: %s", atom_names)
if not hdf5_fname:
hdf5_fname = os.path.splitext(xyz_fname)[0] + ".hdf5"
with h5py.File(hdf5_fname, "w") as hdf5_file:
# Use blosc compression (needs tables import and code 32001)
traj_atomnames = hdf5_file.create_dataset("atom_names", atom_names.shape, dtype="2S")
traj_atomnames[:] = atom_names
traj = hdf5_file.create_dataset(dataset_name, shape=(trajectory_length, *frame_shape),
dtype=np.float32, compression=32001)
for i, xyz_frame in enumerate(xyz):
if remove_com_movement:
npa.remove_center_of_mass_movement(xyz_frame)
if i % 1000 == 0:
logger.info("Frame %i", i)
traj[i] = xyz_frame.atom_positions
def main():
daiquiri.setup(level=logging.INFO)
fire.Fire()
| gpl-3.0 | -4,830,993,265,289,770,000 | 31.072727 | 97 | 0.652494 | false | 3.431907 | false | false | false |
chromium2014/src | tools/perf/page_sets/intl_ko_th_vi.py | 1 | 1913 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class IntlKoThViPage(page_module.Page):
def __init__(self, url, page_set):
super(IntlKoThViPage, self).__init__(url=url, page_set=page_set)
self.user_agent_type = 'desktop'
self.archive_data_file = 'data/intl_ko_th_vi.json'
def RunSmoothness(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage()
interaction.End()
class IntlKoThViPageSet(page_set_module.PageSet):
""" Popular pages in Korean, Thai and Vietnamese. """
def __init__(self):
super(IntlKoThViPageSet, self).__init__(
user_agent_type='desktop',
archive_data_file='data/intl_ko_th_vi.json',
bucket=page_set_module.PARTNER_BUCKET)
urls_list = [
# Why: #7 site in Vietnam
'http://us.24h.com.vn/',
# Why: #6 site in Vietnam
'http://vnexpress.net/',
# Why: #18 site in Vietnam
'http://vietnamnet.vn/',
# Why: #5 site in Vietnam
# pylint: disable=C0301
'http://news.zing.vn/the-gioi/ba-dam-thep-margaret-thatcher-qua-doi/a312895.html#home_noibat1',
'http://kenh14.vn/home.chn',
# Why: #5 site in Korea
'http://www.naver.com/',
# Why: #9 site in Korea
'http://www.daum.net/',
# Why: #25 site in Korea
'http://www.donga.com/',
'http://www.chosun.com/',
'http://www.danawa.com/',
# Why: #10 site in Thailand
'http://pantip.com/',
'http://thaimisc.com/'
]
for url in urls_list:
self.AddPage(IntlKoThViPage(url, self))
| bsd-3-clause | -6,543,040,283,645,747,000 | 31.423729 | 101 | 0.644015 | false | 2.989063 | false | false | false |
jakevdp/altair | altair/utils/deprecation.py | 1 | 1447 | import warnings
# import functools
class AltairDeprecationWarning(UserWarning):
pass
def _deprecated(obj, name=None, message=None):
"""Return a version of a class or function that raises a deprecation warning.
Parameters
----------
obj : class or function
The object to create a deprecated version of.
name : string (optional)
The name of the deprecated object
message : string (optional)
The deprecation message
Returns
-------
deprecated_obj :
The deprecated version of obj
Examples
--------
>>> class Foo(object): pass
>>> OldFoo = _deprecated(Foo, "OldFoo")
>>> f = OldFoo() # doctest: +SKIP
AltairDeprecationWarning: alt.OldFoo is deprecated. Use alt.Foo instead.
"""
if message is None:
message = ("alt.{} is deprecated. Use alt.{} instead."
"".format(name, obj.__name__))
if isinstance(obj, type):
return type(name, (obj,),
{'__doc__': obj.__doc__,
'__init__': _deprecated(obj.__init__, "__init__", message)})
elif callable(obj):
# @functools.wraps(obj) # TODO: use this in Py3 only
def new_obj(*args, **kwargs):
warnings.warn(message, AltairDeprecationWarning)
return obj(*args, **kwargs)
return new_obj
else:
raise ValueError("Cannot deprecate object of type {}".format(type(obj)))
| bsd-3-clause | 4,549,240,084,075,891,000 | 29.787234 | 81 | 0.583967 | false | 4.425076 | false | false | false |
kd0aij/matrixpilot_old | Tools/MAVLink/MAVProxy/modules/antenna.py | 1 | 2346 | #!/usr/bin/env python
'''
antenna pointing module
Andrew Tridgell
June 2012
'''
import sys, os, time
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'cuav', 'lib'))
import cuav_util
mpstate = None
class module_state(object):
def __init__(self):
self.gcs_location = None
self.last_bearing = 0
self.last_announce = 0
def name():
'''return module name'''
return "antenna"
def description():
'''return module description'''
return "antenna pointing module"
def cmd_antenna(args):
'''set gcs location'''
state = mpstate.antenna_state
usage = "antenna lat lon"
if len(args) != 2:
if state.gcs_location is None:
print("GCS location not set")
else:
print("GCS location %s" % str(state.gcs_location))
return
state.gcs_location = (float(args[0]), float(args[1]))
def init(_mpstate):
'''initialise module'''
global mpstate
mpstate = _mpstate
mpstate.antenna_state = module_state()
mpstate.command_map['antenna'] = (cmd_antenna, "antenna link control")
def unload():
'''unload module'''
pass
def mavlink_packet(m):
'''handle an incoming mavlink packet'''
state = mpstate.antenna_state
if state.gcs_location is None and mpstate.status.wploader.count() > 0:
home = mpstate.status.wploader.wp(0)
mpstate.antenna_state.gcs_location = (home.x, home.y)
print("Antenna home set")
if state.gcs_location is None:
return
if m.get_type() == 'GPS_RAW' and state.gcs_location is not None:
(gcs_lat, gcs_lon) = state.gcs_location
bearing = cuav_util.gps_bearing(gcs_lat, gcs_lon, m.lat, m.lon)
elif m.get_type() == 'GPS_RAW_INT' and state.gcs_location is not None:
(gcs_lat, gcs_lon) = state.gcs_location
bearing = cuav_util.gps_bearing(gcs_lat, gcs_lon, m.lat/1.0e7, m.lon/1.0e7)
else:
return
mpstate.console.set_status('Antenna', 'Antenna %.0f' % bearing, row=0)
if abs(bearing - state.last_bearing) > 5 and (time.time() - state.last_announce) > 15:
state.last_bearing = bearing
state.last_announce = time.time()
mpstate.functions.say("Antenna %u" % int(bearing+0.5))
| gpl-3.0 | 5,135,345,788,596,944,000 | 30.583333 | 104 | 0.602728 | false | 3.222527 | false | false | false |
kubernetes-client/python | kubernetes/client/models/v1alpha1_webhook_throttle_config.py | 1 | 4435 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1alpha1WebhookThrottleConfig(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'burst': 'int',
'qps': 'int'
}
attribute_map = {
'burst': 'burst',
'qps': 'qps'
}
def __init__(self, burst=None, qps=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1WebhookThrottleConfig - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._burst = None
self._qps = None
self.discriminator = None
if burst is not None:
self.burst = burst
if qps is not None:
self.qps = qps
@property
def burst(self):
"""Gets the burst of this V1alpha1WebhookThrottleConfig. # noqa: E501
ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS # noqa: E501
:return: The burst of this V1alpha1WebhookThrottleConfig. # noqa: E501
:rtype: int
"""
return self._burst
@burst.setter
def burst(self, burst):
"""Sets the burst of this V1alpha1WebhookThrottleConfig.
ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS # noqa: E501
:param burst: The burst of this V1alpha1WebhookThrottleConfig. # noqa: E501
:type: int
"""
self._burst = burst
@property
def qps(self):
"""Gets the qps of this V1alpha1WebhookThrottleConfig. # noqa: E501
ThrottleQPS maximum number of batches per second default 10 QPS # noqa: E501
:return: The qps of this V1alpha1WebhookThrottleConfig. # noqa: E501
:rtype: int
"""
return self._qps
@qps.setter
def qps(self, qps):
"""Sets the qps of this V1alpha1WebhookThrottleConfig.
ThrottleQPS maximum number of batches per second default 10 QPS # noqa: E501
:param qps: The qps of this V1alpha1WebhookThrottleConfig. # noqa: E501
:type: int
"""
self._qps = qps
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1WebhookThrottleConfig):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1WebhookThrottleConfig):
return True
return self.to_dict() != other.to_dict()
| apache-2.0 | -7,392,792,150,688,209,000 | 28.566667 | 124 | 0.583089 | false | 3.988309 | true | false | false |
adamcaudill/yawast | yawast/scanner/plugins/http/applications/wordpress.py | 1 | 7344 | # Copyright (c) 2013 - 2020 Adam Caudill and Contributors.
# This file is part of YAWAST which is released under the MIT license.
# See the LICENSE file or go to https://yawast.org/license/ for full license details.
import re
from typing import Tuple, Union, List, cast
from urllib.parse import urljoin
from packaging import version
from requests import Response
from yawast.reporting.enums import Vulnerabilities
from yawast.scanner.plugins.evidence import Evidence
from yawast.scanner.plugins.http import version_checker, response_scanner
from yawast.scanner.plugins.result import Result
from yawast.shared import network, output
def identify(url: str) -> Tuple[Union[str, None], List[Result]]:
results = []
# find WordPress
res, path = _identify_by_path(url, "")
if path is None:
res, path = _identify_by_path(url, "blog/")
# check to see if we have a valid hit
if path is not None:
# we have a WordPress install, let's see if we can get a version
body = res.text
ver = "Unknown"
# this works for modern versions
m = re.search(r"login.min.css\?ver=\d+\.\d+\.?\d*", body)
if m:
ver = m.group(0).split("=")[1]
else:
# the current method doesn't work, fall back to an older method
m = re.search(r"load-styles.php\?[\w,;=&%]+;ver=\d+\.\d+\.?\d*", body)
if m:
ver = m.group(0).split("=")[-1]
# report that we found WordPress
results.append(
Result.from_evidence(
Evidence.from_response(res, {"version": ver}),
f"Found WordPress v{ver} at {path}",
Vulnerabilities.APP_WORDPRESS_VERSION,
)
)
# is this a current version?
ver = cast(version.Version, version.parse(ver))
curr_version = version_checker.get_latest_version("wordpress", ver)
if curr_version is not None and curr_version > ver:
results.append(
Result.from_evidence(
Evidence.from_response(
res,
{
"installed_version": str(ver),
"current_verison": str(curr_version),
},
),
f"WordPress Outdated: {ver} - Current: {curr_version}",
Vulnerabilities.APP_WORDPRESS_OUTDATED,
)
)
return path, results
else:
return None, []
def check_path_disclosure(wp_url: str) -> List[Result]:
# this is a list of files that are known to throw a fatal error when accessed directly
# this is from a manual review of all plugins with at least 1M installs
urls = [
"wp-content/plugins/hello.php",
"wp-content/plugins/akismet/akismet.php",
"wp-content/plugins/contact-form-7/includes/capabilities.php",
"wp-content/plugins/wordpress-seo/admin/views/partial-alerts-errors.php",
"wp-content/plugins/jetpack/load-jetpack.php",
"wp-content/plugins/jetpack/uninstall.php",
"wp-content/plugins/duplicate-post/duplicate-post-admin.php",
"wp-content/plugins/wpforms-lite/includes/admin/class-welcome.php",
"wp-content/plugins/wp-google-maps/base/includes/welcome.php",
"wp-content/plugins/wp-super-cache/wp-cache.php",
"wp-content/plugins/mailchimp-for-wp/integrations/wpforms/bootstrap.php",
"wp-content/plugins/mailchimp-for-wp/integrations/bootstrap.php",
"wp-content/plugins/regenerate-thumbnails/regenerate-thumbnails.php",
"wp-content/plugins/advanced-custom-fields/includes/deprecated.php",
"wp-content/plugins/redirection/redirection.php",
"wp-content/plugins/wpforms-lite/includes/admin/importers/class-ninja-forms.php",
"wp-content/plugins/ninja-forms/includes/deprecated.php",
"wp-content/plugins/so-widgets-bundle/so-widgets-bundle.php",
"wp-content/plugins/wp-fastest-cache/templates/preload.php",
"wp-content/plugins/duplicate-page/duplicatepage.php",
"wp-content/plugins/better-wp-security/better-wp-security.php",
"wp-content/plugins/all-in-one-wp-security-and-firewall/other-includes/wp-security-unlock-request.php",
"wp-content/plugins/related-posts/views/settings.php",
"wp-content/plugins/wpcontentguard/views/settings.php",
"wp-content/plugins/simple-social-icons/simple-social-icons.php",
]
results: List[Result] = []
for url in urls:
target = urljoin(wp_url, url)
head = network.http_head(target, False)
if head.status_code != 404:
resp = network.http_get(target, False)
if resp.status_code < 300 or resp.status_code >= 500:
# we have some kind of response that could be useful
if "<b>Fatal error</b>:" in resp.text:
# we have an error
pattern = r"<b>((\/|[A-Z]:\\).*.php)<\/b>"
if re.search(pattern, resp.text):
try:
path = re.findall(pattern, resp.text)[0][0]
results.append(
Result.from_evidence(
Evidence.from_response(resp, {"path": path}),
f"WordPress File Path Disclosure: {target} ({path})",
Vulnerabilities.APP_WORDPRESS_PATH_DISCLOSURE,
)
)
except Exception:
output.debug_exception()
results += response_scanner.check_response(target, resp)
return results
def check_json_user_enum(url: str) -> List[Result]:
results = []
target = urljoin(url, "wp-json/wp/v2/users")
res = network.http_get(target, False)
body = res.text
if res.status_code < 300 and "slug" in body:
data = res.json()
# log the enum finding
results.append(
Result.from_evidence(
Evidence.from_response(res),
f"WordPress WP-JSON User Enumeration at {target}",
Vulnerabilities.APP_WORDPRESS_USER_ENUM_API,
)
)
# log the individual users
for user in data:
results.append(
Result.from_evidence(
Evidence.from_response(
res,
{
"user_id": user["id"],
"user_slug": user["slug"],
"user_name": user["name"],
},
),
f"ID: {user['id']}\tUser Slug: '{user['slug']}'\t\tUser Name: '{user['name']}'",
Vulnerabilities.APP_WORDPRESS_USER_FOUND,
)
)
return results
def _identify_by_path(url: str, path: str) -> Tuple[Response, Union[str, None]]:
target = urljoin(url, f"{path}wp-login.php")
res = network.http_get(target, False)
body = res.text
if res.status_code == 200 and "Powered by WordPress" in body:
return res, urljoin(url, path)
else:
return res, None
| mit | 6,941,804,610,785,816,000 | 38.483871 | 111 | 0.564134 | false | 3.881607 | false | false | false |
cysuncn/python | spark/crm/PROC_M_R_RET_CUST_FLOW.py | 1 | 4734 | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_M_R_RET_CUST_FLOW').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
#处理需要使用的日期
etl_date = sys.argv[1]
#etl日期
V_DT = etl_date
#上一日日期
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
#月初日期
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
#上月末日期
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
#10位日期
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
#MCRM_RET_CUST_FLOW 增量 删除当天文件
ret = os.system("hdfs dfs -rm -r /"+dbname+"/MCRM_RET_CUST_FLOW/"+V_DT+".parquet")
MCRM_RET_CUST_ASSETS = sqlContext.read.parquet(hdfs+'/MCRM_RET_CUST_ASSETS/*')
MCRM_RET_CUST_ASSETS.registerTempTable("MCRM_RET_CUST_ASSETS")
ACRM_F_AG_AGREEMENT = sqlContext.read.parquet(hdfs+'/ACRM_F_AG_AGREEMENT/*')
ACRM_F_AG_AGREEMENT.registerTempTable("ACRM_F_AG_AGREEMENT")
#任务[21] 001-01::
V_STEP = V_STEP + 1
sql = """
SELECT CUST_ID AS CUST_ID
,FR_ID AS FR_ID
,MIN(concat(SUBSTR(START_DATE, 1, 4),'-',SUBSTR(START_DATE, 6, 2),'-',SUBSTR(START_DATE, 9, 2))) AS OPEN_DATE
,MAX(concat(SUBSTR(END_DATE, 1, 4),'-',SUBSTR(END_DATE, 6, 2),'-',SUBSTR(END_DATE, 9, 2))) AS CANCEL_DATE
FROM ACRM_F_AG_AGREEMENT A --客户协议表
GROUP BY FR_ID
,CUST_ID """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
TMP_MCRM_RET_CUST_FLOW_01 = sqlContext.sql(sql)
TMP_MCRM_RET_CUST_FLOW_01.registerTempTable("TMP_MCRM_RET_CUST_FLOW_01")
dfn="TMP_MCRM_RET_CUST_FLOW_01/"+V_DT+".parquet"
TMP_MCRM_RET_CUST_FLOW_01.cache()
nrows = TMP_MCRM_RET_CUST_FLOW_01.count()
TMP_MCRM_RET_CUST_FLOW_01.write.save(path=hdfs + '/' + dfn, mode='overwrite')
TMP_MCRM_RET_CUST_FLOW_01.unpersist()
ACRM_F_AG_AGREEMENT.unpersist()
ret = os.system("hdfs dfs -rm -r /"+dbname+"/TMP_MCRM_RET_CUST_FLOW_01/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert TMP_MCRM_RET_CUST_FLOW_01 lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
#任务[11] 001-02::
V_STEP = V_STEP + 1
sql = """
SELECT A.CUST_ID AS CUST_ID
,A.CUST_ZH_NAME AS CUST_ZH_NAME
,A.CUST_MANAGER AS CUST_MANAGER
,A.CUST_MANAGER_NAME AS CUST_MANAGER_NAME
,A.ORG_ID AS ORG_ID
,A.ORG_NAME AS ORG_NAME
,A.CUST_LEVEL AS CUST_LEVEL
,A.GRADE_DATE AS GRADE_DATE
,B.OPEN_DATE AS OPEN_DATE
,C.CANCEL_DATE AS CANCEL_DATE
,A.MONTH_BAL AS CUST_ASSETS
,A.OLD_CUST_LEVEL AS CUST_LEVEL_FU
,A.ST_DATE AS ST_DATE
,'' AS O_MAIN_TYPE
,'' AS M_MAIN_TYPE
FROM MCRM_RET_CUST_ASSETS A --客户资产情况表
LEFT JOIN TMP_MCRM_RET_CUST_FLOW_01 B --客户流入流出机构统计表临时表01
ON A.CUST_ID = B.CUST_ID
AND B.FR_ID = A.FR_ID
AND SUBSTR(B.OPEN_DATE, 1, 7) = SUBSTR(V_DT, 1, 7)
LEFT JOIN TMP_MCRM_RET_CUST_FLOW_01 C --客户流入流出机构统计表临时表01
ON A.CUST_ID = C.CUST_ID
AND C.FR_ID = A.FR_ID
AND SUBSTR(C.CANCEL_DATE, 1, 7) = SUBSTR(V_DT, 1, 7)
WHERE A.ST_DATE = V_DT """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
MCRM_RET_CUST_FLOW = sqlContext.sql(sql)
MCRM_RET_CUST_FLOW.registerTempTable("MCRM_RET_CUST_FLOW")
dfn="MCRM_RET_CUST_FLOW/"+V_DT+".parquet"
MCRM_RET_CUST_FLOW.cache()
nrows = MCRM_RET_CUST_FLOW.count()
MCRM_RET_CUST_FLOW.write.save(path=hdfs + '/' + dfn, mode='append')
MCRM_RET_CUST_FLOW.unpersist()
MCRM_RET_CUST_ASSETS.unpersist()
TMP_MCRM_RET_CUST_FLOW_01.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert MCRM_RET_CUST_FLOW lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
| gpl-3.0 | 6,747,739,261,989,934,000 | 42.150943 | 177 | 0.580892 | false | 2.453863 | false | true | false |
thomastweets/PythonRSA | GUI_RSA.py | 1 | 15279 | ############################
### GUI for RS analysis ###
############################
import wx
import rsa
import os
import webbrowser
files_number = 0
class RSA_GUI(wx.Frame):
def __init__(self, parent, title):
super(RSA_GUI,self).__init__(parent, style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER, title = title, size = (400,275))
self.InitUI()
self.Show(True)
def InitUI(self):
## Creates Status Bar
self.CreateStatusBar()
self.menuBar = wx.MenuBar()
self.filemenu = wx.Menu()
self.helpmenu = wx.Menu()
self.menuHelp = self.helpmenu.Append(wx.ID_ANY, "&Help", "Learn more about RSA and how to use this program")
self.menuAbout = self.helpmenu.Append(wx.ID_ABOUT, "&About", "Learn more about this program")
self.menuClear = self.filemenu.Append(wx.ID_ANY,"&Clear","Clear data")
self.filemenu.AppendSeparator()
self.menuExit = self.filemenu.Append(wx.ID_EXIT, "&Exit", "Terminate the program")
self.menuBar.Append(self.filemenu, "&File")
self.menuBar.Append(self.helpmenu, "&Help")
self.SetMenuBar(self.menuBar)
self.Bind(wx.EVT_MENU, self.OnAbout, self.menuAbout)
self.Bind(wx.EVT_MENU, self.OnHelp, self.menuHelp)
self.Bind(wx.EVT_MENU, self.OnExit, self.menuExit)
self.Bind(wx.EVT_MENU, self.OnClear, self.menuClear)
## buttons
self.panel = wx.Panel(self)
self.main_box = wx.BoxSizer(wx.VERTICAL)
file_box = wx.BoxSizer(wx.HORIZONTAL)
file_button = wx.Button(self.panel, label = 'Select files', size = (90, 30))
file_box.Add(file_button)
self.file_text = wx.TextCtrl(self.panel)
self.file_text.Disable()
file_box.Add(self.file_text, proportion = 1, flag = wx.EXPAND | wx.LEFT, border = 5)
self.main_box.Add(file_box, flag = wx.EXPAND | wx.ALL, border = 10)
self.main_box.Add((-1,10))
label_box = wx.BoxSizer(wx.HORIZONTAL)
label_button = wx.Button(self.panel, label = 'Conditions', size = (90, 30))
label_box.Add(label_button)
self.label_text = wx.TextCtrl(self.panel)
self.label_text.Disable()
label_box.Add(self.label_text, proportion = 1, flag = wx.EXPAND | wx.LEFT, border = 5)
self.main_box.Add(label_box, flag = wx. EXPAND | wx.RIGHT | wx.LEFT, border = 10)
self.main_box.Add((-1,30))
options_box = wx.BoxSizer(wx.HORIZONTAL)
options_button = wx.Button(self.panel, label='Options', size = (70, 30))
options_box.Add(options_button)
self.main_box.Add(options_box, flag = wx.ALIGN_RIGHT | wx.RIGHT, border = 10)
self.main_box.Add((-1,10))
end_box = wx.BoxSizer(wx.HORIZONTAL)
self.go_btn = wx.Button(self.panel, label = 'Go', size = (70, 30))
self.go_btn.Disable()
end_box.Add(self.go_btn, flag = wx.BOTTOM, border = 5)
cancel_btn = wx.Button(self.panel, label = 'Cancel', size = (70, 30))
end_box.Add(cancel_btn, flag = wx.LEFT | wx.BOTTOM, border = 5)
self.main_box.Add(end_box, flag = wx.ALIGN_RIGHT | wx.RIGHT, border = 10)
self.panel.SetSizer(self.main_box)
self.Bind(wx.EVT_BUTTON, self.OnFiles, file_button)
self.Bind(wx.EVT_BUTTON, self.conditions, label_button)
self.Bind(wx.EVT_BUTTON, self.OnOptions, options_button)
self.go_btn.Bind(wx.EVT_BUTTON, self.OnGo)
self.Bind(wx.EVT_BUTTON, self.OnCancel, cancel_btn)
self.labels = []
self.files = []
self.Center()
def OnOptions(self, e):
self.new = OptionWindow(parent=None, id=-1)
self.new.Show()
def OnAbout(self, e):
dlg = wx.MessageDialog(self, "This is a program to perform a representational similarity analysis on functional magnetic resonance imaging data.\n\n"
"The analysis is following the principles described in the paper 'Representational Similarity Analysis - Connecting"
" the Branches of Systems Neuroscience' by Nikolaus Kriegeskorte, Marieke Mur and Peter Bandettini (2008). \n\nIt is the"
" result of a project work at Maastricht University by Pia Schroeder, Amelie Haugg and Julia Brehm under the supervision of Thomas Emmerling."
"\n\nFor correspondence please refer to https://github.com/thomastweets/PythonRSA", "About this program")
dlg.ShowModal()
dlg.Destroy()
def OnHelp(self, e):
webbrowser.open("https://github.com/thomastweets/PythonRSA/blob/master/README.md")
#dlg = wx.MessageDialog(self, "", "Help for this program")
#dlg.ShowModal()
#dlg.Destroy()
def OnExit(self, e):
self.Close(True)
def OnClear(self, e):
self.files = []
self.labels = []
self.file_text.ChangeValue(str(''))
self.label_text.ChangeValue(str(''))
rsa.matrix_plot1 = True
rsa.matrix_plot2 = False
rsa.bar_plot = False
rsa.correlations1 = False
rsa.correlations2 = False
rsa.pvalues = False
rsa.no_relabelings = 10000
rsa.dist_metric = 1
rsa.output_first = True
rsa.output_second = False
rsa.scale_to_max = False
global files_number
files_number = 0
self.go_btn.Disable()
def OnFiles(self, event):
dialog = wx.FileDialog(self, "Choose files:", os.getcwd(), " ","*.vom", wx.FD_OPEN|wx.FD_MULTIPLE)
self.files = []
if dialog.ShowModal() == wx.ID_OK:
self.paths = dialog.GetPaths()
# myfiles contains all the file names
for path in self.paths:
self.files.append(os.path.basename(path).encode("utf-8"))
global files_number
if len(self.files) > 1:
files_number = 1
else:
files_number = 0
if self.files:
self.file_text.ChangeValue(str(', '.join(self.files)))
self.go_btn.Enable()
dialog.Destroy()
def conditions(self, event):
self.textinput = wx.TextEntryDialog(self, "Type in condition names separated by a white space", "Condition labels")
if self.textinput.ShowModal() == wx.ID_OK:
self.input = self.textinput.GetValue()
# labels contains a list of all conditions
self.labels = self.input.split()
self.labels = [label.encode("utf-8") for label in self.labels]
if self.labels:
self.label_text.ChangeValue(str(', '.join(self.labels)))
self.textinput.Destroy()
def OnGo(self, e):
if self.labels == ['Tetris']:
import Tetris
else:
wait = wx.BusyCursor()
rsa.RSA(self.paths, self.files, self.labels)
del wait
def OnCancel(self, e):
self.Close(True)
class OptionWindow(wx.Frame):
def __init__(self, parent, id):
wx.Frame.__init__(self, parent, id, 'Options',
style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER ^ wx.MINIMIZE_BOX ^ wx.MAXIMIZE_BOX,
size=(400,500))
self.InitOpt()
def InitOpt(self):
self.panel = wx.Panel(self)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add((-1,20))
self.line1 = wx.StaticLine(self.panel, wx.ID_ANY, style=wx.LI_VERTICAL)
self.vbox.Add(self.line1, 0, wx.ALL | wx.EXPAND, 5)
self.vbox.Add((-1,10))
# Check box: First-order RDMs
self.RDM1_box = wx.BoxSizer(wx.HORIZONTAL)
self.RDM1_cb = wx.CheckBox(self.panel, label = 'First order RDMs')
self.RDM1_cb.SetValue(rsa.output_first)
self.RDM1_cb.Bind(wx.EVT_CHECKBOX, self.OnSelectRDM1)
self.RDM1_box.Add(self.RDM1_cb)
self.vbox.Add(self.RDM1_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Check box: Matrix plots
self.mplot1_box = wx.BoxSizer(wx.HORIZONTAL)
self.mplot1_box.Add((25,-1))
self.mplot1_cb = wx.CheckBox(self.panel, label = 'Matrix plots')
self.mplot1_cb.SetValue(rsa.matrix_plot1)
self.mplot1_box.Add(self.mplot1_cb)
self.vbox.Add(self.mplot1_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Check box: First-order correlations
self.correlations1_box = wx.BoxSizer(wx.HORIZONTAL)
self.correlations1_box.Add((25,-1))
self.correlations1_cb = wx.CheckBox(self.panel, label = 'Correlations')
self.correlations1_cb.SetValue(rsa.correlations1)
self.correlations1_box.Add(self.correlations1_cb)
self.vbox.Add(self.correlations1_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Check box: Scale to maximum distance
self.scale_box = wx.BoxSizer(wx.HORIZONTAL)
self.scale_box.Add((25,-1))
self.scale_cb = wx.CheckBox(self.panel, label='Scale to max')
self.scale_cb.SetValue(rsa.scale_to_max)
self.scale_box.Add(self.scale_cb)
self.vbox.Add(self.scale_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Dropdown menu for distance metric
self.drop_box = wx.BoxSizer(wx.HORIZONTAL)
self.drop_box.Add((25,-1))
self.drop_label = wx.StaticText(self.panel, label = 'Distance metric ')
self.drop_box.Add(self.drop_label)
self.distances = ['Correlation distance', 'Euclidean distance', 'Absolute activation difference']
self.dropdown = wx.ComboBox(self.panel, value = self.distances[rsa.dist_metric-1], choices = self.distances, style=wx.CB_READONLY)
self.drop_box.Add(self.dropdown)
self.vbox.Add(self.drop_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,20))
self.line2 = wx.StaticLine(self.panel, wx.ID_ANY, style=wx.LI_VERTICAL)
self.vbox.Add(self.line2, 0, wx.ALL | wx.EXPAND, 5)
self.vbox.Add((-1,10))
# Check box: Second-order RDM
self.RDM2_box = wx.BoxSizer(wx.HORIZONTAL)
self.RDM2_cb = wx.CheckBox(self.panel, label = 'Second order RDMs')
self.RDM2_cb.SetValue(rsa.output_second)
self.RDM2_cb.Bind(wx.EVT_CHECKBOX, self.OnSelectRDM2)
self.RDM2_box.Add(self.RDM2_cb)
self.vbox.Add(self.RDM2_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# only checkable if you have chosen enough files
self.RDM2_cb.Disable()
if files_number == 1:
self.RDM2_cb.Enable()
# Check box: Matrix plots
self.mplot2_box = wx.BoxSizer(wx.HORIZONTAL)
self.mplot2_box.Add((25,-1))
self.mplot2_cb = wx.CheckBox(self.panel, label = 'Matrix plots')
self.mplot2_cb.SetValue(rsa.matrix_plot2)
self.mplot2_box.Add(self.mplot2_cb)
self.vbox.Add(self.mplot2_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Check box: Bar plots
self.bplot_box = wx.BoxSizer(wx.HORIZONTAL)
self.bplot_box.Add((25,-1))
self.bplot_cb = wx.CheckBox(self.panel, label = 'Bar plots')
self.bplot_cb.SetValue(rsa.bar_plot)
self.bplot_box.Add(self.bplot_cb)
self.vbox.Add(self.bplot_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Check box: Second-order correlations
self.correlations2_box = wx.BoxSizer(wx.HORIZONTAL)
self.correlations2_box.Add((25,-1))
self.correlations2_cb = wx.CheckBox(self.panel, label = 'Correlations')
self.correlations2_cb.SetValue(rsa.correlations2)
self.correlations2_box.Add(self.correlations2_cb)
self.vbox.Add(self.correlations2_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Check box: p-values
self.p_box = wx.BoxSizer(wx.HORIZONTAL)
self.p_box.Add((25,-1))
self.p_cb = wx.CheckBox(self.panel, label='p-values')
self.p_cb.SetValue(rsa.pvalues)
self.p_box.Add(self.p_cb)
self.vbox.Add(self.p_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# No of permutations SpinControl
self.perm_box = wx.BoxSizer(wx.HORIZONTAL)
self.perm_box.Add((25,-1))
self.perm_label = wx.StaticText(self.panel, label = 'No. of Permutations ')
self.perm_box.Add(self.perm_label)
self.perm_spin = wx.SpinCtrl(self.panel, value=str(rsa.no_relabelings), min=100, max = 100000)
self.perm_box.Add(self.perm_spin, proportion = 1)
self.vbox.Add(self.perm_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
self.line3 = wx.StaticLine(self.panel, wx.ID_ANY, style=wx.LI_VERTICAL)
self.vbox.Add(self.line3, 0, wx.ALL | wx.EXPAND, 5)
self.vbox.Add((-1,50))
# Dis-/Enable options
self.OnSelectRDM1([])
self.OnSelectRDM2([])
# Done and Cancel Buttons
self.end_box = wx.BoxSizer(wx.HORIZONTAL)
self.done_btn = wx.Button(self.panel, label = 'Done', size = (70, 30))
self.done_btn.Bind(wx.EVT_BUTTON, self.OnDone)
self.end_box.Add(self.done_btn, flag = wx.BOTTOM, border = 5)
self.cancel_btn = wx.Button(self.panel, label = 'Cancel', size = (70, 30))
self.cancel_btn.Bind(wx.EVT_BUTTON, self.OnCancel)
self.end_box.Add(self.cancel_btn, flag = wx.LEFT | wx.BOTTOM, border = 5)
self.vbox.Add(self.end_box, flag = wx.ALIGN_RIGHT | wx.RIGHT, border = 10)
self.panel.SetSizer(self.vbox)
self.Center()
def OnSelectRDM1(self,e):
if self.RDM1_cb.GetValue():
self.mplot1_cb.Enable()
self.correlations1_cb.Enable()
self.scale_cb.Enable()
self.dropdown.Enable()
else:
self.mplot1_cb.Disable()
self.correlations1_cb.Disable()
self.scale_cb.Disable()
self.dropdown.Disable()
def OnSelectRDM2(self,e):
if self.RDM2_cb.GetValue() and files_number == 1:
self.bplot_cb.Enable()
self.mplot2_cb.Enable()
self.p_cb.Enable()
self.correlations2_cb.Enable()
self.perm_spin.Enable()
else:
self.bplot_cb.Disable()
self.p_cb.Disable()
self.perm_spin.Disable()
self.mplot2_cb.Disable()
self.correlations2_cb.Disable()
def OnDone(self,e):
rsa.output_first = self.RDM1_cb.GetValue()
rsa.output_second = self.RDM2_cb.GetValue()
rsa.matrix_plot1 = self.mplot1_cb.GetValue()
rsa.matrix_plot2 = self.mplot2_cb.GetValue()
rsa.bar_plot = self.bplot_cb.GetValue()
rsa.correlations1 = self.correlations1_cb.GetValue()
rsa.correlations2 = self.correlations2_cb.GetValue()
rsa.pvalues = self.p_cb.GetValue()
rsa.scale_to_max = self.scale_cb.GetValue()
rsa.no_relabelings = self.perm_spin.GetValue()
rsa.dist_metric = self.dropdown.GetSelection()+1
self.Close()
def OnCancel(self,e):
self.Close()
def main():
GUI = wx.App()
RSA_GUI(None, 'RSA')
GUI.MainLoop()
if __name__ == '__main__':
main()
| gpl-2.0 | 8,023,130,351,131,065,000 | 35.995157 | 179 | 0.59729 | false | 3.290052 | false | false | false |
Vijfhoek/oyoyo | oyoyo/cmdhandler.py | 1 | 6875 | # Copyright (c) 2008 Duncan Fordyce
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import inspect
import logging
import sys
import traceback
from oyoyo import helpers
from oyoyo.parse import parse_nick
# Python < 3 compatibility
if sys.version_info < (3,):
class bytes(object):
def __new__(self, b='', encoding='utf8'):
return str(b)
def protected(func):
""" decorator to protect functions from being called """
func.protected = True
return func
class CommandError(Exception):
def __init__(self, cmd):
self.cmd = cmd
class NoSuchCommandError(CommandError):
def __str__(self):
return 'No such command "%s"' % ".".join(self.cmd)
class ProtectedCommandError(CommandError):
def __str__(self):
return 'Command "%s" is protected' % ".".join(self.cmd)
class CommandHandler(object):
""" The most basic CommandHandler """
def __init__(self, client):
self.client = client
@protected
def get(self, in_command_parts):
""" finds a command
commands may be dotted. each command part is checked that it does
not start with and underscore and does not have an attribute
"protected". if either of these is true, ProtectedCommandError
is raised.
its possible to pass both "command.sub.func" and
["command", "sub", "func"].
"""
if isinstance(in_command_parts, (str, bytes)):
in_command_parts = in_command_parts.split(bytes('.', 'ascii'))
command_parts = in_command_parts[:]
p = self
while command_parts:
cmd = command_parts.pop(0).decode('ascii')
if cmd.startswith('_'):
raise ProtectedCommandError(in_command_parts)
try:
f = getattr(p, cmd)
except AttributeError:
raise NoSuchCommandError(in_command_parts)
if hasattr(f, 'protected'):
raise ProtectedCommandError(in_command_parts)
if isinstance(f, CommandHandler) and command_parts:
return f.get(command_parts)
p = f
return f
@protected
def run(self, command, *args):
""" finds and runs a command """
logging.debug("processCommand %s(%s)" % (command, args))
try:
f = self.get(command)
except NoSuchCommandError:
self.__unhandled__(command, *args)
return
logging.debug('f %s' % f)
try:
f(*args)
except Exception, e:
logging.error('command raised %s' % e)
logging.error(traceback.format_exc())
raise CommandError(command)
@protected
def __unhandled__(self, cmd, *args):
"""The default handler for commands. Override this method to
apply custom behavior (example, printing) unhandled commands.
"""
logging.debug('unhandled command %s(%s)' % (cmd, args))
class DefaultCommandHandler(CommandHandler):
""" CommandHandler that provides methods for the normal operation of IRC.
If you want your bot to properly respond to pings, etc, you should subclass this.
"""
def ping(self, prefix, server):
self.client.send('PONG', server)
class DefaultBotCommandHandler(CommandHandler):
""" default command handler for bots. methods/attributes are made
available as commands """
@protected
def getVisibleCommands(self, obj=None):
test = (lambda x: isinstance(x, CommandHandler) or \
inspect.ismethod(x) or inspect.isfunction(x))
members = inspect.getmembers(obj or self, test)
return [m for m, _ in members
if (not m.startswith('_') and
not hasattr(getattr(obj, m), 'protected'))]
def help(self, sender, dest, arg=None):
"""list all available commands or get help on a specific command"""
logging.info('help sender=%s dest=%s arg=%s' % (sender, dest, arg))
if not arg:
commands = self.getVisibleCommands()
commands.sort()
helpers.msg(self.client, dest,
"available commands: %s" % " ".join(commands))
else:
try:
f = self.get(arg)
except CommandError, e:
helpers.msg(self.client, dest, str(e))
return
doc = f.__doc__.strip() if f.__doc__ else "No help available"
if not inspect.ismethod(f):
subcommands = self.getVisibleCommands(f)
if subcommands:
doc += " [sub commands: %s]" % " ".join(subcommands)
helpers.msg(self.client, dest, "%s: %s" % (arg, doc))
class BotCommandHandler(DefaultCommandHandler):
""" complete command handler for bots """
def __init__(self, client, command_handler):
DefaultCommandHandler.__init__(self, client)
self.command_handler = command_handler
def privmsg(self, prefix, dest, msg):
self.tryBotCommand(prefix, dest, msg)
@protected
def tryBotCommand(self, prefix, dest, msg):
""" tests a command to see if its a command for the bot, returns True
and calls self.processBotCommand(cmd, sender) if its is.
"""
logging.debug("tryBotCommand('%s' '%s' '%s')" % (prefix, dest, msg))
if dest == self.client.nick:
dest = parse_nick(prefix)[0]
elif msg.startswith(self.client.nick):
msg = msg[len(self.client.nick)+1:]
else:
return False
msg = msg.strip()
parts = msg.split(' ', 1)
command = parts[0]
arg = parts[1:]
try:
self.command_handler.run(command, prefix, dest, *arg)
except CommandError, e:
helpers.msg(self.client, dest, str(e))
return True
| mit | -8,005,940,691,711,680,000 | 31.429245 | 85 | 0.613236 | false | 4.291511 | false | false | false |
RIKSOF/scspell-jenkins | scspell_lib/_util.py | 1 | 1481 | ############################################################################
# scspell
# Copyright (C) 2009 Paul Pelzl
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
############################################################################
"""
_util -- utility functions which may be useful across the source tree.
"""
# Settings for this session
VERBOSITY_NORMAL = 1
VERBOSITY_DEBUG = 2
VERBOSITY_MAX = VERBOSITY_DEBUG
SETTINGS = {'verbosity' : VERBOSITY_NORMAL}
def mutter(level, text):
"""Print text to the console, if the level is not higher than the
current verbosity setting."""
if level <= SETTINGS['verbosity']:
print text
def set_verbosity(value):
"""Set the verbosity level to a given integral value. The constants
VERBOSITY_* are good choices."""
SETTINGS['verbosity'] = value
# scspell-id: b114984a-c7aa-40a8-9a53-b54fb6a52582
| gpl-2.0 | 4,990,107,826,805,204,000 | 32.659091 | 76 | 0.654288 | false | 3.991914 | false | false | false |
robotgear/robotgear | robotgear/settings.py | 1 | 3588 | """
Django settings for robotgear project.
Generated by 'django-admin startproject' using Django 1.11.7.
"""
import os
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'semanticuiforms',
'django_q',
'users',
'teams',
'posts'
]
try:
env = os.environ['ROBOTGEAR_ENV']
except KeyError:
env = 'DEBUG'
if env == 'DEBUG':
DEBUG = True
SECRET_KEY = '1$(%%u4n_(w%@6u&2%lgm^93-in4%8t&pd=o)0c_d(_n7(u&#@'
ALLOWED_HOSTS = []
INSTALLED_APPS += ['debug_toolbar', ]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': 'db',
'PORT': '5432',
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
elif env == 'PROD':
pass
elif env == 'TEST':
pass
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Application definition
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware'
]
ROOT_URLCONF = 'robotgear.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages'
],
},
},
]
WSGI_APPLICATION = 'robotgear.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
]
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Configure custom user model
AUTH_USER_MODEL = 'users.User'
INTERNAL_IPS = '127.0.0.1'
LOGIN_URL = 'login'
Q_CLUSTER = {
'name': 'robotgear',
'workers': 2,
'recycle': 500,
'catch_up': False,
"ack_failures": True,
'retry': 100000,
'label': 'Task Queue',
'orm': 'default'
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'unix:/tmp/memcached.sock',
}
}
| mit | 421,187,835,858,788,700 | 21.566038 | 91 | 0.62709 | false | 3.410646 | false | false | false |
commonsense/divisi | csc/divisi/flavors.py | 1 | 5345 | from csc.divisi.tensor import DictTensor
from csc.divisi.ordered_set import OrderedSet
from csc.divisi.labeled_view import LabeledView
def add_triple_to_matrix(matrix, triple, value=1.0):
'''
Adds a triple (left, relation, right) to the matrix in the 2D unfolded format.
This is the new add_assertion_tuple.
'''
left, relation, right = triple
lfeature = ('left', relation, left)
rfeature = ('right', relation, right)
matrix.inc((left, rfeature), value)
matrix.inc((right, lfeature), value)
def set_triple_in_matrix(matrix, triple, value=1.0):
''' Sets a triple (left, relation, right) in the matrix in the 2D
unfolded format to the specified value.
'''
left, relation, right = triple
lfeature = ('left', relation, left)
rfeature = ('right', relation, right)
matrix[left, rfeature] = value
matrix[right, lfeature] = value
###
### Assertion Tensors
###
class AssertionTensor(LabeledView):
'''
All AssertionTensors have the following functions:
.add_triple(triple, value)
.set_triple(triple, value)
.add_identity(text, value=1.0, relation='Identity')
where triple is (concept1, relation, concept2).
They also have the convenience classmethod from_triples.
'''
def add_identity(self, text, value=1.0, relation='Identity'):
self.add_triple((text, relation, text), value)
def bake(self):
'''
Simplify the representation.
'''
return LabeledView(self.tensor, self._labels)
def add_triples(self, triples, accumulate=True, constant_weight=None):
if accumulate: add = self.add_triple
else: add = self.set_triple
if constant_weight:
for triple in triples:
add(triple, constant_weight)
else:
for triple, weight in triples:
add(triple, weight)
@classmethod
def from_triples(cls, triples, accumulate=True, constant_weight=None):
mat = cls()
mat.add_triples(triples, accumulate, constant_weight)
return mat
def add_identities(self, value=1.0, relation='Identity'):
if not value: return # 0 or False means not to actually add identities.
for concept in self.concepts():
self.add_triple((concept, relation, concept), value)
class ConceptByFeatureMatrix(AssertionTensor):
'''
This is the typical AnalogySpace matrix. It stores each assertion
twice: once as (c1, ('right', rel, c2)) and once as (c2, ('left',
rel, c1)).
This class is a convenience for building matrices in this
format. Once you've add_triple'sed everything, you can call
.bake() to convert it back to a plain old LabeledView of a
DictTensor, just like make_sparse_labeled_tensor does.
'''
def __init__(self):
super(ConceptByFeatureMatrix, self).__init__(
DictTensor(2), [OrderedSet() for _ in '01'])
add_triple = add_triple_to_matrix
set_triple = set_triple_in_matrix
def concepts(self): return self.label_list(0)
class FeatureByConceptMatrix(AssertionTensor):
'''
A transposed ConceptByFeatureMatrix; see it for documentation.
'''
def __init__(self):
super(FeatureByConceptMatrix, self).__init__(
DictTensor(2), [OrderedSet() for _ in '01'])
def add_triple(self, triple, value=1.0):
left, relation, right = triple
lfeature = ('left', relation, left)
rfeature = ('right', relation, right)
self.inc((rfeature, left), value)
self.inc((lfeature, right), value)
def set_triple(self, triple, value=1.0):
left, relation, right = triple
lfeature = ('left', relation, left)
rfeature = ('right', relation, right)
self[rfeature, left] = value
self[lfeature, right] = value
def concepts(self): return self.label_list(1)
class ConceptRelationConceptTensor(AssertionTensor):
'''
This is a straightforward encoding of concepts as a 3D tensor.
'''
def __init__(self):
# FIXME: yes this saves space, but it might make a row or column be zero.
concepts, relations = OrderedSet(), OrderedSet()
super(ConceptRelationConceptTensor, self).__init__(
DictTensor(3), [concepts, relations, concepts])
def concepts(self): return self.label_list(0)
def add_triple(self, triple, value=1.0):
left, relation, right = triple
self.inc((left, relation, right), value)
def set_triple(self, triple, value=1.0):
left, relation, right = triple
self[left, relation, right] = value
class MirroringCRCTensor(ConceptRelationConceptTensor):
'''
Every assertion (c1, r, c2) in this tensor has an inverse,
(c2, r', c1).
This is analogous to how the 2D tensor makes left and right features.
Inverse relations are constructed from ordinary relations by
prefixing a '-'.
'''
def add_triple(self, triple, value=1.0):
left, relation, right = triple
self.inc((left, relation, right), value) # normal
self.inc((right, '-'+relation, left), value) # inverse
    def set_triple(self, triple, value=1.0):
        left, relation, right = triple
        self[left, relation, right] = value      # normal
        self[right, '-'+relation, left] = value  # inverse
| gpl-3.0 | -1,865,229,490,018,606,000 | 32.198758 | 82 | 0.637605 | false | 3.80427 | false | false | false |
moyaproject/moya | moya/elements/registry.py | 1 | 4712 | from __future__ import unicode_literals
from .. import errors
from ..tools import extract_namespace
from .. import namespaces
from ..compat import itervalues
from collections import defaultdict
import inspect
class Meta(object):
logic_skip = False
virtual_tag = False
is_call = False
is_try = False
is_loop = False
app_first_arg = False
text_nodes = None
trap_exceptions = False
translate = False
class ElementRegistry(object):
default_registry = None
_registry_stack = []
def clear(self):
self._registry.clear()
self._dynamic_elements.clear()
del self._registry_stack[:]
@classmethod
def push_registry(cls, registry):
cls._registry_stack.append(registry)
@classmethod
def pop_registry(cls):
cls._registry_stack.pop()
@classmethod
def get_default(cls):
return cls._registry_stack[-1]
def __init__(self, update_from_default=True):
self._registry = defaultdict(dict)
self._dynamic_elements = {}
if update_from_default:
self._registry.update(self.default_registry._registry)
self._dynamic_elements.update(self.default_registry._dynamic_elements)
def clone(self):
"""Return a copy of this registry"""
registry = ElementRegistry(update_from_default=False)
registry._registry = self._registry.copy()
registry._dynamic_elements = self._dynamic_elements.copy()
return registry
def set_default(self):
"""Reset this registry to the default registry (before project loaded)"""
self._registry = self.default_registry._registry.copy()
self._dynamic_elements = self.default_registry._dynamic_elements.copy()
def register_element(self, xmlns, name, element):
"""Add a dynamic element to the element registry"""
xmlns = xmlns or namespaces.run
if name in self._registry[xmlns]:
element_class = self._registry[xmlns][name]
definition = getattr(element_class, "_location", None)
if definition is None:
definition = inspect.getfile(element_class)
if xmlns:
raise errors.ElementError(
'<{}> already registered in "{}" for xmlns "{}"'.format(
name, definition, xmlns
),
element=getattr(element, "element", element),
)
else:
raise errors.ElementError(
'<{}/> already registered in "{}"'.format(name, definition),
element=element,
)
self._registry[xmlns][name] = element
def add_dynamic_registry(self, xmlns, element_callable):
"""Add a dynamic registry (element factory)"""
self._dynamic_elements[xmlns] = element_callable
def clear_registry(self):
"""Clear the registry (called on archive reload)"""
self._registry.clear()
def get_elements_in_xmlns(self, xmlns):
"""Get all elements defined within a given namespace"""
return self._registry.get(xmlns, {})
def get_elements_in_lib(self, long_name):
"""Get all elements defined by a given library"""
lib_elements = []
for namespace in itervalues(self._registry):
lib_elements.extend(
element
for element in itervalues(namespace)
if element._lib_long_name == long_name
)
return lib_elements
def get_element_type(self, xmlns, name):
"""Get an element by namespace and name"""
if xmlns in self._dynamic_elements:
return self._dynamic_elements[xmlns](name)
return self._registry.get(xmlns, {}).get(name, None)
def find_xmlns(self, name):
"""Find the xmlns with contain a given tag, or return None"""
for xmlns in sorted(self._registry.keys()):
if name in self._registry[xmlns]:
return xmlns
return None
def check_namespace(self, xmlns):
"""Check if a namespace exists in the registry"""
return xmlns in self._registry
def set_registry(self, registry):
"""Restore a saved registry"""
self._registry = registry._registry.copy()
self._dynamic_elements = registry._dynamic_elements.copy()
def get_tag(self, tag):
"""Get a tag from it's name (in Clarke's notation)"""
return self.get_element_type(*extract_namespace(tag))
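# Illustrative sketch (not part of the original module): tags are looked up
# in Clarke's notation, i.e. '{namespace}tagname'. This assumes the default
# namespace constant exported by the namespaces module.
def _demo_get_tag(registry):
    tag = "{%s}if" % namespaces.default
    # Equivalent to registry.get_element_type(namespaces.default, "if")
    return registry.get_tag(tag)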
default_registry = ElementRegistry.default_registry = ElementRegistry(
update_from_default=False
)
ElementRegistry.push_registry(ElementRegistry.default_registry)
| mit | -1,306,243,355,464,368,600 | 31.951049 | 82 | 0.610781 | false | 4.543877 | false | false | false |
cmcqueen/simplerandom | python/python3/simplerandom/iterators/_iterators_py.py | 1 | 40947 |
from simplerandom._bitcolumnmatrix import BitColumnMatrix
__all__ = [
"Cong",
"SHR3",
"MWC1",
"MWC2",
"MWC64",
"KISS",
"KISS2",
"LFSR113",
"LFSR88",
"_traverse_iter",
]
def _traverse_iter(o, tree_types=(list, tuple)):
"""Iterate over nested containers and/or iterators.
This allows generator __init__() functions to be passed seeds either as
a series of arguments, or as a list/tuple.
"""
SIMPLERANDOM_BITS = 32
SIMPLERANDOM_MOD = 2**SIMPLERANDOM_BITS
SIMPLERANDOM_MASK = SIMPLERANDOM_MOD - 1
if isinstance(o, tree_types) or getattr(o, '__iter__', False):
for value in o:
for subvalue in _traverse_iter(value):
while True:
yield subvalue & SIMPLERANDOM_MASK
subvalue >>= SIMPLERANDOM_BITS
# If value is negative, then it effectively has infinitely extending
# '1' bits (modelled as a 2's complement representation). So when
# right-shifting it, it will eventually get to -1, and any further
# right-shifting will not change it.
if subvalue == 0 or subvalue == -1:
break
else:
yield o
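# Illustrative sketch (not part of the original module): _traverse_iter
# flattens nested seed containers and splits values wider than 32 bits into
# 32-bit words, least-significant word first.
def _demo_traverse_iter():
    seeds = [1, (2, [3]), 2**40 + 5]
    # 2**40 + 5 is emitted as two 32-bit words: 5, then 2**8.
    return list(_traverse_iter(seeds))      # [1, 2, 3, 5, 256]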
def _repeat_iter(input_iter):
"""Iterate over the input iter values. Then repeat the last value
indefinitely. This is useful to repeat seed values when an insufficient
number of seeds are provided.
E.g. KISS(1) effectively becomes KISS(1, 1, 1, 1), rather than (if we just
used default values) KISS(1, default-value, default-value, default-value)
It is better to repeat the last seed value, rather than just using default
values. Given two generators seeded with an insufficient number of seeds,
repeating the last seed value means their states are more different from
each other, with less correlation between their generated outputs.
"""
last_value = None
for value in input_iter:
last_value = value
yield value
if last_value is not None:
while True:
yield last_value
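# Illustrative sketch (not part of the original module): _repeat_iter pads a
# short seed sequence by repeating its final value indefinitely.
def _demo_repeat_iter():
    from itertools import islice
    return list(islice(_repeat_iter(iter([7, 9])), 5))      # [7, 9, 9, 9, 9]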
def _next_seed_int32_or_default(seed_iter, default_value):
try:
seed_item = next(seed_iter)
except StopIteration:
return default_value
else:
if seed_item is None:
return default_value
else:
return (int(seed_item) & 0xFFFFFFFF)
def _geom_series_uint32(r, n):
"""Unsigned integer calculation of sum of geometric series:
1 + r + r^2 + r^3 + ... r^(n-1)
summed to n terms.
Calculated modulo 2**32.
Use the formula (r**n - 1) / (r - 1)
"""
if n == 0:
return 0
if n == 1 or r == 0:
return 1
m = 2**32
# Split (r - 1) into common factors with the modulo 2**32 -- i.e. all
# factors of 2; and other factors which are coprime with the modulo 2**32.
other_factors = r - 1
common_factor = 1
while (other_factors % 2) == 0:
other_factors //= 2
common_factor *= 2
other_factors_inverse = pow(other_factors, m - 1, m)
numerator = pow(r, n, common_factor * m) - 1
return (numerator // common_factor * other_factors_inverse) % m
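# Illustrative sketch (not part of the original module): a brute-force
# cross-check of the closed-form geometric series calculation above.
def _demo_geom_series_uint32(r=69069, n=1000):
    direct = sum(pow(r, k, 2**32) for k in range(n)) % 2**32
    assert _geom_series_uint32(r, n) == direct
    return direct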
class Cong(object):
'''Congruential random number generator
This is a congruential generator with the widely used
69069 multiplier: x[n]=69069x[n-1]+12345. It has
period 2**32.
The leading half of its 32 bits seem to pass tests,
but bits in the last half are too regular. It fails
tests for which those bits play a significant role.
But keep in mind that it is a rare application for
which the trailing bits play a significant role. Cong
is one of the most widely used generators of the last
30 years, as it was the system generator for VAX and
was incorporated in several popular software packages,
all seemingly without complaint.
'''
SIMPLERANDOM_MOD = 2**32
SIMPLERANDOM_MAX = 2**32 - 1
CONG_CYCLE_LEN = 2**32
CONG_MULT = 69069
CONG_CONST = 12345
@staticmethod
def min():
return 0
@staticmethod
def max():
return Cong.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
self.cong = _next_seed_int32_or_default(seed_iter, 0)
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def sanitise(self):
pass
def __next__(self):
self.cong = (69069 * self.cong + 12345) & 0xFFFFFFFF
return self.cong
def current(self):
return self.cong
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
self.cong ^= value_int
next(self)
return self.cong
def __iter__(self):
return self
def getstate(self):
return (self.cong, )
def setstate(self, state):
(self.cong, ) = (int(val) & 0xFFFFFFFF for val in state)
def jumpahead(self, n):
# Cong.jumpahead(n) = r**n * x mod 2**32 +
# c * (1 + r + r**2 + ... + r**(n-1)) mod 2**32
# where r = 69069 and c = 12345.
#
# The part c * (1 + r + r**2 + ... + r**(n-1)) is a geometric series.
# For calculating geometric series mod 2**32, see:
# http://www.codechef.com/wiki/tutorial-just-simple-sum#Back_to_the_geometric_series
n = int(n) % self.CONG_CYCLE_LEN
mult_exp = pow(self.CONG_MULT, n, self.SIMPLERANDOM_MOD)
add_const = (_geom_series_uint32(self.CONG_MULT, n) * self.CONG_CONST) & 0xFFFFFFFF
self.cong = (mult_exp * self.cong + add_const) & 0xFFFFFFFF
def __repr__(self):
return self.__class__.__name__ + "(" + repr(int(self.cong)) + ")"
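# Illustrative sketch (not part of the original module): jumpahead(n) should
# land on exactly the same state as calling next() n times.
def _demo_cong_jumpahead(n=1000):
    stepped, jumped = Cong(12345), Cong(12345)
    for _ in range(n):
        next(stepped)
    jumped.jumpahead(n)
    assert stepped.current() == jumped.current()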
class SHR3(object):
'''3-shift-register random number generator
SHR3 is a 3-shift-register generator with period
2**32-1. It uses y[n]=y[n-1](I+L^13)(I+R^17)(I+L^5),
with the y's viewed as binary vectors, L the 32x32
binary matrix that shifts a vector left 1, and R its
transpose.
SHR3 seems to pass all except those related to the
binary rank test, since 32 successive values, as
binary vectors, must be linearly independent, while
32 successive truly random 32-bit integers, viewed
as binary vectors, will be linearly independent only
about 29% of the time.
'''
SIMPLERANDOM_MOD = 2**32
SIMPLERANDOM_MAX = 2**32 - 1
SHR3_CYCLE_LEN = 2**32 - 1
_SHR3_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,13)
_SHR3_MATRIX_b = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,-17)
_SHR3_MATRIX_c = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,5)
_SHR3_MATRIX = _SHR3_MATRIX_c * _SHR3_MATRIX_b * _SHR3_MATRIX_a
@staticmethod
def min():
return 1
@staticmethod
def max():
return SHR3.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
self.shr3 = _next_seed_int32_or_default(seed_iter, 0xFFFFFFFF)
self.sanitise()
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def sanitise(self):
if self.shr3 == 0:
# 0 is a bad seed. Invert to get a good seed.
self.shr3 = 0xFFFFFFFF
def __next__(self):
shr3 = self.shr3
shr3 ^= (shr3 & 0x7FFFF) << 13
shr3 ^= shr3 >> 17
shr3 ^= (shr3 & 0x7FFFFFF) << 5
self.shr3 = shr3
return shr3
def current(self):
return self.shr3
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
self.shr3 ^= value_int
self.sanitise()
next(self)
return self.shr3
def __iter__(self):
return self
def getstate(self):
return (self.shr3, )
def setstate(self, state):
(self.shr3, ) = (int(val) & 0xFFFFFFFF for val in state)
self.sanitise()
def jumpahead(self, n):
n = int(n) % self.SHR3_CYCLE_LEN
shr3 = pow(self._SHR3_MATRIX, n) * self.shr3
self.shr3 = shr3
def __repr__(self):
return self.__class__.__name__ + "(" + repr(int(self.shr3)) + ")"
class MWC2(object):
'''"Multiply-with-carry" random number generator
Very similar to MWC1, except that it concatenates the
two 16-bit MWC generators differently. The 'x'
generator is rotated 16 bits instead of just shifted
16 bits.
This gets much better test results than MWC1 in
L'Ecuyer's TestU01 test suite, so it should probably
be preferred.
'''
SIMPLERANDOM_MAX = 2**32 - 1
_MWC_UPPER_MULT = 36969
_MWC_LOWER_MULT = 18000
_MWC_UPPER_MODULO = _MWC_UPPER_MULT * 2**16 - 1
_MWC_LOWER_MODULO = _MWC_LOWER_MULT * 2**16 - 1
_MWC_UPPER_CYCLE_LEN = _MWC_UPPER_MULT * 2**16 // 2 - 1
_MWC_LOWER_CYCLE_LEN = _MWC_LOWER_MULT * 2**16 // 2 - 1
@staticmethod
def min():
return 0
@staticmethod
def max():
return MWC2.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
repeat_seed_iter = _repeat_iter(seed_iter)
self.mwc_upper = _next_seed_int32_or_default(repeat_seed_iter, 0xFFFFFFFF)
self.mwc_lower = _next_seed_int32_or_default(repeat_seed_iter, 0xFFFFFFFF)
self.sanitise()
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def sanitise(self):
self._sanitise_upper()
self._sanitise_lower()
def _sanitise_upper(self):
mwc_upper_orig = self.mwc_upper
# There are a few bad states--that is, any multiple of
# _MWC_UPPER_MODULO -- that is 0x9068FFFF (which is 36969 * 2**16 - 1).
sanitised_value = mwc_upper_orig % 0x9068FFFF
if sanitised_value == 0:
# Invert to get a good seed.
sanitised_value = (mwc_upper_orig ^ 0xFFFFFFFF) % 0x9068FFFF
self.mwc_upper = sanitised_value
def _sanitise_lower(self):
mwc_lower_orig = self.mwc_lower
# There are a few bad states--that is, any multiple of
# _MWC_LOWER_MODULO -- that is 0x464FFFFF (which is 18000 * 2**16 - 1).
sanitised_value = mwc_lower_orig % 0x464FFFFF
if sanitised_value == 0:
# Invert to get a good seed.
sanitised_value = (mwc_lower_orig ^ 0xFFFFFFFF) % 0x464FFFFF
self.mwc_lower = sanitised_value
def _next_upper(self):
self.mwc_upper = 36969 * (self.mwc_upper & 0xFFFF) + (self.mwc_upper >> 16)
def _next_lower(self):
self.mwc_lower = 18000 * (self.mwc_lower & 0xFFFF) + (self.mwc_lower >> 16)
def __next__(self):
# Note: this is apparently equivalent to:
# self.mwc_upper = (36969 * self.mwc_upper) % 0x9068FFFF
# self.mwc_lower = (18000 * self.mwc_lower) % 0x464FFFFF
# See Random Number Generation, Pierre L’Ecuyer, section 3.6 Linear Recurrences With Carry
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.136.6898&rep=rep1&type=pdf
self.mwc_upper = 36969 * (self.mwc_upper & 0xFFFF) + (self.mwc_upper >> 16)
self.mwc_lower = 18000 * (self.mwc_lower & 0xFFFF) + (self.mwc_lower >> 16)
return self.current() # call self.current() so that MWC1 can over-ride it
def current(self):
return (((self.mwc_upper & 0xFFFF) << 16) + (self.mwc_upper >> 16) + self.mwc_lower) & 0xFFFFFFFF
mwc = property(current) # Note that this must be over-ridden again in MWC1
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
current = self.current()
selector = (current >> 24) & 0x1
if selector == 0:
self.mwc_upper ^= value_int
self._sanitise_upper()
self._next_upper()
else:
self.mwc_lower ^= value_int
self._sanitise_lower()
self._next_lower()
return self.current()
def __iter__(self):
return self
def getstate(self):
return (self.mwc_upper, self.mwc_lower)
def setstate(self, state):
(self.mwc_upper, self.mwc_lower) = (int(val) & 0xFFFFFFFF for val in state)
self.sanitise()
def jumpahead(self, n):
# See next() note on functional equivalence.
n_upper = int(n) % self._MWC_UPPER_CYCLE_LEN
self.mwc_upper = pow(self._MWC_UPPER_MULT, n_upper, self._MWC_UPPER_MODULO) * self.mwc_upper % self._MWC_UPPER_MODULO
n_lower = int(n) % self._MWC_LOWER_CYCLE_LEN
self.mwc_lower = pow(self._MWC_LOWER_MULT, n_lower, self._MWC_LOWER_MODULO) * self.mwc_lower % self._MWC_LOWER_MODULO
def __repr__(self):
return self.__class__.__name__ + "(" + repr(int(self.mwc_upper)) + "," + repr(int(self.mwc_lower)) + ")"
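# Illustrative sketch (not part of the original module): the modular
# equivalence noted in __next__() is what makes jumpahead() agree with
# stepping the generator one value at a time.
def _demo_mwc2_jumpahead(n=500):
    stepped, jumped = MWC2(12345, 65437), MWC2(12345, 65437)
    for _ in range(n):
        next(stepped)
    jumped.jumpahead(n)
    assert stepped.getstate() == jumped.getstate()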
class MWC1(MWC2):
'''"Multiply-with-carry" random number generator
This is the MWC as defined in Marsaglia's 1999
newsgroup post.
This uses two MWC generators to generate high and
low 16-bit parts, which are then combined to make a
32-bit value.
The MWC generator concatenates two 16-bit multiply-
with-carry generators:
x[n]=36969x[n-1]+carry,
y[n]=18000y[n-1]+carry mod 2**16,
It has a period about 2**60.
This seems to pass all Marsaglia's Diehard tests.
However, it fails many of L'Ecuyer's TestU01
tests. The modified MWC2 generator passes many more
tests in TestU01, and should probably be preferred,
unless backwards compatibility is required.
'''
def current(self):
return (((self.mwc_upper & 0xFFFF) << 16) + self.mwc_lower) & 0xFFFFFFFF
# We have to over-ride this again, because of the way property() works.
mwc = property(current)
class MWC64(object):
'''"Multiply-with-carry" random number generator
This uses a single MWC generator with 64 bits to
generate a 32-bit value. The seeds should be 32-bit
values.
'''
SIMPLERANDOM_MAX = 2**32 - 1
_MWC64_MULT = 698769069
_MWC64_MODULO = _MWC64_MULT * 2**32 - 1
_MWC64_CYCLE_LEN = _MWC64_MULT * 2**32 // 2 - 1
@staticmethod
def min():
return 0
@staticmethod
def max():
return MWC64.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
repeat_seed_iter = _repeat_iter(seed_iter)
self.mwc_upper = _next_seed_int32_or_default(repeat_seed_iter, 0xFFFFFFFF)
self.mwc_lower = _next_seed_int32_or_default(repeat_seed_iter, 0xFFFFFFFF)
self.sanitise()
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def sanitise(self):
state64 = (self.mwc_upper << 32) + self.mwc_lower
temp = state64
was_changed = False
# There are a few bad seeds--that is, seeds that are a multiple of
# 0x29A65EACFFFFFFFF (which is 698769069 * 2**32 - 1).
if state64 >= 0x29A65EACFFFFFFFF:
was_changed = True
temp = state64 % 0x29A65EACFFFFFFFF
if temp == 0:
# Invert to get a good seed.
temp = (state64 ^ 0xFFFFFFFFFFFFFFFF) % 0x29A65EACFFFFFFFF
was_changed = True
if was_changed:
self.mwc_upper = temp >> 32
self.mwc_lower = temp & 0xFFFFFFFF
def __next__(self):
# Note: this is apparently equivalent to:
# temp64 = (self.mwc_upper << 32) + self.mwc_lower
# temp64 = (698769069 * temp64) % 0x29A65EACFFFFFFFF
# See reference in MWC2.next().
temp64 = 698769069 * self.mwc_lower + self.mwc_upper
self.mwc_lower = temp64 & 0xFFFFFFFF
self.mwc_upper = (temp64 >> 32) & 0xFFFFFFFF
return self.mwc_lower
def current(self):
return self.mwc_lower
mwc = property(current)
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
current = self.current()
selector = (current >> 24) & 0x1
if selector == 0:
self.mwc_upper ^= value_int
else:
self.mwc_lower ^= value_int
self.sanitise()
next(self)
return self.current()
def __iter__(self):
return self
def getstate(self):
return (self.mwc_upper, self.mwc_lower)
def setstate(self, state):
(self.mwc_upper, self.mwc_lower) = (int(val) & 0xFFFFFFFF for val in state)
self.sanitise()
def jumpahead(self, n):
# See MWC2.next() note on functional equivalence.
n = int(n) % self._MWC64_CYCLE_LEN
temp64 = (self.mwc_upper << 32) + self.mwc_lower
temp64 = pow(self._MWC64_MULT, n, self._MWC64_MODULO) * temp64 % self._MWC64_MODULO
self.mwc_lower = temp64 & 0xFFFFFFFF
self.mwc_upper = (temp64 >> 32) & 0xFFFFFFFF
def __repr__(self):
return self.__class__.__name__ + "(" + repr(int(self.mwc_upper)) + "," + repr(int(self.mwc_lower)) + ")"
class KISS(object):
'''"Keep It Simple Stupid" random number generator
It combines the MWC2, Cong, SHR3 generators. Period is
about 2**123.
This is based on, but not identical to, Marsaglia's
KISS generator as defined in his 1999 newsgroup post.
That generator most significantly has problems with its
SHR3 component (see notes on SHR3). Since we are not
keeping compatibility with the 1999 KISS generator for
that reason, we take the opportunity to slightly
update the MWC and Cong generators too.
'''
SIMPLERANDOM_MAX = 2**32 - 1
@staticmethod
def min():
return 0
@staticmethod
def max():
return KISS.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
repeat_seed_iter = _repeat_iter(seed_iter)
self.random_mwc = MWC2(repeat_seed_iter)
self.random_cong = Cong(repeat_seed_iter)
self.random_shr3 = SHR3(repeat_seed_iter)
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def __next__(self):
mwc_val = next(self.random_mwc)
cong_val = next(self.random_cong)
shr3_val = next(self.random_shr3)
return ((mwc_val ^ cong_val) + shr3_val) & 0xFFFFFFFF
def current(self):
return ((self.random_mwc.current() ^ self.random_cong.cong) + self.random_shr3.shr3) & 0xFFFFFFFF
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
current = self.current()
selector = (current >> 24) & 0x3
if selector == 0:
self.random_mwc.mwc_upper ^= value_int
self.random_mwc._sanitise_upper()
self.random_mwc._next_upper()
elif selector == 1:
self.random_mwc.mwc_lower ^= value_int
self.random_mwc._sanitise_lower()
self.random_mwc._next_lower()
elif selector == 2:
self.random_cong.cong ^= value_int
# Cong doesn't need any sanitising
next(self.random_cong)
else: # selector == 3
self.random_shr3.shr3 ^= value_int
self.random_shr3.sanitise()
next(self.random_shr3)
return self.current()
def __iter__(self):
return self
def getstate(self):
return (self.random_mwc.getstate(), self.random_cong.getstate(), self.random_shr3.getstate())
def setstate(self, state):
(mwc_state, cong_state, shr3_state) = state
self.random_mwc.setstate(mwc_state)
self.random_cong.setstate(cong_state)
self.random_shr3.setstate(shr3_state)
def jumpahead(self, n):
self.random_mwc.jumpahead(n)
self.random_cong.jumpahead(n)
self.random_shr3.jumpahead(n)
def _get_mwc_upper(self):
return self.random_mwc.mwc_upper
def _set_mwc_upper(self, value):
self.random_mwc.mwc_upper = value
mwc_upper = property(_get_mwc_upper, _set_mwc_upper)
def _get_mwc_lower(self):
return self.random_mwc.mwc_lower
def _set_mwc_lower(self, value):
self.random_mwc.mwc_lower = value
mwc_lower = property(_get_mwc_lower, _set_mwc_lower)
def _get_mwc(self):
return self.random_mwc.current()
mwc = property(_get_mwc)
def _get_shr3(self):
return self.random_shr3.shr3
def _set_shr3(self, value):
self.random_shr3.shr3 = value
shr3 = property(_get_shr3, _set_shr3)
def _get_cong(self):
return self.random_cong.cong
def _set_cong(self, value):
self.random_cong.cong = value
cong = property(_get_cong, _set_cong)
def __repr__(self):
return (self.__class__.__name__ + "(" + repr(int(self.mwc_upper)) +
"," + repr(int(self.mwc_lower)) +
"," + repr(int(self.cong)) +
"," + repr(int(self.shr3)) + ")")
class KISS2(object):
'''"Keep It Simple Stupid" random number generator
It combines the MWC64, Cong, SHR3 generators. Period
is about 2**123.
This is a slightly updated KISS generator design, from
a newsgroup post in 2003:
http://groups.google.com/group/sci.math/msg/9959175f66dd138f
The MWC component uses a single 64-bit calculation,
instead of two 32-bit calculations that are combined.
'''
SIMPLERANDOM_MAX = 2**32 - 1
@staticmethod
def min():
return 0
@staticmethod
def max():
return KISS2.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
repeat_seed_iter = _repeat_iter(seed_iter)
self.random_mwc = MWC64(repeat_seed_iter)
self.random_cong = Cong(repeat_seed_iter)
self.random_shr3 = SHR3(repeat_seed_iter)
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def __next__(self):
mwc_val = next(self.random_mwc)
cong_val = next(self.random_cong)
shr3_val = next(self.random_shr3)
return (mwc_val + cong_val + shr3_val) & 0xFFFFFFFF
def current(self):
return (self.random_mwc.current() + self.random_cong.cong + self.random_shr3.shr3) & 0xFFFFFFFF
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
current = self.current()
selector = (current >> 24) & 0x3
if selector == 0:
self.random_mwc.mwc_upper ^= value_int
self.random_mwc.sanitise()
next(self.random_mwc)
elif selector == 1:
self.random_mwc.mwc_lower ^= value_int
self.random_mwc.sanitise()
next(self.random_mwc)
elif selector == 2:
self.random_cong.cong ^= value_int
# Cong doesn't need any sanitising
next(self.random_cong)
else: # selector == 3
self.random_shr3.shr3 ^= value_int
self.random_shr3.sanitise()
next(self.random_shr3)
return self.current()
def __iter__(self):
return self
def getstate(self):
return (self.random_mwc.getstate(), self.random_cong.getstate(), self.random_shr3.getstate())
def setstate(self, state):
(mwc_state, cong_state, shr3_state) = state
self.random_mwc.setstate(mwc_state)
self.random_cong.setstate(cong_state)
self.random_shr3.setstate(shr3_state)
def jumpahead(self, n):
self.random_mwc.jumpahead(n)
self.random_cong.jumpahead(n)
self.random_shr3.jumpahead(n)
def _get_mwc_upper(self):
return self.random_mwc.mwc_upper
def _set_mwc_upper(self, value):
self.random_mwc.mwc_upper = value
mwc_upper = property(_get_mwc_upper, _set_mwc_upper)
def _get_mwc_lower(self):
return self.random_mwc.mwc_lower
def _set_mwc_lower(self, value):
self.random_mwc.mwc_lower = value
mwc_lower = property(_get_mwc_lower, _set_mwc_lower)
def _get_mwc(self):
return self.random_mwc.mwc
mwc = property(_get_mwc)
def _get_shr3(self):
return self.random_shr3.shr3
def _set_shr3(self, value):
self.random_shr3.shr3 = value
shr3 = property(_get_shr3, _set_shr3)
def _get_cong(self):
return self.random_cong.cong
def _set_cong(self, value):
self.random_cong.cong = value
cong = property(_get_cong, _set_cong)
def __repr__(self):
return (self.__class__.__name__ + "(" + repr(int(self.mwc_upper)) +
"," + repr(int(self.mwc_lower)) +
"," + repr(int(self.cong)) +
"," + repr(int(self.shr3)) + ")")
def lfsr_next_one_seed(seed_iter, min_value_shift):
"""High-quality seeding for LFSR generators.
The LFSR generator components discard a certain number of their lower bits
when generating each output. The significant bits of their state must not
all be zero. We must ensure that when seeding the generator.
In case generators are seeded from an incrementing input (such as a system
timer), and between increments only the lower bits may change, we would
also like the lower bits of the input to change the initial state, and not
just be discarded. So we do basic manipulation of the seed input value to
ensure that all bits of the seed input affect the initial state.
"""
try:
seed = next(seed_iter)
except StopIteration:
return 0xFFFFFFFF
else:
if seed is None:
return 0xFFFFFFFF
else:
seed = int(seed) & 0xFFFFFFFF
working_seed = (seed ^ (seed << 16)) & 0xFFFFFFFF
min_value = 1 << min_value_shift
if working_seed < min_value:
working_seed = (seed << 24) & 0xFFFFFFFF
if working_seed < min_value:
working_seed ^= 0xFFFFFFFF
return working_seed
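# Illustrative sketch (not part of the original module): adjacent raw seeds
# (e.g. successive timer reads) are spread into clearly different states.
def _demo_lfsr_seed_spread():
    a = lfsr_next_one_seed(iter([1]), 1)      # 0x00010001
    b = lfsr_next_one_seed(iter([2]), 1)      # 0x00020002
    assert a != b
    return a, b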
def lfsr_validate_one_seed(seed, min_value_shift):
'''Validate seeds for LFSR generators
The LFSR generator components discard a certain number of their lower bits
when generating each output. The significant bits of their state must not
all be zero. We must ensure that when seeding the generator.
This is a light-weight validation of state, used from setstate().
'''
min_value = 1 << min_value_shift
if seed < min_value:
seed ^= 0xFFFFFFFF
return seed
def lfsr_state_z(z):
return int(z ^ ((z << 16) & 0xFFFFFFFF))
def lfsr_repr_z(z):
return repr(int(z ^ ((z << 16) & 0xFFFFFFFF)))
class LFSR113(object):
'''Combined LFSR random number generator by L'Ecuyer
It combines 4 LFSR generators. The generators have been
chosen for maximal equidistribution.
The period is approximately 2**113.
"Tables of Maximally-Equidistributed Combined Lfsr Generators"
P. L'Ecuyer
Mathematics of Computation, 68, 225 (1999), 261-269.
'''
SIMPLERANDOM_MAX = 2**32 - 1
_LFSR113_1_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,6)
_LFSR113_1_MATRIX_b = BitColumnMatrix.shift(32,-13)
_LFSR113_1_MATRIX_c = BitColumnMatrix.mask(32, 1, 32)
_LFSR113_1_MATRIX_d = BitColumnMatrix.shift(32,18)
_LFSR113_1_MATRIX = _LFSR113_1_MATRIX_d * _LFSR113_1_MATRIX_c + _LFSR113_1_MATRIX_b * _LFSR113_1_MATRIX_a
_LFSR113_1_CYCLE_LEN = 2**(32 - 1) - 1
_LFSR113_2_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,2)
_LFSR113_2_MATRIX_b = BitColumnMatrix.shift(32,-27)
_LFSR113_2_MATRIX_c = BitColumnMatrix.mask(32, 3, 32)
_LFSR113_2_MATRIX_d = BitColumnMatrix.shift(32,2)
_LFSR113_2_MATRIX = _LFSR113_2_MATRIX_d * _LFSR113_2_MATRIX_c + _LFSR113_2_MATRIX_b * _LFSR113_2_MATRIX_a
_LFSR113_2_CYCLE_LEN = 2**(32 - 3) - 1
_LFSR113_3_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,13)
_LFSR113_3_MATRIX_b = BitColumnMatrix.shift(32,-21)
_LFSR113_3_MATRIX_c = BitColumnMatrix.mask(32, 4, 32)
_LFSR113_3_MATRIX_d = BitColumnMatrix.shift(32,7)
_LFSR113_3_MATRIX = _LFSR113_3_MATRIX_d * _LFSR113_3_MATRIX_c + _LFSR113_3_MATRIX_b * _LFSR113_3_MATRIX_a
_LFSR113_3_CYCLE_LEN = 2**(32 - 4) - 1
_LFSR113_4_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,3)
_LFSR113_4_MATRIX_b = BitColumnMatrix.shift(32,-12)
_LFSR113_4_MATRIX_c = BitColumnMatrix.mask(32, 7, 32)
_LFSR113_4_MATRIX_d = BitColumnMatrix.shift(32,13)
_LFSR113_4_MATRIX = _LFSR113_4_MATRIX_d * _LFSR113_4_MATRIX_c + _LFSR113_4_MATRIX_b * _LFSR113_4_MATRIX_a
_LFSR113_4_CYCLE_LEN = 2**(32 - 7) - 1
@staticmethod
def min():
return 0
@staticmethod
def max():
return LFSR113.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
repeat_seed_iter = _repeat_iter(seed_iter)
self.z1 = lfsr_next_one_seed(repeat_seed_iter, 1)
self.z2 = lfsr_next_one_seed(repeat_seed_iter, 3)
self.z3 = lfsr_next_one_seed(repeat_seed_iter, 4)
self.z4 = lfsr_next_one_seed(repeat_seed_iter, 7)
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def sanitise(self):
self.z1 = lfsr_validate_one_seed(self.z1, 1)
self.z2 = lfsr_validate_one_seed(self.z2, 3)
self.z3 = lfsr_validate_one_seed(self.z3, 4)
self.z4 = lfsr_validate_one_seed(self.z4, 7)
def _next_z1(self):
b = (((self.z1 & 0x03FFFFFF) << 6) ^ self.z1) >> 13
self.z1 = ((self.z1 & 0x00003FFE) << 18) ^ b
def _next_z2(self):
b = (((self.z2 & 0x3FFFFFFF) << 2) ^ self.z2) >> 27
self.z2 = ((self.z2 & 0x3FFFFFF8) << 2) ^ b
def _next_z3(self):
b = (((self.z3 & 0x0007FFFF) << 13) ^ self.z3) >> 21
self.z3 = ((self.z3 & 0x01FFFFF0) << 7) ^ b
def _next_z4(self):
b = (((self.z4 & 0x1FFFFFFF) << 3) ^ self.z4) >> 12
self.z4 = ((self.z4 & 0x0007FF80) << 13) ^ b
def __next__(self):
b = (((self.z1 & 0x03FFFFFF) << 6) ^ self.z1) >> 13
self.z1 = ((self.z1 & 0x00003FFE) << 18) ^ b
b = (((self.z2 & 0x3FFFFFFF) << 2) ^ self.z2) >> 27
self.z2 = ((self.z2 & 0x3FFFFFF8) << 2) ^ b
b = (((self.z3 & 0x0007FFFF) << 13) ^ self.z3) >> 21
self.z3 = ((self.z3 & 0x01FFFFF0) << 7) ^ b
b = (((self.z4 & 0x1FFFFFFF) << 3) ^ self.z4) >> 12
self.z4 = ((self.z4 & 0x0007FF80) << 13) ^ b
return self.z1 ^ self.z2 ^ self.z3 ^ self.z4
def current(self):
return self.z1 ^ self.z2 ^ self.z3 ^ self.z4
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
current = self.current()
selector = (current >> 30) & 0x3
if selector == 0:
self.z1 = lfsr_validate_one_seed(self.z1 ^ value_int, 1)
self._next_z1()
elif selector == 1:
self.z2 = lfsr_validate_one_seed(self.z2 ^ value_int, 3)
self._next_z2()
elif selector == 2:
self.z3 = lfsr_validate_one_seed(self.z3 ^ value_int, 4)
self._next_z3()
else: # selector == 3
self.z4 = lfsr_validate_one_seed(self.z4 ^ value_int, 7)
self._next_z4()
return self.current()
def __iter__(self):
return self
def getstate(self):
return (lfsr_state_z(self.z1), lfsr_state_z(self.z2), lfsr_state_z(self.z3), lfsr_state_z(self.z4))
def setstate(self, state):
self.seed(state)
def jumpahead(self, n):
n_1 = int(n) % self._LFSR113_1_CYCLE_LEN
n_2 = int(n) % self._LFSR113_2_CYCLE_LEN
n_3 = int(n) % self._LFSR113_3_CYCLE_LEN
n_4 = int(n) % self._LFSR113_4_CYCLE_LEN
z1 = pow(self._LFSR113_1_MATRIX, n_1) * self.z1
self.z1 = z1
z2 = pow(self._LFSR113_2_MATRIX, n_2) * self.z2
self.z2 = z2
z3 = pow(self._LFSR113_3_MATRIX, n_3) * self.z3
self.z3 = z3
z4 = pow(self._LFSR113_4_MATRIX, n_4) * self.z4
self.z4 = z4
def __repr__(self):
return (self.__class__.__name__ + "(" + lfsr_repr_z(self.z1) +
"," + lfsr_repr_z(self.z2) +
"," + lfsr_repr_z(self.z3) +
"," + lfsr_repr_z(self.z4) + ")")
class LFSR88(object):
'''Combined LFSR random number generator by L'Ecuyer
It combines 3 LFSR generators. The generators have been
chosen for maximal equidistribution.
The period is approximately 2**88.
"Maximally Equidistributed Combined Tausworthe Generators"
P. L'Ecuyer
Mathematics of Computation, 65, 213 (1996), 203-213.
'''
SIMPLERANDOM_MAX = 2**32 - 1
_LFSR88_1_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,13)
_LFSR88_1_MATRIX_b = BitColumnMatrix.shift(32,-19)
_LFSR88_1_MATRIX_c = BitColumnMatrix.mask(32, 1, 32)
_LFSR88_1_MATRIX_d = BitColumnMatrix.shift(32,12)
_LFSR88_1_MATRIX = _LFSR88_1_MATRIX_d * _LFSR88_1_MATRIX_c + _LFSR88_1_MATRIX_b * _LFSR88_1_MATRIX_a
_LFSR88_1_CYCLE_LEN = 2**(32 - 1) - 1
_LFSR88_2_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,2)
_LFSR88_2_MATRIX_b = BitColumnMatrix.shift(32,-25)
_LFSR88_2_MATRIX_c = BitColumnMatrix.mask(32, 3, 32)
_LFSR88_2_MATRIX_d = BitColumnMatrix.shift(32,4)
_LFSR88_2_MATRIX = _LFSR88_2_MATRIX_d * _LFSR88_2_MATRIX_c + _LFSR88_2_MATRIX_b * _LFSR88_2_MATRIX_a
_LFSR88_2_CYCLE_LEN = 2**(32 - 3) - 1
_LFSR88_3_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,3)
_LFSR88_3_MATRIX_b = BitColumnMatrix.shift(32,-11)
_LFSR88_3_MATRIX_c = BitColumnMatrix.mask(32, 4, 32)
_LFSR88_3_MATRIX_d = BitColumnMatrix.shift(32,17)
_LFSR88_3_MATRIX = _LFSR88_3_MATRIX_d * _LFSR88_3_MATRIX_c + _LFSR88_3_MATRIX_b * _LFSR88_3_MATRIX_a
_LFSR88_3_CYCLE_LEN = 2**(32 - 4) - 1
@staticmethod
def min():
return 0
@staticmethod
def max():
return LFSR88.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
repeat_seed_iter = _repeat_iter(seed_iter)
self.z1 = lfsr_next_one_seed(repeat_seed_iter, 1)
self.z2 = lfsr_next_one_seed(repeat_seed_iter, 3)
self.z3 = lfsr_next_one_seed(repeat_seed_iter, 4)
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def sanitise(self):
self.z1 = lfsr_validate_one_seed(self.z1, 1)
self.z2 = lfsr_validate_one_seed(self.z2, 3)
self.z3 = lfsr_validate_one_seed(self.z3, 4)
def _next_z1(self):
b = (((self.z1 & 0x0007FFFF) << 13) ^ self.z1) >> 19
self.z1 = ((self.z1 & 0x000FFFFE) << 12) ^ b
def _next_z2(self):
b = (((self.z2 & 0x3FFFFFFF) << 2) ^ self.z2) >> 25
self.z2 = ((self.z2 & 0x0FFFFFF8) << 4) ^ b
def _next_z3(self):
b = (((self.z3 & 0x1FFFFFFF) << 3) ^ self.z3) >> 11
self.z3 = ((self.z3 & 0x00007FF0) << 17) ^ b
def __next__(self):
b = (((self.z1 & 0x0007FFFF) << 13) ^ self.z1) >> 19
self.z1 = ((self.z1 & 0x000FFFFE) << 12) ^ b
b = (((self.z2 & 0x3FFFFFFF) << 2) ^ self.z2) >> 25
self.z2 = ((self.z2 & 0x0FFFFFF8) << 4) ^ b
b = (((self.z3 & 0x1FFFFFFF) << 3) ^ self.z3) >> 11
self.z3 = ((self.z3 & 0x00007FF0) << 17) ^ b
return self.z1 ^ self.z2 ^ self.z3
def current(self):
return self.z1 ^ self.z2 ^ self.z3
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
current = self.current()
if current < 1431655765: # constant is 2^32 / 3
self.z1 = lfsr_validate_one_seed(self.z1 ^ value_int, 1)
self._next_z1()
elif current < 2863311531: # constant is 2^32 * 2 / 3
self.z2 = lfsr_validate_one_seed(self.z2 ^ value_int, 3)
self._next_z2()
else:
self.z3 = lfsr_validate_one_seed(self.z3 ^ value_int, 4)
self._next_z3()
return self.current()
def __iter__(self):
return self
def getstate(self):
return (lfsr_state_z(self.z1), lfsr_state_z(self.z2), lfsr_state_z(self.z3))
def setstate(self, state):
self.seed(state)
def jumpahead(self, n):
n_1 = int(n) % self._LFSR88_1_CYCLE_LEN
n_2 = int(n) % self._LFSR88_2_CYCLE_LEN
n_3 = int(n) % self._LFSR88_3_CYCLE_LEN
z1 = pow(self._LFSR88_1_MATRIX, n_1) * self.z1
self.z1 = z1
z2 = pow(self._LFSR88_2_MATRIX, n_2) * self.z2
self.z2 = z2
z3 = pow(self._LFSR88_3_MATRIX, n_3) * self.z3
self.z3 = z3
def __repr__(self):
return (self.__class__.__name__ + "(" + lfsr_repr_z(self.z1) +
"," + lfsr_repr_z(self.z2) +
"," + lfsr_repr_z(self.z3) + ")")
| mit | -5,898,886,228,180,015,000 | 35.298759 | 125 | 0.576945 | false | 3.21465 | false | false | false |
GNOME/gedit-plugins | plugins/commander/modules/align.py | 1 | 8900 | # -*- coding: utf-8 -*-
#
# align.py - align commander module
#
# Copyright (C) 2010 - Jesse van den Kieboom
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
import commander.commands as commands
import commander.commands.completion
import commander.commands.result
import commander.commands.exceptions
from functools import reduce
import re
__commander_module__ = True
def _get_groups(m, group, add_ws_group):
if len(m.groups()) <= group - 1:
gidx = 0
else:
gidx = group
if len(m.groups()) <= add_ws_group - 1:
wsidx = 0
else:
wsidx = add_ws_group
# Whitespace group must be contained in align group
if m.start(wsidx) < m.start(gidx) or m.end(wsidx) > m.end(gidx):
wsidx = gidx
return (gidx, wsidx)
class Line:
def __init__(self, line, reg, tabwidth):
self.tabwidth = tabwidth
self.line = line
# All the separators
self.matches = list(reg.finditer(line))
# @newline initially contains the first column
if not self.matches:
# No separator found
self.newline = str(line)
else:
# Up to first separator
self.newline = line[0:self.matches[0].start(0)]
def matches_len(self):
return len(self.matches)
def new_len(self, extra=''):
return len((self.newline + extra).expandtabs(self.tabwidth))
def match(self, idx):
if idx >= self.matches_len():
return None
return self.matches[idx]
def append(self, idx, num, group, add_ws_group):
m = self.match(idx)
if m == None:
return
gidx, wsidx = _get_groups(m, group, add_ws_group)
# Append leading match
self.newline += self.line[m.start(0):m.start(gidx)]
# Now align by replacing wsidx with spaces
prefix = self.line[m.start(gidx):m.start(wsidx)]
suffix = self.line[m.end(wsidx):m.end(gidx)]
sp = ''
while True:
bridge = prefix + sp + suffix
if self.new_len(bridge) < num:
sp += ' '
else:
break
self.newline += bridge
# Then append the rest of the match
mnext = self.match(idx + 1)
if mnext == None:
endidx = None
else:
endidx = mnext.start(0)
self.newline += self.line[m.end(gidx):endidx]
def __str__(self):
return self.newline
def _find_max_align(lines, idx, group, add_ws_group):
num = 0
# We will align on 'group', by adding spaces to 'add_ws_group'
for line in lines:
m = line.match(idx)
if m != None:
gidx, wsidx = _get_groups(m, group, add_ws_group)
# until the start
extra = line.line[m.start(0):m.start(wsidx)] + line.line[m.end(wsidx):m.end(gidx)]
# Measure where to align it
l = line.new_len(extra)
else:
l = line.new_len()
if l > num:
num = l
return num
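# Illustrative sketch (not part of the original module): padding the
# whitespace after '=' so that the right-hand sides line up, assuming a tab
# width of 8. Group 1 (the whitespace run) is what gets replaced by padding.
def _demo_align_after_equals():
    reg = re.compile(r'=(\s*)')
    lines = [Line('a = 1', reg, 8), Line('total = 2', reg, 8)]
    num = _find_max_align(lines, 0, 1, 1)
    for line in lines:
        line.append(0, num + 1, 1, 1)
    return [str(line) for line in lines]      # ['a =     1', 'total = 2']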
def _regex(view, reg, group, additional_ws, add_ws_group, flags=0):
buf = view.get_buffer()
# Get the selection of lines to align columns on
bounds = buf.get_selection_bounds()
if not bounds:
start = buf.get_iter_at_mark(buf.get_insert())
start.set_line_offset(0)
end = start.copy()
if not end.ends_line():
end.forward_to_line_end()
bounds = (start, end)
if not bounds[0].equal(bounds[1]) and bounds[1].starts_line():
bounds[1].backward_line()
if not bounds[1].ends_line():
bounds[1].forward_to_line_end()
# Get the regular expression from the user
if reg == None:
reg, words, modifier = (yield commander.commands.result.Prompt('Regex:'))
# Compile the regular expression
try:
reg = re.compile(reg, flags)
except Exception as e:
raise commander.commands.exceptions.Execute('Failed to compile regular expression: %s' % (e,))
# Query the user to provide a regex group number to align on
if group == None:
group, words, modifier = (yield commander.commands.result.Prompt('Group (1):'))
try:
group = int(group)
except:
group = 1
# Query the user for additional whitespace to insert for separating items
if additional_ws == None:
additional_ws, words, modifier = (yield commander.commands.result.Prompt('Additional whitespace (0):'))
try:
additional_ws = int(additional_ws)
except:
additional_ws = 0
# Query the user for the regex group number on which to add the
# whitespace
if add_ws_group == None:
add_ws_group, words, modifier = (yield commander.commands.result.Prompt('Whitespace group (1):'))
try:
add_ws_group = int(add_ws_group)
except:
add_ws_group = -1
# By default, add the whitespace on the group on which the columns are
# aligned
if add_ws_group < 0:
add_ws_group = group
start, end = bounds
if not start.starts_line():
start.set_line_offset(0)
if not end.ends_line():
end.forward_to_line_end()
lines = start.get_text(end).splitlines()
newlines = []
num = 0
tabwidth = view.get_tab_width()
# Construct Line objects for all the lines
newlines = [Line(line, reg, tabwidth) for line in lines]
# Calculate maximum number of matches (i.e. columns)
num = reduce(lambda x, y: max(x, y.matches_len()), newlines, 0)
for i in range(num):
al = _find_max_align(newlines, i, group, add_ws_group)
for line in newlines:
line.append(i, al + additional_ws, group, add_ws_group)
# Replace lines
aligned = str.join('\n', [x.newline for x in newlines])
buf.begin_user_action()
buf.delete(bounds[0], bounds[1])
m = buf.create_mark(None, bounds[0], True)
buf.insert(bounds[1], aligned)
buf.select_range(buf.get_iter_at_mark(m), bounds[1])
buf.delete_mark(m)
buf.end_user_action()
yield commander.commands.result.DONE
def __default__(view, reg='\s+', align_group=1, padding=1, padding_group=-1):
"""Align selected in columns using a regular expression: align.regex [<regex>=<i>\s+</i>] [<align-group>] [<padding>] [<padding-group>=<i><align-group></i>]
Align the selected text in columns separated by the specified regular expression.
The optional <align-group> argument specifies on which group in the regular expression
the text should be aligned and defaults to 1 (or 0 in the case that there is
no explicit group specified). The <align-group> will be <b>replaced</b>
with whitespace to align the columns. The optional <padding> argument can
be used to add additional whitespace to the column separation. The last
optional argument (<padding-group>) can be used to specify a separate
group (which must be contained in <align-group>) which to replace with
whitespace.
The regular expression will be matched in case-sensitive mode"""
yield _regex(view, reg, align_group, padding, padding_group)
def i(view, reg='\s+', align_group=1, padding=1, padding_group=-1):
"""Align selected in columns using a regular expression: align.regex [<regex>=<i>\s+</i>] [<align-group>] [<padding>] [<padding-group>=<i><align-group></i>]
Align the selected text in columns separated by the specified regular expression.
The optional <align-group> argument specifies on which group in the regular expression
the text should be aligned and defaults to 1 (or 0 in the case that there is
no explicit group specified). The <align-group> will be <b>replaced</b>
with whitespace to align the columns. The optional <padding> argument can
be used to add additional whitespace to the column separation. The last
optional argument (<padding-group>) can be used to specify a separate
group (which must be contained in <align-group>) which to replace with
whitespace.
The regular expression will be matched in case-insensitive mode"""
yield _regex(view, reg, align_group, padding, padding_group, re.IGNORECASE)
# ex:ts=4:et
| gpl-2.0 | 2,460,157,650,009,915,400 | 30.448763 | 190 | 0.637978 | false | 3.574297 | false | false | false |
Teagan42/home-assistant | homeassistant/components/google_assistant/const.py | 1 | 4660 | """Constants for Google Assistant."""
from homeassistant.components import (
alarm_control_panel,
binary_sensor,
camera,
climate,
cover,
fan,
group,
input_boolean,
light,
lock,
media_player,
scene,
script,
sensor,
switch,
vacuum,
)
DOMAIN = "google_assistant"
GOOGLE_ASSISTANT_API_ENDPOINT = "/api/google_assistant"
CONF_EXPOSE = "expose"
CONF_ENTITY_CONFIG = "entity_config"
CONF_EXPOSE_BY_DEFAULT = "expose_by_default"
CONF_EXPOSED_DOMAINS = "exposed_domains"
CONF_PROJECT_ID = "project_id"
CONF_ALIASES = "aliases"
CONF_API_KEY = "api_key"
CONF_ROOM_HINT = "room"
CONF_ALLOW_UNLOCK = "allow_unlock"
CONF_SECURE_DEVICES_PIN = "secure_devices_pin"
CONF_REPORT_STATE = "report_state"
CONF_SERVICE_ACCOUNT = "service_account"
CONF_CLIENT_EMAIL = "client_email"
CONF_PRIVATE_KEY = "private_key"
DEFAULT_EXPOSE_BY_DEFAULT = True
DEFAULT_EXPOSED_DOMAINS = [
"climate",
"cover",
"fan",
"group",
"input_boolean",
"light",
"media_player",
"scene",
"script",
"switch",
"vacuum",
"lock",
"binary_sensor",
"sensor",
"alarm_control_panel",
]
PREFIX_TYPES = "action.devices.types."
TYPE_CAMERA = PREFIX_TYPES + "CAMERA"
TYPE_LIGHT = PREFIX_TYPES + "LIGHT"
TYPE_SWITCH = PREFIX_TYPES + "SWITCH"
TYPE_VACUUM = PREFIX_TYPES + "VACUUM"
TYPE_SCENE = PREFIX_TYPES + "SCENE"
TYPE_FAN = PREFIX_TYPES + "FAN"
TYPE_THERMOSTAT = PREFIX_TYPES + "THERMOSTAT"
TYPE_LOCK = PREFIX_TYPES + "LOCK"
TYPE_BLINDS = PREFIX_TYPES + "BLINDS"
TYPE_GARAGE = PREFIX_TYPES + "GARAGE"
TYPE_OUTLET = PREFIX_TYPES + "OUTLET"
TYPE_SENSOR = PREFIX_TYPES + "SENSOR"
TYPE_DOOR = PREFIX_TYPES + "DOOR"
TYPE_TV = PREFIX_TYPES + "TV"
TYPE_SPEAKER = PREFIX_TYPES + "SPEAKER"
TYPE_ALARM = PREFIX_TYPES + "SECURITYSYSTEM"
SERVICE_REQUEST_SYNC = "request_sync"
HOMEGRAPH_URL = "https://homegraph.googleapis.com/"
HOMEGRAPH_SCOPE = "https://www.googleapis.com/auth/homegraph"
HOMEGRAPH_TOKEN_URL = "https://accounts.google.com/o/oauth2/token"
REQUEST_SYNC_BASE_URL = HOMEGRAPH_URL + "v1/devices:requestSync"
REPORT_STATE_BASE_URL = HOMEGRAPH_URL + "v1/devices:reportStateAndNotification"
# Error codes used for SmartHomeError class
# https://developers.google.com/actions/reference/smarthome/errors-exceptions
ERR_DEVICE_OFFLINE = "deviceOffline"
ERR_DEVICE_NOT_FOUND = "deviceNotFound"
ERR_VALUE_OUT_OF_RANGE = "valueOutOfRange"
ERR_NOT_SUPPORTED = "notSupported"
ERR_PROTOCOL_ERROR = "protocolError"
ERR_UNKNOWN_ERROR = "unknownError"
ERR_FUNCTION_NOT_SUPPORTED = "functionNotSupported"
ERR_ALREADY_DISARMED = "alreadyDisarmed"
ERR_ALREADY_ARMED = "alreadyArmed"
ERR_CHALLENGE_NEEDED = "challengeNeeded"
ERR_CHALLENGE_NOT_SETUP = "challengeFailedNotSetup"
ERR_TOO_MANY_FAILED_ATTEMPTS = "tooManyFailedAttempts"
ERR_PIN_INCORRECT = "pinIncorrect"
ERR_USER_CANCELLED = "userCancelled"
# Event types
EVENT_COMMAND_RECEIVED = "google_assistant_command"
EVENT_QUERY_RECEIVED = "google_assistant_query"
EVENT_SYNC_RECEIVED = "google_assistant_sync"
DOMAIN_TO_GOOGLE_TYPES = {
camera.DOMAIN: TYPE_CAMERA,
climate.DOMAIN: TYPE_THERMOSTAT,
cover.DOMAIN: TYPE_BLINDS,
fan.DOMAIN: TYPE_FAN,
group.DOMAIN: TYPE_SWITCH,
input_boolean.DOMAIN: TYPE_SWITCH,
light.DOMAIN: TYPE_LIGHT,
lock.DOMAIN: TYPE_LOCK,
media_player.DOMAIN: TYPE_SWITCH,
scene.DOMAIN: TYPE_SCENE,
script.DOMAIN: TYPE_SCENE,
switch.DOMAIN: TYPE_SWITCH,
vacuum.DOMAIN: TYPE_VACUUM,
alarm_control_panel.DOMAIN: TYPE_ALARM,
}
DEVICE_CLASS_TO_GOOGLE_TYPES = {
(cover.DOMAIN, cover.DEVICE_CLASS_GARAGE): TYPE_GARAGE,
(cover.DOMAIN, cover.DEVICE_CLASS_DOOR): TYPE_DOOR,
(switch.DOMAIN, switch.DEVICE_CLASS_SWITCH): TYPE_SWITCH,
(switch.DOMAIN, switch.DEVICE_CLASS_OUTLET): TYPE_OUTLET,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_DOOR): TYPE_DOOR,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_GARAGE_DOOR): TYPE_GARAGE,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_LOCK): TYPE_SENSOR,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_OPENING): TYPE_SENSOR,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_WINDOW): TYPE_SENSOR,
(media_player.DOMAIN, media_player.DEVICE_CLASS_TV): TYPE_TV,
(media_player.DOMAIN, media_player.DEVICE_CLASS_SPEAKER): TYPE_SPEAKER,
(sensor.DOMAIN, sensor.DEVICE_CLASS_TEMPERATURE): TYPE_SENSOR,
(sensor.DOMAIN, sensor.DEVICE_CLASS_HUMIDITY): TYPE_SENSOR,
}
CHALLENGE_ACK_NEEDED = "ackNeeded"
CHALLENGE_PIN_NEEDED = "pinNeeded"
CHALLENGE_FAILED_PIN_NEEDED = "challengeFailedPinNeeded"
STORE_AGENT_USER_IDS = "agent_user_ids"
SOURCE_CLOUD = "cloud"
SOURCE_LOCAL = "local"
| apache-2.0 | 8,215,510,004,811,099,000 | 30.486486 | 80 | 0.71824 | false | 2.971939 | false | false | false |
LoyolaCSDepartment/LDA-ICPC-2014 | topic-models/topic-count/xmlsplit.py | 1 | 1409 | #! /usr/bin/env /usr/bin/python3
import os
import sys
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
def xmlSplit(infile_name, dest_dir):
try:
# in_file = open('{0}{1}'.format(folder, filename), 'r', encoding='latin_1')
in_file = open(infile_name, 'r', encoding='latin_1')
except IOError:
print("File not found.")
return
    dest_dir += '/'  # ensure a trailing slash; a doubled slash is harmless
# dest_dir = '{0}input/'.format(folder)
ensure_dir(dest_dir)
file_num = 1
out_file = open('%s%d.txt' % (dest_dir, file_num), 'w')
file_open = True
for x in in_file:
if x[-1] != '\n':
x = '%s\n' % (x)
if not file_open:
file_open = True
out_file = open(next_file, 'w')
# hack to remove non-ascii characters
x = ''.join([c for c in x if ord(c) < 128])
out_file.write('%s' % (x))
if x.startswith('</source>'):
out_file.close()
file_num += 1
next_file = '%s%d.txt' % (dest_dir, file_num)
file_open = False
print('{0} files'.format(file_num - 1) + " left in " + dest_dir)
out_file.close()
in_file.close()
if len(sys.argv) != 3:
print("usage: " + sys.argv[0] + " <input xml file> <output directory>")
sys.exit (-1)
xmlSplit(sys.argv[1], sys.argv[2])
# example call: xmlsplit.py cook.xml /scratch/topics/out')
# xmlSplit('<FIX ME>/topic-models/topic-count/sources/', 'cook.xml')
| mit | 8,342,931,913,768,487,000 | 25.092593 | 80 | 0.582683 | false | 2.773622 | false | false | false |
AMOboxTV/AMOBox.LegoBuild | plugin.video.salts/scrapers/izlemeyedeger_scraper.py | 1 | 3982 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
from salts_lib import dom_parser
from salts_lib import kodi
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
import scraper
BASE_URL = 'http://www.izlemeyedeger.com'
class IzlemeyeDeger_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'IzlemeyeDeger'
def resolve_link(self, link):
return link
def format_source_label(self, item):
label = '[%s] %s' % (item['quality'], item['host'])
if 'views' in item and item['views']:
label += ' (%s views)' % item['views']
return label
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=.5)
embed_url = dom_parser.parse_dom(html, 'meta', {'itemprop': 'embedURL'}, ret='content')
if embed_url:
html = self._http_get(embed_url[0], cache_limit=.5)
for match in re.finditer('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"', html):
stream_url, height = match.groups()
stream_url = stream_url.replace('\\&', '&')
host = self._get_direct_hostname(stream_url)
if host == 'gvideo':
quality = scraper_utils.gv_get_quality(stream_url)
else:
quality = scraper_utils.height_get_quality(height)
stream_url += '|User-Agent=%s&Referer=%s' % (scraper_utils.get_ua(), urllib.quote(embed_url[0]))
hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
hosters.append(hoster)
return hosters
def get_url(self, video):
return self._default_get_url(video)
def search(self, video_type, title, year, season=''):
results = []
search_url = urlparse.urljoin(self.base_url, '/arama?q=%s')
search_url = search_url % (urllib.quote_plus(title))
html = self._http_get(search_url, cache_limit=1)
fragment = dom_parser.parse_dom(html, 'div', {'class': 'section'})
if fragment:
for match in re.finditer('href="([^"]+).*?class="year">\s*(\d+).*?class="video-title">\s*([^<]+)', fragment[0], re.DOTALL):
url, match_year, match_title = match.groups('')
match_title = match_title.strip()
if not year or not match_year or year == match_year:
result = {'url': scraper_utils.pathify_url(url), 'title': scraper_utils.cleanse_title(match_title), 'year': match_year}
results.append(result)
return results
| gpl-2.0 | -3,967,336,712,143,353,000 | 40.479167 | 165 | 0.592416 | false | 3.742481 | false | false | false |
kralf/morsel | python/lib/morsel/nodes/ode/solids/mesh.py | 1 | 1092 | from morsel.panda import *
from morsel.nodes.node import Node
from morsel.nodes.ode.object import Object
from morsel.nodes.facade import Mesh as _Mesh
from morsel.nodes.ode.solid import Solid
#-------------------------------------------------------------------------------
class Mesh(Solid):
def __init__(self, **kargs):
super(Mesh, self).__init__(**kargs)
#-------------------------------------------------------------------------------
def getMesh(self):
if not self._mesh and self.object:
self._mesh = _Mesh(parent = self)
self._mesh.copyFrom(self.object.mesh.model, flatten = True)
return self._mesh
mesh = property(getMesh)
#-------------------------------------------------------------------------------
def fit(self, node):
Solid.fit(self, node)
mesh = _Mesh(position = self.globalPosition, orientation =
self.globalOrientation)
mesh.copyFrom(node.mesh, flatten = True)
data = panda.OdeTriMeshData(mesh)
mesh.detachNode()
self.geometry = panda.OdeTriMeshGeom(node.world.space, data)
| gpl-2.0 | -6,021,325,268,098,747,000 | 29.361111 | 80 | 0.519231 | false | 4.216216 | false | false | false |
alexhilton/miscellaneous | python/pygrep.py | 1 | 3334 | #!/usr/bin/env python
"""A Python version of grep utility.
Search one or more named input files against one or more given patterns.
Print each line that contains a match, if there are any.
"""
from optparse import OptionParser;
import re;
import fileinput;
import os.path;
FILENAME = '\033[92m';
LINENO = '\033[94m';
MATCH = '\033[91m';
ENDC = '\033[0m';
class MultiMatcher(object):
"""A set of searchable Regular Expression Patterns
  Accept one or more regular expressions such that if any one of them
  matches a line, the first successful match is returned.
"""
def __init__(self, multipattern, ignore_case):
flags = 0;
if ignore_case:
flags = re.IGNORECASE;
self.multipattern = [re.compile(pattern, flags) for pattern in multipattern];
def search(self, line):
for pattern in self.multipattern:
m = pattern.search(line);
if m is not None:
return m;
def build_options():
parser = OptionParser(usage = "usage: %prog [options] -e PATTERN files", version = "%prog 1.0");
parser.add_option("-i", "--ignore-case", action = "store_true", dest = "ignore_case",
default = False, help = "ignore case of letters when matching");
parser.add_option("-r", "--recursive", action = "store_true", dest = "recursive",
default = False, help = "search for files in directory recursively");
parser.add_option("-n", "--negative", action = "store_true", dest = "negative",
default = False, help = "show the lines that does not match the pattern");
parser.add_option("-e", "--regexpr", action = "append", dest = "regexpr",
help = "specify pattern expression on which to match");
return parser;
def do_matching(filename, matcher):
for line in fileinput.input(filename):
line = line.rstrip();
match = matcher.search(line);
if options.negative:
if match is None:
print "%s%s:%s%d %s%s" % (FILENAME, fileinput.filename(), LINENO, fileinput.filelineno(), ENDC, line);
else:
if match is not None:
first_part = line[:match.start()];
the_match = line[match.start():match.end()];
second_part = line[match.end():];
print "%s%s:%s%d %s%s%s%s%s%s" % (FILENAME, fileinput.filename(), LINENO, fileinput.filelineno(), \
ENDC, first_part, MATCH, the_match, ENDC, second_part);
def main():
global options;
parser = build_options();
options, args = parser.parse_args();
if not options.regexpr:
parser.error("You must specify at least one PATTERN");
if not args:
parser.error("You must specify at least one input file or directory");
matcher = MultiMatcher(options.regexpr, options.ignore_case);
for filename in args:
if not os.path.exists(filename):
print "No such file or directory: ", filename;
continue;
if options.recursive and os.path.isdir(filename):
for root, dirs, files in os.walk(filename):
[do_matching(os.path.join(root, entry), matcher) for entry in files];
elif os.path.isfile(filename):
do_matching(filename, matcher);
if __name__ == "__main__":
main();
| apache-2.0 | 1,519,449,779,832,885,800 | 37.321839 | 118 | 0.607379 | false | 3.913146 | false | false | false |
etkirsch/legends-of-erukar | erukar/content/inventory/weapons/standard/Focus.py | 1 | 1029 | import numpy as np
from erukar.system.engine.inventory import ArcaneWeapon
class Focus(ArcaneWeapon):
Probability = 1
BaseName = "Focus"
EssentialPart = "devotion"
AttackRange = 3
RangePenalty = 3
BaseWeight = 1.0
# Damage
DamageRange = [2, 5]
DamageType = 'force'
DamageModifier = "sense"
DamageScalar = 2.4
ScalingRequirement = 6
EnergyCost = 5
# Distribution
Distribution = np.random.gamma
DistributionProperties = (2, 0.3)
BaseStatInfluences = {
'sense': {'requirement': 8, 'scaling_factor': 3.5, 'cutoff': 200},
'acuity': {'requirement': 0, 'scaling_factor': 1.2, 'cutoff': 100},
}
def failing_requirements(self, wielder):
if wielder.arcane_energy < self.EnergyCost:
return ['Not enough Arcane Energy to use {} -- need {}, have {}'.format(self.alias(), self.EnergyCost, wielder.arcane_energy)]
def on_calculate_attack(self, cmd):
cmd.args['player_lifeform'].arcane_energy -= self.EnergyCost
| agpl-3.0 | -8,065,563,989,550,132,000 | 27.583333 | 138 | 0.640428 | false | 3.298077 | false | false | false |
fishroot/qdeep | lib/qdeep/objects/script/__init__.py | 1 | 6352 | # -*- coding: utf-8 -*-
__author__ = 'Patrick Michl'
__email__ = '[email protected]'
__license__ = 'GPLv3'
import nemoa
import qdeep.objects.common
from PySide import QtGui, QtCore
class Editor(qdeep.objects.common.Editor):
objType = 'script'
def createCentralWidget(self):
self.textArea = QtGui.QTextEdit()
self.textArea.setHorizontalScrollBarPolicy(
QtCore.Qt.ScrollBarAsNeeded)
self.textArea.setVerticalScrollBarPolicy(
QtCore.Qt.ScrollBarAsNeeded)
self.textArea.document().contentsChanged.connect(
self.documentWasModified)
font = QtGui.QFont()
font.setFamily('Courier')
font.setFixedPitch(True)
font.setPointSize(10)
self.textArea.setFont(font)
self.textArea.setAcceptDrops(True)
self.highlighter = Highlighter(self.textArea.document())
self.setCentralWidget(self.textArea)
def createActions(self):
self.actRunScript = QtGui.QAction(
qdeep.common.getIcon('actions', 'debug-run.png'),
"Run Script", self,
shortcut = "F5",
statusTip = "Run python script",
triggered = self.runScript)
def createToolBars(self):
self.scriptToolBar = self.addToolBar("Script")
self.scriptToolBar.addAction(self.actRunScript)
def getModified(self):
return self.textArea.document().isModified()
def setModified(self, value = True):
self.textArea.document().setModified(value)
def loadFile(self, fileName):
file = QtCore.QFile(fileName)
if not file.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, "MDI",
"Cannot read file %s:\n%s." % (
fileName, file.errorString()))
return False
instr = QtCore.QTextStream(file)
self.textArea.setPlainText(instr.readAll())
self.textArea.document().contentsChanged.connect(
self.documentWasModified)
return True
def saveFile(self, fileName):
file = QtCore.QFile(fileName)
if not file.open(QtCore.QFile.WriteOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, "MDI",
"Cannot write file %s:\n%s." % (fileName,
file.errorString()))
return False
outstr = QtCore.QTextStream(file)
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
outstr << self.textArea.toPlainText()
QtGui.QApplication.restoreOverrideCursor()
self.setModified(False)
self.updateWindowTitle()
return True
def runScript(self):
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
nemoa.run(self.getName())
QtGui.QApplication.restoreOverrideCursor()
class Highlighter(QtGui.QSyntaxHighlighter):
def __init__(self, parent=None):
super(Highlighter, self).__init__(parent)
keywordFormat = QtGui.QTextCharFormat()
keywordFormat.setForeground(QtCore.Qt.darkBlue)
keywordFormat.setFontWeight(QtGui.QFont.Bold)
keywordPatterns = ["\\bchar\\b", "\\bclass\\b", "\\bconst\\b",
"\\bdouble\\b", "\\benum\\b", "\\bexplicit\\b", "\\bfriend\\b",
"\\binline\\b", "\\bint\\b", "\\blong\\b", "\\bnamespace\\b",
"\\boperator\\b", "\\bprivate\\b", "\\bprotected\\b",
"\\bpublic\\b", "\\bshort\\b", "\\bsignals\\b", "\\bsigned\\b",
"\\bslots\\b", "\\bstatic\\b", "\\bstruct\\b",
"\\btemplate\\b", "\\btypedef\\b", "\\btypename\\b",
"\\bunion\\b", "\\bunsigned\\b", "\\bvirtual\\b", "\\bvoid\\b",
"\\bvolatile\\b", "\\bimport\\b", "\\bdef\\b",
"\\bTrue\\b", "\\bFalse\\b", "\\breturn\\b"]
self.highlightingRules = [(QtCore.QRegExp(pattern), keywordFormat)
for pattern in keywordPatterns]
classFormat = QtGui.QTextCharFormat()
classFormat.setFontWeight(QtGui.QFont.Bold)
classFormat.setForeground(QtCore.Qt.darkMagenta)
self.highlightingRules.append((QtCore.QRegExp("\\bQ[A-Za-z]+\\b"),
classFormat))
singleLineCommentFormat = QtGui.QTextCharFormat()
singleLineCommentFormat.setForeground(QtCore.Qt.red)
self.highlightingRules.append((QtCore.QRegExp("//[^\n]*"),
singleLineCommentFormat))
self.multiLineCommentFormat = QtGui.QTextCharFormat()
self.multiLineCommentFormat.setForeground(QtCore.Qt.red)
quotationFormat = QtGui.QTextCharFormat()
quotationFormat.setForeground(QtCore.Qt.darkGreen)
self.highlightingRules.append((QtCore.QRegExp("\".*\""),
quotationFormat))
self.highlightingRules.append((QtCore.QRegExp("'.*'"),
quotationFormat))
functionFormat = QtGui.QTextCharFormat()
functionFormat.setFontItalic(True)
functionFormat.setForeground(QtCore.Qt.blue)
self.highlightingRules.append((QtCore.QRegExp("\\b[A-Za-z0-9_]+(?=\\()"),
functionFormat))
self.commentStartExpression = QtCore.QRegExp("/\\*")
self.commentEndExpression = QtCore.QRegExp("\\*/")
def highlightBlock(self, text):
for pattern, format in self.highlightingRules:
expression = QtCore.QRegExp(pattern)
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length, format)
index = expression.indexIn(text, index + length)
self.setCurrentBlockState(0)
startIndex = 0
if self.previousBlockState() != 1:
startIndex = self.commentStartExpression.indexIn(text)
while startIndex >= 0:
endIndex = self.commentEndExpression.indexIn(text, startIndex)
if endIndex == -1:
self.setCurrentBlockState(1)
commentLength = len(text) - startIndex
else:
commentLength = endIndex - startIndex + self.commentEndExpression.matchedLength()
self.setFormat(startIndex, commentLength,
self.multiLineCommentFormat)
startIndex = self.commentStartExpression.indexIn(text,
startIndex + commentLength)
| gpl-3.0 | -1,445,960,790,410,959,600 | 37.035928 | 97 | 0.614137 | false | 3.916153 | false | false | false |
quantosauros/cppyProject | cppy/cybosPlus/cpRqRp/StockOrderCash.py | 1 | 1827 | # coding=utf-8
'''
Created on 2016. 8. 14.
@author: Jay
'''
from cppy.adaptor import CpRqRpClass
import win32com.client
@CpRqRpClass('CpTrade.CpTd0311')
class StockOrderCash(object):
'''
    Requests and receives cash-order data for exchange-listed (KOSPI) stocks, KOSDAQ stocks, and ELWs.
'''
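    # Hedged usage sketch (values are illustrative; the COM object is supplied
    # by the CpRqRpClass plumbing, not constructed here):
    #   order = StockOrderCash()
    #   order.setInputValue(
    #       [order.InputType.SellOrBuy, order.InputType.StockCode,
    #        order.InputType.OrderNumber, order.InputType.OrderPrice],
    #       ['2', 'A003540', 10, 51000])
    #   order.setOutputValue([order.OutputType.StockCode,
    #                         order.OutputType.OrderNumber])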
def __init__(self):
self.instCpTdUtil = win32com.client.Dispatch("CpTrade.CpTdUtil")
    class InputType(enumerate):
        SellOrBuy = 0      # order type code (1: sell, 2: buy)
        AccountNumber = 1  # account number
        StockCode = 3      # stock code
        OrderNumber = 4    # order quantity
        OrderPrice = 5     # order unit price
    class OutputType(enumerate):
        AccountNumber = 1  # account number
        StockCode = 3      # stock code
        OrderNumber = 4    # order quantity
        OrderPrice = 5     # order unit price
def setInputValue(self, inputTypes, inputValues):
self.inputTypes = inputTypes
self.inputValues = inputValues
def setOutputValue(self, outputTypes):
self.outputTypes = outputTypes
def request(self, com_obj):
self.instCpTdUtil.TradeInit()
for i in range(len(self.inputTypes)) :
com_obj.SetInputValue(self.inputTypes[i], self.inputValues[i])
        # account number
accountNumber = self.instCpTdUtil.AccountNumber[0]
com_obj.SetInputValue(1, accountNumber)
com_obj.Request()
def response(self, com_obj):
result = ""
for j in range(0, len(self.outputTypes)) :
value = com_obj.GetHeaderValue(self.outputTypes[j])
result += str(value) + "; "
print (result)
| mit | 1,099,986,482,034,040,200 | 26.525424 | 74 | 0.544319 | false | 2.61838 | false | false | false |
jieter/f-engrave | application/settings.py | 1 | 9320 | import os
def cast_boolean(value):
if type(value) is bool:
return bool(value)
elif len(value) > 1:
return value == 'True'
else:
return bool(int(value))
def cast_string(value):
value = str(value).strip()
value = value.replace('\\n', '\n')
# unquote string
if value.startswith('"') and value.endswith('"'):
return value[1:-1].strip()
else:
return value
CAST_TYPES = {
'str': cast_string,
'bool': cast_boolean,
'int': int,
'float': float
}
# Old names to maintain backwards compatibility while reading
# config files. Only supported while loading values
# from config files.
OLD_SETTING_NAMES = {
'gpost': 'gcode_postamble',
'gpre': 'gcode_preamble',
'bmp_long': 'bmp_longcurve',
'bmp_optto': 'bmp_opttolerance',
'bmp_turnp': 'bmp_turnpol',
'bmp_turds': 'bmp_turdsize',
'bmp_alpha': 'bmp_alphamax',
'v_drv_crner': 'v_drv_corner',
'v_stp_crner': 'v_step_corner',
'FEED': 'feedrate',
'PLUNGE': 'plunge_rate',
'WSPACE': 'word_space',
'CSPACE': 'char_space',
'LSPACE': 'line_space',
'TANGLE': 'text_angle',
'TCODE': 'text_code',
'H_CALC': 'height_calculation',
'XSCALE': 'xscale',
'YSCALE': 'yscale',
'STHICK': 'line_thickness',
'TRADIUS': 'text_radius',
'ZSAFE': 'zsafe',
'ZCUT': 'zcut',
}
CONFIG_FILENAME = 'config.ngc'
CONFIG_MARKER = '(fengrave_set '
CONFIG_TEMPLATE = CONFIG_MARKER + '%20s %s )'
TEXT_CODE = 'text_code'
CUT_TYPE_ENGRAVE = 'engrave'
CUT_TYPE_VCARVE = 'v-carve'
HOME_DIR = os.path.expanduser("~")
NGC_FILE = (HOME_DIR + "/None")
# IMAGE_FILE = (HOME_DIR + "/None")
IMAGE_FILE = (HOME_DIR + "/Desktop/None") # TEST
class Settings(object):
"""
Default values for the application settings.
"""
_defaults = {
'HOME_DIR': HOME_DIR,
'NGC_FILE': NGC_FILE,
'IMAGE_FILE': IMAGE_FILE,
'config_filename': CONFIG_FILENAME,
'batch': False,
'show_axis': True,
'show_box': True,
'show_thick': True,
'flip': False,
'mirror': False,
# text plotted on a circle with radius
'text_radius': 0.0,
'outer': True, # outside circle
'upper': True, # on top of cirle
'fontdex': False,
'useIMGsize': False,
# flip normals (V-carve side)
'v_flop': False,
# ball carve (ball nose cutter)
'b_carve': False,
# TODO is "BALL" shape valid, or is this covered by b_carve?
# options: 'VBIT', 'FLAT', 'BALL'
'bit_shape': 'VBIT',
# plot during v-carve calculation [GUI]
'v_pplot': False,
'inlay': False,
'no_comments': True,
# arc fitting, options 'none', 'center', 'radius'
'arc_fit': 'none',
'ext_char': False,
# disable variables in gcode [GCODE]
'var_dis': True,
# cleanup cut directions
'clean_P': True,
'clean_X': True,
'clean_Y': False,
# V-Bit cut directions
'v_clean_P': False,
'v_clean_X': True,
'v_clean_Y': False,
'yscale': 50.8,
'xscale': 100.0,
'line_space': 1.2,
'char_space': 25,
'word_space': 100,
'text_angle': 0.0,
# safe height [GCODE]
'zsafe': 5.0,
# engraving depth [GCODE]
'zcut': -0.1,
# derived value
'max_cut': 0.0,
'line_thickness': 0.25,
'border_thickness': 0.5,
# options: 'Default',
# 'Top-Left', 'Top-Center', 'Top-Right',
# 'Mid-Left', 'Mid-Center', 'Mid-Right',
# 'Bot-Left', 'Bot-Center', 'Bot-Right'
'origin': 'Default',
# options: 'Left', 'Right', 'Center'
'justify': 'Left',
# options: 'in', 'mm'
'units': 'mm',
# options: 'in/min', 'mm/min'
'feed_units': 'mm/min',
# horizontal feedrate [GCODE]
'feedrate': 60.0,
# feedrate for plunging into stock [GCODE]
'plunge_rate': 10.0,
# which bounding boxes are used to calculate line height
# options: 'max_all', 'max_use'
'height_calculation': 'max_use',
# Add a box/circle around plot
'plotbox': False,
# Gap between box and engraving
'boxgap': 6.35,
# font location and name
'fontdir': 'fonts',
'fontfile': 'normal.cxf',
# options: 'engrave', 'v-carve'
'cut_type': CUT_TYPE_ENGRAVE,
# 'cut_type': CUT_TYPE_VCARVE,
# options: 'text', 'image'
'input_type': 'text',
# 'input_type': 'image',
# v-cutter parameters
# options: 'scorch', 'voronoi'
'v_strategy': 'scorch',
'v_bit_angle': 60,
'v_bit_dia': 3.0,
'v_depth_lim': 0.0,
'v_drv_corner': 135,
'v_step_corner': 200,
'v_step_len': 0.254,
# v-carve loop accuracy
'v_acc': 0.00254,
'allowance': 0.0,
# options: 'chr', 'all'
'v_check_all': 'all',
'v_rough_stk': 0.0,
'v_max_cut': 0.0,
# options: 'black', 'white', 'right', 'left', 'minority', 'majority', or 'random'
'bmp_turnpol': 'minority',
'bmp_turdsize': 2,
'bmp_alphamax': 1.0,
'bmp_opttolerance': 0.2,
'bmp_longcurve': True,
'xorigin': 0.0,
'yorigin': 0.0,
'segarc': 5.0,
'accuracy': 0.001,
# diameter of the cleanup bit
'clean_dia': 3.0,
# clean-up step-over as percentage of the clean-up bit diameter
'clean_step': 50,
# Width of the clean-up search area (obsolete before or since v1.65)
'clean_w': 50.8,
'clean_v': 1.27,
'clean_name': '_clean',
# G-Code Default Preamble
#
# G17 : sets XY plane
# G64 P0.003 : G64 P- (motion blending tolerance set to 0.003 (units))
# G64 without P option keeps the best speed possible, no matter how
# far away from the programmed point you end up.
# M3 S3000 : Spindle start at 3000
# M7 : Turn mist coolant on
'gcode_preamble': 'G17 G64 P0.003 M3 S3000 M7',
# G-Code Default Postamble
#
# M5 : Stop Spindle
# M9 : Turn all coolant off
# M2 : End Program
'gcode_postamble': 'M5 M9 M2',
'default_text': 'OOF-Engrave',
'text_code': '',
}
def __init__(self, filename=None, autoload=False):
self._settings = self._defaults.copy()
self._text_code = u''
if filename is not None:
self.from_configfile(filename)
elif autoload:
files_to_try = (
CONFIG_FILENAME,
os.path.expanduser('~') + os.path.sep + CONFIG_FILENAME,
os.path.expanduser('~') + os.path.sep + '.fengraverc'
)
available = [c for c in files_to_try if os.path.isfile(c)]
if len(available) > 0:
self.from_configfile(available[0])
def __iter__(self):
return self._settings.items()
def type(self, name):
return str(type(self._settings[name]))[7:-2]
def set(self, name, value):
if name == TEXT_CODE:
self._set_text_code(value)
else:
cast = CAST_TYPES[self.type(name)]
self._settings[name] = cast(value)
def get(self, name):
return self._settings[name]
# only for use in C-API calls
def get_dict(self):
return self._settings
def reset(self, name=None):
if name is None:
self._settings = self._defaults.copy()
else:
self.set(name, self._defaults[name])
def has_setting(self, name):
return name in self._settings
def get_fontfile(self):
return self.get('fontdir') + os.path.sep + self.get('fontfile')
def from_configfile(self, filename):
with open(filename, 'r') as config:
for line in config.readlines():
if not line.startswith(CONFIG_MARKER):
continue
line = line[len(CONFIG_MARKER):].strip()
name = line.split(' ')[0].strip()
setting = line[len(name):-1].strip()
if not self.has_setting(name) and name in OLD_SETTING_NAMES:
name = OLD_SETTING_NAMES[name]
try:
self.set(name, setting)
except KeyError:
print 'Setting not found:', name # TODO
def to_gcode(self):
gcode = [CONFIG_TEMPLATE % (key, str(value).replace('\n', '\\n'))
for key, value in self._settings.items()]
return gcode
def get_text_code(self):
return self._text_code
def _set_text_code(self, line):
text_code = u''
code_list = line.split()
for char in code_list:
try:
text_code += "%c" % unichr(int(char))
except:
text_code += "%c" % chr(int(char))
self._text_code = text_code
def __str__(self):
return 'Settings:\n' + ('\n'.join([', '.join(map(str, l)) for l in self._settings.items()]))
| gpl-3.0 | -4,150,245,928,453,565,400 | 25.704871 | 100 | 0.517275 | false | 3.370705 | true | false | false |
ic-labs/django-icekit | icekit/api/images/serializers.py | 1 | 2783 | from django.apps import apps
from rest_framework import serializers
from rest_framework.settings import api_settings
from drf_queryfields import QueryFieldsMixin
from icekit.api.base_serializers import WritableSerializerHelperMixin, \
WritableRelatedFieldSettings
Image = apps.get_model('icekit_plugins_image.Image')
MediaCategory = apps.get_model('icekit.MediaCategory')
class MediaCategorySerializer(serializers.ModelSerializer):
# Redefine `name` field here to avoid `unique=True` constraint that will
# be unavoidably applied by DRF validators if we leave the field to be
# autogenerated based on the model.
name = serializers.CharField(
max_length=255,
read_only=False,
required=False,
)
class Meta:
model = MediaCategory
fields = ['id', 'name']
extra_kwargs = {
'id': {
'read_only': False,
'required': False,
},
}
class ImageSerializer(
WritableSerializerHelperMixin,
QueryFieldsMixin,
serializers.HyperlinkedModelSerializer
):
"""
A serializer for an ICEkit Image.
"""
categories = MediaCategorySerializer(
many=True,
)
class Meta:
model = Image
fields = [
api_settings.URL_FIELD_NAME,
'id',
'image',
'width',
'height',
'title',
'alt_text',
'caption',
'credit',
'source',
'external_ref',
'categories',
'license',
'notes',
'date_created',
'date_modified',
'is_ok_for_web',
'is_cropping_allowed',
]
extra_kwargs = {
'url': {
'lookup_field': 'pk',
'view_name': 'api:image-api-detail',
},
}
writable_related_fields = {
'categories': WritableRelatedFieldSettings(
lookup_field=['id', 'name'], can_create=True),
}
# TODO It is probably not a good idea to allow API user to set auto-gen ID
# field, but this is the only way I have found (so far) to allow ID to be
# passed through API to relate existing images.
class RelatedImageSerializer(ImageSerializer):
"""
    A serializer for ICEkit Image relationships that exposes the ID primary
key field to permit referring to existing images by ID, instead of needing
to upload an actual image file every time.
"""
class Meta(ImageSerializer.Meta):
extra_kwargs = {
'id': {
'read_only': False,
'required': False,
},
'image': {
'required': False,
}
}
| mit | 2,230,048,733,898,857,700 | 26.83 | 78 | 0.564499 | false | 4.403481 | false | false | false |
rmcauley/rainwave | rainwave/playlist_objects/artist.py | 1 | 5684 | from libs import db
from libs import config
from rainwave.playlist_objects.metadata import (
AssociatedMetadata,
MetadataUpdateError,
make_searchable_string,
)
class Artist(AssociatedMetadata):
select_by_name_query = "SELECT artist_id AS id, artist_name AS name FROM r4_artists WHERE lower(artist_name) = lower(%s)"
select_by_id_query = "SELECT artist_id AS id, artist_name AS name FROM r4_artists WHERE artist_id = %s"
select_by_song_id_query = 'SELECT r4_artists.artist_id AS id, r4_artists.artist_name AS name, r4_song_artist.artist_is_tag AS is_tag, artist_order AS "order" FROM r4_song_artist JOIN r4_artists USING (artist_id) WHERE song_id = %s ORDER BY artist_order'
disassociate_song_id_query = (
"DELETE FROM r4_song_artist WHERE song_id = %s AND artist_id = %s"
)
associate_song_id_query = "INSERT INTO r4_song_artist (song_id, artist_id, artist_is_tag, artist_order) VALUES (%s, %s, %s, %s)"
has_song_id_query = "SELECT COUNT(song_id) FROM r4_song_artist WHERE song_id = %s AND artist_id = %s"
check_self_size_query = "SELECT COUNT(song_id) FROM r4_song_artist JOIN r4_songs USING (song_id) WHERE artist_id = %s AND song_verified = TRUE"
delete_self_query = "DELETE FROM r4_artists WHERE artist_id = %s"
# needs to be specialized because of artist_order
def associate_song_id(self, song_id, is_tag=None, order=None):
if not order and not self.data.get("order"):
order = db.c.fetch_var(
"SELECT MAX(artist_order) FROM r4_song_artist WHERE song_id = %s",
(song_id,),
)
if not order:
order = -1
order += 1
elif not order:
order = self.data["order"]
self.data["order"] = order
if is_tag == None:
is_tag = self.is_tag
else:
self.is_tag = is_tag
if db.c.fetch_var(self.has_song_id_query, (song_id, self.id)) > 0:
pass
else:
if not db.c.update(
self.associate_song_id_query, (song_id, self.id, is_tag, order)
):
raise MetadataUpdateError(
"Cannot associate song ID %s with %s ID %s"
% (song_id, self.__class__.__name__, self.id)
)
def _insert_into_db(self):
self.id = db.c.get_next_id("r4_artists", "artist_id")
return db.c.update(
"INSERT INTO r4_artists (artist_id, artist_name, artist_name_searchable) VALUES (%s, %s, %s)",
(self.id, self.data["name"], make_searchable_string(self.data["name"])),
)
def _update_db(self):
return db.c.update(
"UPDATE r4_artists SET artist_name = %s, artist_name_searchable = %s WHERE artist_id = %s",
(self.data["name"], make_searchable_string(self.data["name"]), self.id),
)
def _start_cooldown_db(self, sid, cool_time):
# Artists don't have cooldowns on Rainwave.
pass
def _start_election_block_db(self, sid, num_elections):
# Artists don't block elections either (OR DO THEY) (they don't)
pass
def load_all_songs(self, sid, user_id=1):
all_songs = db.c.fetch_all(
"SELECT r4_song_artist.song_id AS id, "
"r4_songs.song_origin_sid AS sid, "
"song_title AS title, "
"CAST(ROUND(CAST(song_rating AS NUMERIC), 1) AS REAL) AS rating, "
"song_exists AS requestable, "
"song_length AS length, "
"song_cool AS cool, "
"song_cool_end AS cool_end, "
"song_url as url, song_link_text as link_text, "
"COALESCE(song_rating_user, 0) AS rating_user, "
"COALESCE(song_fave, FALSE) AS fave, "
"album_name, r4_albums.album_id "
"FROM r4_song_artist "
"JOIN r4_songs USING (song_id) "
"JOIN r4_albums USING (album_id) "
"LEFT JOIN r4_album_sid ON (r4_albums.album_id = r4_album_sid.album_id AND r4_album_sid.sid = %s) "
"LEFT JOIN r4_song_sid ON (r4_songs.song_id = r4_song_sid.song_id AND r4_song_sid.sid = %s) "
"LEFT JOIN r4_song_ratings ON (r4_song_artist.song_id = r4_song_ratings.song_id AND r4_song_ratings.user_id = %s) "
"WHERE r4_song_artist.artist_id = %s AND r4_songs.song_verified = TRUE "
"ORDER BY song_exists DESC, album_name, song_title",
(sid, sid, user_id, self.id),
)
# And of course, now we have to burn extra CPU cycles to make sure the right album name is used and that we present the data
        # in the same format seen everywhere else on the API. Still, much faster than loading individual song objects.
self.data["all_songs"] = {}
for configured_sids in config.station_ids:
self.data["all_songs"][configured_sids] = {}
requestable = True if user_id > 1 else False
for song in all_songs:
if not song["sid"] in config.station_ids:
continue
song["requestable"] = requestable and song["requestable"]
if not song["album_id"] in self.data["all_songs"][song["sid"]]:
self.data["all_songs"][song["sid"]][song["album_id"]] = []
self.data["all_songs"][song["sid"]][song["album_id"]].append(song)
song["albums"] = [
{
"name": song.pop("album_name"),
"id": song.pop("album_id"),
}
]
def to_dict(self, user=None):
d = super(Artist, self).to_dict(user)
d["order"] = self.data["order"]
return d
| gpl-2.0 | 7,762,295,732,175,003,000 | 46.764706 | 257 | 0.574771 | false | 3.281755 | false | false | false |
dgjnpr/py-junos-eznc | lib/jnpr/junos/factory/view.py | 1 | 8729 | import warnings
from contextlib import contextmanager
from copy import deepcopy
from lxml import etree
from jnpr.junos.factory.viewfields import ViewFields
class View(object):
"""
View is the base-class that makes extracting values from XML
data appear as objects with attributes.
"""
ITEM_NAME_XPATH = 'name'
FIELDS = {}
GROUPS = None
# -------------------------------------------------------------------------
# CONSTRUCTOR
# -------------------------------------------------------------------------
def __init__(self, table, view_xml):
"""
:table:
instance of the RunstatTable
:view_xml:
          this should be an lxml etree Element object. This
constructor also accepts a list with a single item/XML
"""
# if as_xml is passed as a list, make sure it only has
# a single item, common response from an xpath search
if isinstance(view_xml, list):
if 1 == len(view_xml):
view_xml = view_xml[0]
else:
raise ValueError("constructor only accepts a single item")
# now ensure that the thing provided is an lxml etree Element
if not isinstance(view_xml, etree._Element):
raise ValueError("constructor only accecpts lxml.etree._Element")
self._table = table
self.ITEM_NAME_XPATH = table.ITEM_NAME_XPATH
self._init_xml(view_xml)
def _init_xml(self, given_xml):
self._xml = given_xml
if self.GROUPS is not None:
self._groups = {}
for xg_name, xg_xpath in self.GROUPS.items():
xg_xml = self._xml.xpath(xg_xpath)
# @@@ this is technically an error; need to trap it
if not len(xg_xml):
continue
self._groups[xg_name] = xg_xml[0]
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def T(self):
""" return the Table instance for the View """
return self._table
@property
def D(self):
""" return the Device instance for this View """
return self.T.D
@property
def name(self):
""" return the name of view item """
if self.ITEM_NAME_XPATH is None:
return self._table.D.hostname
if isinstance(self.ITEM_NAME_XPATH, str):
# simple key
return self._xml.findtext(self.ITEM_NAME_XPATH).strip()
else:
# composite key
# return tuple([self.xml.findtext(i).strip() for i in
# self.ITEM_NAME_XPATH])
return tuple([self.xml.xpath(i)[0].text.strip()
for i in self.ITEM_NAME_XPATH])
# ALIAS key <=> name
key = name
@property
def xml(self):
""" returns the XML associated to the item """
return self._xml
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
def keys(self):
""" list of view keys, i.e. field names """
return self.FIELDS.keys()
def values(self):
""" list of view values """
return [getattr(self, field) for field in self.keys()]
def items(self):
""" list of tuple(key,value) """
return zip(self.keys(), self.values())
def _updater_instance(self, more):
""" called from extend """
if hasattr(more, 'fields'):
self.FIELDS = deepcopy(self.__class__.FIELDS)
self.FIELDS.update(more.fields.end)
if hasattr(more, 'groups'):
self.GROUPS = deepcopy(self.__class__.GROUPS)
self.GROUPS.update(more.groups)
def _updater_class(self, more):
""" called from extend """
if hasattr(more, 'fields'):
self.FIELDS.update(more.fields.end)
if hasattr(more, 'groups'):
self.GROUPS.update(more.groups)
@contextmanager
def updater(self, fields=True, groups=False, all=True, **kvargs):
"""
        Provide the ability for subclasses to extend the field
        definitions. This is implemented as a context manager,
        called from the subclass constructor in the form:
           with self.updater() as more:
more.fields = <dict>
more.groups = <dict> # optional
"""
# ---------------------------------------------------------------------
# create a new object class so we can attach stuff to it arbitrarily.
# then pass that object to the caller, yo!
# ---------------------------------------------------------------------
more = type('RunstatViewMore', (object,), {})()
if fields is True:
more.fields = RunstatMakerViewFields()
# ---------------------------------------------------------------------
# callback through context manager
# ---------------------------------------------------------------------
yield more
updater = self._updater_class if all is True else \
self._updater_instance
updater(more)
def asview(self, view_cls):
""" create a new View object for this item """
return view_cls(self._table, self._xml)
def refresh(self):
"""
~~~ EXPERIMENTAL ~~~
refresh the data from the Junos device. this only works if the table
provides an "args_key", does not update the original table, just this
specific view/item
"""
warnings.warn("Experimental method: refresh")
if self._table.can_refresh is not True:
raise RuntimeError("table does not support this feature")
# create a new table instance that gets only the specific named
# value of this view
tbl_xml = self._table._rpc_get(self.name)
new_xml = tbl_xml.xpath(self._table.ITEM_XPATH)[0]
self._init_xml(new_xml)
return self
# -------------------------------------------------------------------------
# OVERLOADS
# -------------------------------------------------------------------------
def __repr__(self):
""" returns the name of the View with the associate item name """
return "%s:%s" % (self.__class__.__name__, self.name)
def __getattr__(self, name):
"""
returns a view item value, called as :obj.name:
"""
item = self.FIELDS.get(name)
if item is None:
raise ValueError("Unknown field: '%s'" % name)
if 'table' in item:
# if this is a sub-table, then return that now
return item['table'](self.D, self._xml)
# otherwise, not a sub-table, and handle the field
astype = item.get('astype', str)
if 'group' in item:
found = self._groups[item['group']].xpath(item['xpath'])
else:
found = self._xml.xpath(item['xpath'])
len_found = len(found)
if astype is bool:
# handle the boolean flag case separately
return bool(len_found)
if not len_found:
# even for the case of numbers, do not set the value. we
# want to detect "does not exist" vs. defaulting to 0
# -- 2013-nov-19, JLS.
return None
try:
# added exception handler to catch malformed xpath expressesion
# -- 2013-nov-19, JLS.
# added support to handle multiple xpath values, i.e. a list of
# things that have the same xpath expression (common in configs)
# -- 2031-dec-06, JLS
# added support to use the element tag if the text is empty
def _munch(x):
as_str = x if isinstance(x, str) else x.text
if as_str is not None:
as_str = as_str.strip()
if not as_str:
as_str = x.tag # use 'not' to test for empty
return astype(as_str)
if 1 == len_found:
return _munch(found[0])
return [_munch(this) for this in found]
except:
raise RuntimeError("Unable to handle field:'%s'" % name)
# and if we are here, then we didn't handle the field.
raise RuntimeError("Unable to handle field:'%s'" % name)
def __getitem__(self, name):
"""
allow the caller to extract field values using :obj['name']:
the same way they would do :obj.name:
"""
return getattr(self, name)
| apache-2.0 | -8,300,979,280,519,996,000 | 33.366142 | 79 | 0.500745 | false | 4.567766 | false | false | false |
2baOrNot2ba/AntPat | scripts/viewJonespat_dual.py | 1 | 2897 | #!/usr/bin/env python
"""A simple viewer for Jones patterns for dual-polarized representations.
"""
import argparse
import numpy
import matplotlib.pyplot as plt
from antpat.reps.sphgridfun.pntsonsphere import ZenHemisphGrid
from antpat.dualpolelem import DualPolElem, jones2gIXR, IXRJ2IXRM
from antpat.reps.hamaker import convLOFARcc2DPE
import antpat.io.filetypes as antfiles
def plotJonesCanonical(theta, phi, jones, dpelemname):
normalize = True
dbscale = True
polarplt = True
IXRTYPE = 'IXR_J' # Can be IXR_J or IXR_M
g, IXRJ = jones2gIXR(jones)
IXRM = IXRJ2IXRM(IXRJ)
if IXRTYPE == 'IXR_J':
IXR = IXRJ
    elif IXRTYPE == 'IXR_M':
IXR = IXRM
else:
raise RuntimeError("""Error: IXR type {} unknown.
Known types are IXR_J, IXR_M.""".format(IXRTYPE))
fig = plt.figure()
fig.suptitle(dpelemname)
plt.subplot(121, polar=polarplt)
if normalize:
g_max = numpy.max(g)
g = g/g_max
if dbscale:
g = 20*numpy.log10(g)
# nrlvls = 5
# g_lvls = numpy.max(g) - 3.0*numpy.arange(nrlvls)
plt.pcolormesh(phi, numpy.rad2deg(theta), g)
# plt.contour( phi, numpy.rad2deg(theta), g_dress, levels = g_lvls)
plt.colorbar()
plt.title('Amp gain')
plt.subplot(122, polar=polarplt)
plt.pcolormesh(phi, numpy.rad2deg(theta), 10*numpy.log10(IXR))
plt.colorbar()
plt.title('IXR_J')
plt.show()
def plotFFpat():
from antpat.reps.sphgridfun import tvecfun
for polchan in [0, 1]:
E_th = jones[:, :, polchan, 0].squeeze()
E_ph = jones[:, :, polchan, 1].squeeze()
tvecfun.plotvfonsph(THETA, PHI, E_th, E_ph, args.freq,
vcoordlist=['Ludwig3'], projection='orthographic')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("freq", type=float,
help="Frequency in Hertz")
parser.add_argument("filename", help="""
Filename of dual-polarization FF, Hamaker-Arts format,
or a single-polarization FF (p-channel)""")
parser.add_argument("filename_q", nargs='?',
help="""
Filename of second (q-channel) single-polarization FF.
""")
args = parser.parse_args()
if args.filename.endswith(antfiles.HamArtsuffix):
hp = convLOFARcc2DPE(args.filename, [args.freq])
elif args.filename.endswith(antfiles.FEKOsuffix):
hp = DualPolElem()
hp.load_ffes(args.filename, args.filename_q)
else:
raise RuntimeError("dual-pol pattern file type not known")
THETA, PHI = ZenHemisphGrid()
jones = hp.getJonesAlong([args.freq], (THETA, PHI))
plotFFpat()
# plotJonesCanonical(THETA, PHI, jones, os.path.basename(args.filename)
# + ' (' + str(args.freq/1e6) + ' MHz)')
| isc | 7,596,389,316,562,434,000 | 33.488095 | 78 | 0.613393 | false | 3.111708 | false | false | false |
bgmerrell/desmod | tests/test_timescale.py | 1 | 1953 | import pytest
from desmod.timescale import parse_time, scale_time
@pytest.mark.parametrize('test_input, expected', [
('12 s', (12, 's')),
('12s', (12, 's')),
('+12s', (12, 's')),
('-12s', (-12, 's')),
('12.0 s', (12.0, 's')),
('12. s', (12.0, 's')),
('+12.0 s', (12.0, 's')),
('-12.0 s', (-12.0, 's')),
('12.000 s', (12.0, 's')),
('1.2e1 s', (12.0, 's')),
('1.2e+1 s', (12.0, 's')),
('1.2e-1 s', (0.12, 's')),
('-1.2e-1 s', (-0.12, 's')),
('12.s', (12.0, 's')),
('12.0s', (12.0, 's')),
('12.000s', (12.0, 's')),
('1.2e1s', (12.0, 's')),
('.12e+2s', (12.0, 's')),
('.12s', (0.12, 's')),
('12 fs', (12, 'fs')),
('12 ps', (12, 'ps')),
('12 ns', (12, 'ns')),
('12 us', (12, 'us')),
('12 ms', (12, 'ms')),
('12.0ms', (12.0, 'ms')),
('s', (1, 's')),
('fs', (1, 'fs')),
])
def test_parse_time(test_input, expected):
m, u = parse_time(test_input)
assert (m, u) == expected
assert isinstance(m, type(expected[0]))
@pytest.mark.parametrize('test_input', [
'',
'123 s',
'123',
'123.0',
'123 S',
'123 Ms',
'123e1.3 s',
'+-123 s',
'123 ks',
'. s',
'1-.1 s',
'1e1.2 s',
])
def test_parse_time_except(test_input):
with pytest.raises(ValueError) as exc_info:
parse_time(test_input)
assert 'float' not in str(exc_info.value)
def test_parse_time_default():
assert parse_time('123', default_unit='ms') == (123, 'ms')
@pytest.mark.parametrize('input_t, input_tscale, expected', [
((1, 'us'), (1, 'us'), 1),
((1, 'us'), (10, 'us'), 0.1),
((1000, 'us'), (1, 'ms'), 1),
((1, 'us'), (100, 'ms'), 1e-5),
((50, 'ms'), (1, 'ns'), 50000000),
((5.2, 'ms'), (1, 'us'), 5200),
])
def test_scale_time(input_t, input_tscale, expected):
scaled = scale_time(input_t, input_tscale)
assert expected == scaled
assert isinstance(scaled, type(expected))
| mit | -2,659,281,447,644,491,300 | 24.697368 | 62 | 0.453661 | false | 2.453518 | true | false | false |
bourguet/operator_precedence_parsing | operator_precedence.py | 1 | 7999 | #! /usr/bin/env python3
import sys
import lexer
from tree import Node, CompositeNode
class SymbolDesc:
def __init__(self, symbol, lprio, rprio, evaluator):
self.symbol = symbol
self.lprio = lprio
self.rprio = rprio
self.evaluator = evaluator
self.value = None
def __repr__(self):
return '<Symbol {} {}/{}: {}>'.format(self.symbol, self.lprio, self.rprio, self.value)
def identity_evaluator(args):
if len(args) == 1 and type(args[0]) == SymbolDesc:
return Node(args[0].symbol)
else:
return CompositeNode('ID ERROR', args)
def binary_evaluator(args):
if len(args) != 3 or type(args[0]) == SymbolDesc or type(args[1]) != SymbolDesc or type(args[2]) == SymbolDesc:
return CompositeNode('BINARY ERROR', args)
return CompositeNode(args[1].symbol, [args[0], args[2]])
class Parser:
def __init__(self):
self.symbols = {}
self.symbols['$soi$'] = SymbolDesc('$soi$', 0, 0, None)
self.symbols['$eoi$'] = SymbolDesc('$eoi$', 0, 0, None)
self.reset()
def register_symbol(self, oper, lprio, rprio, evaluator=None):
if evaluator is None:
evaluator = binary_evaluator
if type(oper) is str:
self.symbols[oper] = SymbolDesc(oper, lprio, rprio, evaluator)
else:
for op in oper:
self.symbols[op] = SymbolDesc(op, lprio, rprio, evaluator)
def reset(self):
self.stack = [self.symbols['$soi$']]
def id_symbol(self, id):
return SymbolDesc(id, 1000, 1000, identity_evaluator)
def evaluate(self):
idx = len(self.stack)-1
if type(self.stack[idx]) != SymbolDesc:
idx -= 1
curprio = self.stack[idx].lprio
while type(self.stack[idx-1]) != SymbolDesc or self.stack[idx-1].rprio == curprio:
idx -= 1
if type(self.stack[idx]) == SymbolDesc:
curprio = self.stack[idx].lprio
args = self.stack[idx:]
self.stack = self.stack[:idx]
for i in args:
if type(i) == SymbolDesc:
self.stack.append(i.evaluator(args))
return
raise RuntimeError('Internal error: no evaluator found in {}'.format(args))
def tos_symbol(self):
idx = len(self.stack)-1
if type(self.stack[idx]) != SymbolDesc:
idx -= 1
return self.stack[idx]
def shift(self, sym):
while self.tos_symbol().rprio > sym.lprio:
self.evaluate()
self.stack.append(sym)
def push_eoi(self):
self.shift(self.symbols['$eoi$'])
def parse(self, s):
self.reset()
for tk in lexer.tokenize(s):
if tk.lexem in self.symbols:
self.shift(self.symbols[tk.lexem])
elif tk.kind == 'ID':
self.shift(self.id_symbol(tk))
elif tk.kind == 'NUMBER':
self.shift(self.id_symbol(tk))
else:
raise RuntimeError('Unexpected symbol: {}'.format(tk))
self.push_eoi()
if len(self.stack) != 3:
raise RuntimeError('Internal error: bad state of stack at end')
return self.stack[1]
def dump(self):
print('Stack')
for oper in self.stack:
print(' {}'.format(oper))
def open_parenthesis_evaluator(args):
if (len(args) == 3
and type(args[0]) == SymbolDesc and args[0].symbol == '('
and type(args[1]) != SymbolDesc
and type(args[2]) == SymbolDesc and args[2].symbol == ')'):
return args[1]
elif (len(args) == 3
and type(args[0]) != SymbolDesc
and type(args[1]) == SymbolDesc and args[1].symbol == '('
and type(args[2]) == SymbolDesc and args[2].symbol == ')'):
return CompositeNode('call', [args[0]])
elif (len(args) == 4
and type(args[0]) != SymbolDesc
and type(args[1]) == SymbolDesc and args[1].symbol == '('
and type(args[2]) != SymbolDesc
and type(args[3]) == SymbolDesc and args[3].symbol == ')'):
if args[2].token == ',':
callargs = args[2].children
else:
callargs = [args[2]]
callargs.insert(0, args[0])
return CompositeNode('call', callargs)
else:
return CompositeNode('( ERROR', args)
def close_parenthesis_evaluator(args):
return CompositeNode(') ERROR', args)
def open_bracket_evaluator(args):
return CompositeNode('get', [args[0], args[2]])
def close_bracket_evaluator(args):
return CompositeNode('] ERROR', args)
def comma_evaluator(args):
return CompositeNode(',', [x for x in args if type(x) != SymbolDesc])
def unary_evaluator(args):
if len(args) != 2:
return CompositeNode('UNARY ERROR', args)
if type(args[0]) == SymbolDesc and type(args[1]) != SymbolDesc:
return CompositeNode(args[0].symbol, [args[1]])
elif type(args[0]) != SymbolDesc and type(args[1]) == SymbolDesc:
return CompositeNode('post'+args[1].symbol, [args[0]])
else:
return CompositeNode('UNARY ERROR', args)
def unary_or_binary_evaluator(args):
if (len(args) == 2
and type(args[0]) == SymbolDesc
and type(args[1]) != SymbolDesc):
return CompositeNode(args[0].symbol, [args[1]])
elif (len(args) == 2
and type(args[0]) != SymbolDesc
and type(args[1]) == SymbolDesc):
return CompositeNode('post'+args[1].symbol, [args[0]])
elif (len(args) == 3
and type(args[0]) != SymbolDesc
and type(args[1]) == SymbolDesc
and type(args[2]) != SymbolDesc):
return CompositeNode(args[1].symbol, [args[0], args[2]])
else:
return CompositeNode('1,2-ARY ERROR', args)
def question_evaluator(args):
if (len(args) != 5
or type(args[0]) == SymbolDesc
or type(args[1]) != SymbolDesc or args[1].symbol != '?'
or type(args[2]) == SymbolDesc
or type(args[3]) != SymbolDesc or args[3].symbol != ':'
or type(args[4]) == SymbolDesc):
return CompositeNode('? ERROR', args)
return CompositeNode('?', [args[0], args[2], args[4]])
def colon_evaluator(args):
return CompositeNode(': ERROR', args)
def cexp_parser():
parser = Parser()
    parser.register_symbol(',', 2, 2, comma_evaluator)
parser.register_symbol(['=', '*=', '/=', '%=', '+=', '-=', '<<=', '>>=', '&=', '|=', '^='], 5, 4)
parser.register_symbol('?', 7, 1.5, question_evaluator)
parser.register_symbol(':', 1.5, 6, colon_evaluator)
parser.register_symbol('||', 8, 9)
parser.register_symbol('&&', 10, 11)
parser.register_symbol('|', 12, 13)
parser.register_symbol('^', 14, 15)
parser.register_symbol('&', 16, 17, unary_or_binary_evaluator)
parser.register_symbol(['==', '!='], 18, 19)
parser.register_symbol(['<', '>', '<=', '>='], 20, 21)
parser.register_symbol(['<<', '>>'], 22, 23)
parser.register_symbol(['+', '-'], 24, 25, unary_or_binary_evaluator)
parser.register_symbol(['/', '%'], 26, 27)
parser.register_symbol(['*'], 26, 27, unary_or_binary_evaluator)
parser.register_symbol('**', 29, 28)
parser.register_symbol(['++', '--', '~', '!'], 31, 30, unary_evaluator) # +, -, *, & should be here
parser.register_symbol(['.', '->'], 32, 33)
parser.register_symbol('(', 100, 1, open_parenthesis_evaluator)
parser.register_symbol(')', 1, 100, close_parenthesis_evaluator)
parser.register_symbol('[', 100, 1, open_bracket_evaluator)
parser.register_symbol(']', 1, 100, close_bracket_evaluator)
return parser
def main(args):
parser = cexp_parser()
for s in args[1:]:
try:
exp = parser.parse(s)
print('{} -> {}'.format(s, exp))
except RuntimeError as run_error:
print('Unable to parse {}: {}'.format(s, run_error))
if __name__ == "__main__":
main(sys.argv)
| bsd-2-clause | -1,557,918,058,041,208,000 | 33.478448 | 115 | 0.56182 | false | 3.537815 | false | false | false |
eclee25/flu-SDI-exploratory-age | scripts/create_fluseverity_figs/export_zOR_classif.py | 1 | 10068 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 6/18/14
###Function: Export zOR retrospective and early warning classifications into csv file format (SDI and ILINet, national and regional for SDI)
### Use nation-level peak-based retrospective classification for SDI region analysis
###Import data: R_export/OR_zip3_week_outpatient_cl.csv, R_export/allpopstat_zip3_season_cl.csv
#### These data were cleaned with data_extraction/clean_OR_hhsreg_week_outpatient.R and exported with OR_zip3_week.sql
#### allpopstat_zip3_season_cl.csv includes child, adult, and other populations; popstat_zip3_season_cl.csv includes only child and adult populations
###Command Line: python export_zOR_classif.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
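# e.g., a hypothetical zip3 with 52 ILI visits in one week and a season
# population of 650,000 would give 52 / 650000 * 100000 = 8.0 per 100,000.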
### packages/modules ###
import csv
## local modules ##
import functions as fxn
### data structures ###
### called/local plotting parameters ###
nw = fxn.gp_normweeks # number of normalization weeks in baseline period
### functions ###
def print_dict_to_file(dic, filename):
with open(filename, 'w+') as fwriter:
fwriter.write("season,mn_retro,mn_early\n")
for key, value in dic.items():
fwriter.write("%s,%s,%s\n" % (key, value[0], value[1]))
def print_dict_to_file2(dic, filename):
with open(filename, 'w+') as fwriter:
fwriter.write("season,region,mn_retro,mn_early\n")
for key, value in dic.items():
fwriter.write("%s,%s,%s,%s\n" % (key[0], key[1], value[0], value[1]))
def print_dict_to_file3(dic, filename):
with open(filename, 'w+') as fwriter:
fwriter.write('season,state,mn_retro,mn_early\n')
for key, value in dic.items():
fwriter.write("%s,%s,%s,%s\n" % (key[0], key[1], value[0], value[1]))
##############################################
# SDI NATIONAL
# national files
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# d_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR = fxn.classif_zOR_processing(d_wk, d_incid53ls, d_zOR53ls, thanks)
# ##############################################
# # ILINet NATIONAL
# # national files
# incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/all_cdc_source_data.csv','r')
# incidin.readline() # remove header
# incid = csv.reader(incidin, delimiter=',')
# popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Census/Import_Data/totalpop_age_Census_98-14.csv', 'r')
# pop = csv.reader(popin, delimiter=',')
# thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
# thanksin.readline() # remove header
# thanks=csv.reader(thanksin, delimiter=',')
# # dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 100,000 in US population in second calendar year of flu season, dict_OR[week] = OR
# d_wk, d_incid, d_OR = fxn.ILINet_week_OR_processing(incid, pop)
# d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# # d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
# d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# # d_ILINet_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
# d_ILINet_classifzOR = fxn.classif_zOR_processing(d_wk, d_incid53ls, d_zOR53ls, thanks)
##############################################
# SDI REGION: nation-level peak-basesd retrospective classification
# regional files
reg_incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/OR_zip3_week_outpatient_cl.csv', 'r')
reg_incidin.readline()
regincid = csv.reader(reg_incidin, delimiter=',')
reg_popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/allpopstat_zip3_season_cl.csv','r')
reg_popin.readline()
regpop = csv.reader(reg_popin, delimiter=',')
# national files
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 100,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
_, d_zip3_reg, d_incid_reg, d_OR_reg = fxn.week_OR_processing_region(regincid, regpop)
# dict_zOR_reg[(week, hhsreg)] = zOR
d_zOR_reg = fxn.week_zOR_processing_region(d_wk, d_OR_reg)
# dict_incid53ls_reg[(seasonnum, region)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_reg[(seasonnum, region)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_reg[(seasonnum, region)] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls_reg, d_OR53ls_reg, d_zOR53ls_reg = fxn.week_plotting_dicts_region(d_wk, d_incid_reg, d_OR_reg, d_zOR_reg)
# dict_classifindex[seasonnum] = (index of first retro period week, index of first early warning period week)
d_classifindex = fxn.classif_zOR_index(d_wk, d_incid53ls, d_incid53ls_reg, 'region', thanks)
# d_classifzOR_reg[(seasonnum, region)] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR_reg = fxn.classif_zOR_region_processing(d_classifindex, d_wk, d_zOR53ls_reg)
##############################################
# SDI STATE: nation-level peak-basesd retrospective classification
# import same files as regional files
reg_incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/OR_zip3_week_outpatient_cl.csv', 'r')
reg_incidin.readline()
regincid = csv.reader(reg_incidin, delimiter=',')
reg_popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/allpopstat_zip3_season_cl.csv','r')
reg_popin.readline()
regpop = csv.reader(reg_popin, delimiter=',')
# national files
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 100,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
_, d_zip3_reg, d_incid_state, d_OR_state = fxn.week_OR_processing_state(regincid, regpop)
# dict_zOR_state[(week, state)] = zOR
d_zOR_state = fxn.week_zOR_processing_state(d_wk, d_OR_state)
# dict_incid53ls_state[(seasonnum, state)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_reg[(seasonnum, state)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_state[(seasonnum, state)] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls_state, d_OR53ls_state, d_zOR53ls_state = fxn.week_plotting_dicts_state(d_wk, d_incid_state, d_OR_state, d_zOR_state)
# dict_classifindex[seasonnum] = (index of first retro period week, index of first early warning period week)
d_classifindex = fxn.classif_zOR_index_state(d_wk, d_incid53ls, d_incid53ls_state, 'state', thanks)
# d_classifzOR_state[(seasonnum, state)] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR_state = fxn.classif_zOR_state_processing(d_classifindex, d_wk, d_zOR53ls_state)
##############################################
print d_classifzOR
print d_classifzOR_reg
# fn1 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_national_classifications_%s.csv' %(nw)
# print_dict_to_file(d_classifzOR, fn1)
# fn2 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/ILINet_national_classifications_%s.csv' %(nw)
# print_dict_to_file(d_ILINet_classifzOR, fn2)
fn3 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_regional_classifications_%sreg.csv' %(nw)
print_dict_to_file2(d_classifzOR_reg, fn3)
fn4 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_state_classifications_%sst.csv' %(nw)
print_dict_to_file3(d_classifzOR_state, fn4) | mit | -8,794,785,964,571,561,000 | 58.934524 | 206 | 0.698153 | false | 2.526474 | false | false | false |
RoboCupULaval/StrategyIA | ai/GameDomainObjects/ball.py | 1 | 1041 | # Under MIT License, see LICENSE.txt
from typing import Dict
from Util import Position
class Ball:
def __init__(self, position=Position()):
self._position = position
self._velocity = Position()
def update(self, new_dict: Dict):
self.position = new_dict['position']
self.velocity = new_dict['velocity']
def is_moving_fast(self, fast_speed = 600.0): # mm/s
return fast_speed < self.velocity.norm
def is_mobile(self, immobile_speed = 300.0): # mm/s
return immobile_speed < self.velocity.norm
def is_immobile(self):
return not self.is_mobile()
@property
def position(self) -> Position:
return self._position
@position.setter
def position(self, value):
assert isinstance(value, Position)
self._position = value
@property
def velocity(self) -> Position:
return self._velocity
@velocity.setter
def velocity(self, value):
assert isinstance(value, Position)
self._velocity = value
| mit | -1,920,990,554,748,314,000 | 23.209302 | 56 | 0.630163 | false | 4.034884 | false | false | false |
RogerRueegg/lvw-young-talents | src/profiles/views.py | 1 | 2796 | from __future__ import unicode_literals
from django.views import generic
from django.shortcuts import get_object_or_404, redirect
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from . import forms
from . import models
import datetime
class ShowProfile(LoginRequiredMixin, generic.TemplateView):
template_name = "profiles/show_profile.html"
http_method_names = ['get']
def get(self, request, *args, **kwargs):
slug = self.kwargs.get('slug')
if slug:
profile = get_object_or_404(models.Profile, slug=slug)
user = profile.user
else:
user = self.request.user
if user == self.request.user:
kwargs["editable"] = True
kwargs["show_user"] = user
return super(ShowProfile, self).get(request, *args, **kwargs)
class EditProfile(LoginRequiredMixin, generic.TemplateView):
template_name = "profiles/edit_profile.html"
http_method_names = ['get', 'post']
def get(self, request, *args, **kwargs):
user = self.request.user
if "user_form" not in kwargs:
kwargs["user_form"] = forms.UserForm(instance=user)
if "profile_form" not in kwargs:
kwargs["profile_form"] = forms.ProfileForm(instance=user.profile)
return super(EditProfile, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
user = self.request.user
user_form = forms.UserForm(request.POST, instance=user)
profile_form = forms.ProfileForm(request.POST,
request.FILES,
instance=user.profile)
if not (user_form.is_valid() and profile_form.is_valid()):
message = ""
if profile_form.errors:
if 'phone_number' in profile_form.errors.keys():
message += "Bitte gibt Deine Natelnummer wie folgt ein: +41791234567. "
if 'bdate' in profile_form.errors.keys():
message += "Bitte gibt das Geburtsdatum wie folgt ein: 2002-01-15 für 15. Januar 2002"
messages.error(request, message)
user_form = forms.UserForm(instance=user)
profile_form = forms.ProfileForm(instance=user.profile)
return super(EditProfile, self).get(request,
user_form=user_form,
profile_form=profile_form)
# Both forms are fine. Time to save!
user_form.save()
profile = profile_form.save(commit=False)
profile.user = user
profile.save()
messages.success(request, "Profile details saved!")
return redirect("profiles:show_self")
| mit | 7,446,359,648,485,783,000 | 41.348485 | 106 | 0.598927 | false | 4.086257 | false | false | false |
Erotemic/ibeis | super_setup.py | 1 | 26677 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Requirements:
pip install gitpython click ubelt
"""
import re
from os.path import exists
from os.path import join
from os.path import dirname
from os.path import abspath
import ubelt as ub
import functools
class ShellException(Exception):
"""
Raised when shell returns a non-zero error code
"""
class DirtyRepoError(Exception):
"""
If the repo is in an unexpected state, its very easy to break things using
automated scripts. To be safe, we don't do anything. We ensure this by
raising this error.
"""
def parse_version(package):
"""
Statically parse the version number from __init__.py
CommandLine:
python -c "import setup; print(setup.parse_version('ovharn'))"
"""
from os.path import dirname, join
import ast
init_fpath = join(dirname(__file__), package, '__init__.py')
with open(init_fpath) as file_:
sourcecode = file_.read()
pt = ast.parse(sourcecode)
class VersionVisitor(ast.NodeVisitor):
def visit_Assign(self, node):
for target in node.targets:
if target.id == '__version__':
self.version = node.value.s
visitor = VersionVisitor()
visitor.visit(pt)
return visitor.version
class GitURL(object):
"""
Represent and transform git urls between protocols defined in [3]_.
The code in GitURL is largely derived from [1]_ and [2]_.
Credit to @coala and @FriendCode.
Note:
while this code aims to suport protocols defined in [3]_, it is only
tested for specific use cases and therefore might need to be improved.
References:
.. [1] https://github.com/coala/git-url-parse
.. [2] https://github.com/FriendCode/giturlparse.py
.. [3] https://git-scm.com/docs/git-clone#URLS
Example:
>>> self = GitURL('[email protected]:computer-vision/netharn.git')
>>> print(ub.repr2(self.parts()))
>>> print(self.format('ssh'))
>>> print(self.format('https'))
>>> self = GitURL('https://gitlab.kitware.com/computer-vision/netharn.git')
>>> print(ub.repr2(self.parts()))
>>> print(self.format('ssh'))
>>> print(self.format('https'))
"""
SYNTAX_PATTERNS = {
# git allows for a url style syntax
'url': re.compile(r'(?P<transport>\w+://)'
r'((?P<user>\w+[^@]*@))?'
r'(?P<host>[a-z0-9_.-]+)'
r'((?P<port>:[0-9]+))?'
r'/(?P<path>.*\.git)'),
# git allows for ssh style syntax
'ssh': re.compile(r'(?P<user>\w+[^@]*@)'
r'(?P<host>[a-z0-9_.-]+)'
r':(?P<path>.*\.git)'),
}
r"""
Ignore:
# Helper to build the parse pattern regexes
def named(key, regex):
return '(?P<{}>{})'.format(key, regex)
def optional(pat):
return '({})?'.format(pat)
parse_patterns = {}
# Standard url format
transport = named('transport', r'\w+://')
user = named('user', r'\w+[^@]*@')
host = named('host', r'[a-z0-9_.-]+')
port = named('port', r':[0-9]+')
path = named('path', r'.*\.git')
pat = ''.join([transport, optional(user), host, optional(port), '/', path])
parse_patterns['url'] = pat
pat = ''.join([user, host, ':', path])
parse_patterns['ssh'] = pat
print(ub.repr2(parse_patterns))
"""
def __init__(self, url):
self._url = url
self._parts = None
def parts(self):
"""
Parses a GIT URL and returns an info dict.
Returns:
dict: info about the url
Raises:
Exception : if parsing fails
"""
info = {
'syntax': '',
'host': '',
'user': '',
'port': '',
'path': None,
'transport': '',
}
for syntax, regex in self.SYNTAX_PATTERNS.items():
match = regex.search(self._url)
if match:
info['syntax'] = syntax
info.update(match.groupdict())
break
else:
raise Exception('Invalid URL {!r}'.format(self._url))
# change none to empty string
for k, v in info.items():
if v is None:
info[k] = ''
return info
def format(self, protocol):
"""
Change the protocol of the git URL
"""
parts = self.parts()
if protocol == 'ssh':
parts['user'] = 'git@'
url = ''.join([
parts['user'], parts['host'], ':', parts['path']
])
else:
parts['transport'] = protocol + '://'
parts['port'] = ''
parts['user'] = ''
url = ''.join([
parts['transport'], parts['user'], parts['host'],
parts['port'], '/', parts['path']
])
return url
class Repo(ub.NiceRepr):
"""
Abstraction that references a git repository, and is able to manipulate it.
A common use case is to define a `remote` and a `code_dpath`, which lets
you check and ensure that the repo is cloned and on a particular branch.
You can also query its status, and pull, and perform custom git commands.
Args:
*args: name, dpath, code_dpath, remotes, remote, branch
Attributes:
All names listed in args are attributse. In addition, the class also
exposes these derived attributes.
url (URI): where the primary location is
Example:
>>> # Here is a simple example referencing ubelt
>>> from super_setup import *
>>> import ubelt as ub
>>> repo = Repo(
>>> remote='https://github.com/Erotemic/ubelt.git',
>>> code_dpath=ub.ensuredir(ub.expandpath('~/tmp/demo-repos')),
>>> )
>>> print('repo = {}'.format(repo))
>>> repo.check()
>>> repo.ensure()
>>> repo.check()
>>> repo.status()
>>> repo._cmd('python setup.py build')
>>> repo._cmd('./run_doctests.sh')
repo = <Repo('ubelt')>
>>> # Here is a less simple example referencing ubelt
>>> from super_setup import *
>>> import ubelt as ub
>>> repo = Repo(
>>> name='ubelt-local',
>>> remote='github',
>>> branch='master',
>>> remotes={
>>> 'github': 'https://github.com/Erotemic/ubelt.git',
>>> 'fakemirror': 'https://gitlab.com/Erotemic/ubelt.git',
>>> },
>>> code_dpath=ub.ensuredir(ub.expandpath('~/tmp/demo-repos')),
>>> )
>>> print('repo = {}'.format(repo))
>>> repo.ensure()
>>> repo._cmd('python setup.py build')
>>> repo._cmd('./run_doctests.sh')
"""
def __init__(repo, **kwargs):
repo.name = kwargs.pop('name', None)
repo.dpath = kwargs.pop('dpath', None)
repo.code_dpath = kwargs.pop('code_dpath', None)
repo.remotes = kwargs.pop('remotes', None)
repo.remote = kwargs.pop('remote', None)
repo.branch = kwargs.pop('branch', 'master')
repo._logged_lines = []
repo._logged_cmds = []
if repo.remote is None:
if repo.remotes is None:
raise ValueError('must specify some remote')
else:
if len(repo.remotes) > 1:
raise ValueError('remotes are ambiguous, specify one')
else:
repo.remote = ub.peek(repo.remotes)
else:
if repo.remotes is None:
_default_remote = 'origin'
repo.remotes = {
_default_remote: repo.remote
}
repo.remote = _default_remote
repo.url = repo.remotes[repo.remote]
if repo.name is None:
suffix = repo.url.split('/')[-1]
repo.name = suffix.split('.git')[0]
if repo.dpath is None:
repo.dpath = join(repo.code_dpath, repo.name)
repo.pkg_dpath = join(repo.dpath, repo.name)
for path_attr in ['dpath', 'code_dpath']:
path = getattr(repo, path_attr)
if path is not None:
setattr(repo, path_attr, ub.expandpath(path))
repo.verbose = kwargs.pop('verbose', 3)
if kwargs:
raise ValueError('unknown kwargs = {}'.format(kwargs.keys()))
repo._pygit = None
def set_protocol(self, protocol):
"""
Changes the url protocol to either ssh or https
Args:
protocol (str): can be ssh or https
"""
gurl = GitURL(self.url)
self.url = gurl.format(protocol)
def info(repo, msg):
repo._logged_lines.append(('INFO', 'INFO: ' + msg))
if repo.verbose >= 1:
print(msg)
def debug(repo, msg):
repo._logged_lines.append(('DEBUG', 'DEBUG: ' + msg))
if repo.verbose >= 1:
print(msg)
def _getlogs(repo):
return '\n'.join([t[1] for t in repo._logged_lines])
def __nice__(repo):
return '{}, branch={}'.format(repo.name, repo.branch)
def _cmd(repo, command, cwd=ub.NoParam, verbose=ub.NoParam):
if verbose is ub.NoParam:
verbose = repo.verbose
if cwd is ub.NoParam:
cwd = repo.dpath
repo._logged_cmds.append((command, cwd))
repo.debug('Run {!r} in {!r}'.format(command, cwd))
info = ub.cmd(command, cwd=cwd, verbose=verbose)
if verbose:
if info['out'].strip():
repo.info(info['out'])
if info['err'].strip():
repo.debug(info['err'])
if info['ret'] != 0:
raise ShellException(ub.repr2(info))
return info
@property
# @ub.memoize_property
def pygit(repo):
""" pip install gitpython """
import git as gitpython
if repo._pygit is None:
repo._pygit = gitpython.Repo(repo.dpath)
return repo._pygit
def develop(repo):
devsetup_script_fpath = join(repo.dpath, 'run_developer_setup.sh')
if not exists(devsetup_script_fpath):
raise AssertionError('Assume we always have run_developer_setup.sh: repo={!r}'.format(repo))
repo._cmd(devsetup_script_fpath, cwd=repo.dpath)
def doctest(repo):
devsetup_script_fpath = join(repo.dpath, 'run_doctests.sh')
if not exists(devsetup_script_fpath):
raise AssertionError('Assume we always have run_doctests.sh: repo={!r}'.format(repo))
repo._cmd(devsetup_script_fpath, cwd=repo.dpath)
def clone(repo):
if exists(repo.dpath):
raise ValueError('cannot clone into non-empty directory')
args = '--recursive'
if repo.branch is not None:
args += ' -b {}'.format(repo.branch)
command = 'git clone {args} {url} {dpath}'.format(args=args, url=repo.url, dpath=repo.dpath)
repo._cmd(command, cwd=repo.code_dpath)
def _assert_clean(repo):
if repo.pygit.is_dirty():
raise DirtyRepoError('The repo={} is dirty'.format(repo))
def check(repo):
repo.ensure(dry=True)
def versions(repo):
"""
Print current version information
"""
fmtkw = {}
fmtkw['pkg'] = parse_version(repo.pkg_dpath) + ','
fmtkw['sha1'] = repo._cmd('git rev-parse HEAD', verbose=0)['out'].strip()
try:
fmtkw['tag'] = repo._cmd('git describe --tags', verbose=0)['out'].strip() + ','
except ShellException:
fmtkw['tag'] = '<None>,'
fmtkw['branch'] = repo.pygit.active_branch.name + ','
fmtkw['repo'] = repo.name + ','
repo.info('repo={repo:<14} pkg={pkg:<12} tag={tag:<18} branch={branch:<10} sha1={sha1}'.format(
**fmtkw))
def ensure_clone(repo):
if exists(repo.dpath):
repo.debug('No need to clone existing repo={}'.format(repo))
else:
repo.debug('Clone non-existing repo={}'.format(repo))
repo.clone()
def ensure(repo, dry=False):
"""
Ensure that the repo is checked out on your local machine, that the
correct branch is checked out, and the upstreams are targeting the
correct remotes.
"""
if repo.verbose > 0:
if dry:
repo.debug(ub.color_text('Checking {}'.format(repo), 'blue'))
else:
repo.debug(ub.color_text('Ensuring {}'.format(repo), 'blue'))
if not exists(repo.dpath):
repo.debug('NEED TO CLONE {}'.format(repo))
if dry:
return
repo.ensure_clone()
repo._assert_clean()
# Ensure all registered remotes exist
for remote_name, remote_url in repo.remotes.items():
try:
remote = repo.pygit.remotes[remote_name]
have_urls = list(remote.urls)
if remote_url not in have_urls:
print('WARNING: REMOTE NAME EXIST BUT URL IS NOT {}. '
'INSTEAD GOT: {}'.format(remote_url, have_urls))
except (IndexError):
try:
print('NEED TO ADD REMOTE {}->{} FOR {}'.format(
remote_name, remote_url, repo))
if not dry:
repo._cmd('git remote add {} {}'.format(remote_name, remote_url))
except ShellException:
if remote_name == repo.remote:
# Only error if the main remote is not available
raise
# Ensure we have the right remote
try:
remote = repo.pygit.remotes[repo.remote]
except IndexError:
if not dry:
raise AssertionError('Something went wrong')
else:
remote = None
if remote is not None:
try:
if not remote.exists():
raise IndexError
else:
repo.debug('The requested remote={} name exists'.format(remote))
except IndexError:
repo.debug('WARNING: remote={} does not exist'.format(remote))
else:
if remote.exists():
repo.debug('Requested remote does exists')
remote_branchnames = [ref.remote_head for ref in remote.refs]
if repo.branch not in remote_branchnames:
repo.info('Branch name not found in local remote. Attempting to fetch')
if dry:
repo.info('dry run, not fetching')
else:
repo._cmd('git fetch {}'.format(remote.name))
repo.info('Fetch was successful')
else:
repo.debug('Requested remote does NOT exist')
# Ensure the remote points to the right place
if repo.url not in list(remote.urls):
repo.debug('WARNING: The requested url={} disagrees with remote urls={}'.format(repo.url, list(remote.urls)))
if dry:
repo.info('Dry run, not updating remote url')
else:
repo.info('Updating remote url')
repo._cmd('git remote set-url {} {}'.format(repo.remote, repo.url))
# Ensure we are on the right branch
if repo.branch != repo.pygit.active_branch.name:
repo.debug('NEED TO SET BRANCH TO {} for {}'.format(repo.branch, repo))
try:
repo._cmd('git checkout {}'.format(repo.branch))
except ShellException:
repo.debug('Checkout failed. Branch name might be ambiguous. Trying again')
try:
repo._cmd('git checkout -b {} {}/{}'.format(repo.branch, repo.remote, repo.branch))
except ShellException:
raise Exception('does the branch exist on the remote?')
tracking_branch = repo.pygit.active_branch.tracking_branch()
if tracking_branch is None or tracking_branch.remote_name != repo.remote:
repo.debug('NEED TO SET UPSTREAM FOR FOR {}'.format(repo))
try:
remote = repo.pygit.remotes[repo.remote]
if not remote.exists():
raise IndexError
except IndexError:
repo.debug('WARNING: remote={} does not exist'.format(remote))
else:
if remote.exists():
remote_branchnames = [ref.remote_head for ref in remote.refs]
if repo.branch not in remote_branchnames:
if dry:
repo.info('Branch name not found in local remote. Dry run, use ensure to attempt to fetch')
else:
repo.info('Branch name not found in local remote. Attempting to fetch')
repo._cmd('git fetch {}'.format(repo.remote))
remote_branchnames = [ref.remote_head for ref in remote.refs]
if repo.branch not in remote_branchnames:
raise Exception('Branch name still does not exist')
if not dry:
repo._cmd('git branch --set-upstream-to={remote}/{branch} {branch}'.format(
remote=repo.remote, branch=repo.branch
))
else:
repo.info('Would attempt to set upstream')
# Print some status
repo.debug(' * branch = {} -> {}'.format(
repo.pygit.active_branch.name,
repo.pygit.active_branch.tracking_branch(),
))
def pull(repo):
repo._assert_clean()
repo._cmd('git pull')
def status(repo):
repo._cmd('git status')
def worker(repo, funcname, kwargs):
repo.verbose = 0
func = getattr(repo, funcname)
func(**kwargs)
return repo
class RepoRegistry(ub.NiceRepr):
def __init__(registery, repos):
registery.repos = repos
def __nice__(registery):
return ub.repr2(registery.repos, si=1, nl=1)
def apply(registery, funcname, num_workers=0, **kwargs):
print(ub.color_text('--- APPLY {} ---'.format(funcname), 'white'))
print(' * num_workers = {!r}'.format(num_workers))
if num_workers == 0:
processed_repos = []
for repo in registery.repos:
print(ub.color_text('--- REPO = {} ---'.format(repo), 'blue'))
try:
getattr(repo, funcname)(**kwargs)
except DirtyRepoError:
print(ub.color_text('Ignoring dirty repo={}'.format(repo), 'red'))
processed_repos.append(repo)
else:
from concurrent import futures
# with futures.ThreadPoolExecutor(max_workers=num_workers) as pool:
with futures.ProcessPoolExecutor(max_workers=num_workers) as pool:
tasks = []
for i, repo in enumerate(registery.repos):
future = pool.submit(worker, repo, funcname, kwargs)
future.repo = repo
tasks.append(future)
processed_repos = []
for future in futures.as_completed(tasks):
repo = future.repo
print(ub.color_text('--- REPO = {} ---'.format(repo), 'blue'))
try:
repo = future.result()
except DirtyRepoError:
print(ub.color_text('Ignoring dirty repo={}'.format(repo), 'red'))
else:
print(repo._getlogs())
processed_repos.append(repo)
print(ub.color_text('--- FINISHED APPLY {} ---'.format(funcname), 'white'))
SHOW_CMDLOG = 1
if SHOW_CMDLOG:
print('LOGGED COMMANDS')
import os
ORIG_CWD = MY_CWD = os.getcwd()
for repo in processed_repos:
print('# --- For repo = {!r} --- '.format(repo))
for t in repo._logged_cmds:
cmd, cwd = t
if cwd is None:
cwd = os.get_cwd()
if cwd != MY_CWD:
print('cd ' + ub.shrinkuser(cwd))
MY_CWD = cwd
print(cmd)
print('cd ' + ub.shrinkuser(ORIG_CWD))
def determine_code_dpath():
"""
Returns a good place to put the code for the internal dependencies.
Returns:
PathLike: the directory where you want to store your code
In order, the methods used for determing this are:
* the `--codedpath` command line flag (may be undocumented in the CLI)
* the `--codedir` command line flag (may be undocumented in the CLI)
* the CODE_DPATH environment variable
* the CODE_DIR environment variable
* the directory above this script (e.g. if this is in ~/code/repo/super_setup.py then code dir resolves to ~/code)
* the user's ~/code directory.
"""
import os
candidates = [
ub.argval('--codedir', default=''),
ub.argval('--codedpath', default=''),
os.environ.get('CODE_DPATH', ''),
os.environ.get('CODE_DIR', ''),
]
valid = [c for c in candidates if c != '']
if len(valid) > 0:
code_dpath = valid[0]
else:
try:
# This file should be in the top level of a repo, the directory from
# this file should be the code directory.
this_fpath = abspath(__file__)
code_dpath = abspath(dirname(dirname(this_fpath)))
except NameError:
code_dpath = ub.expandpath('~/code')
if not exists(code_dpath):
code_dpath = ub.expandpath(code_dpath)
# if CODE_DIR and not exists(CODE_DIR):
# import warnings
# warnings.warn('environment variable CODE_DIR={!r} was defined, but does not exist'.format(CODE_DIR))
if not exists(code_dpath):
raise Exception(ub.codeblock(
'''
Please specify a correct code_dir using the CLI or ENV.
code_dpath={!r} does not exist.
'''.format(code_dpath)))
return code_dpath
def make_netharn_registry():
code_dpath = determine_code_dpath()
CommonRepo = functools.partial(Repo, code_dpath=code_dpath)
repos = [
# The util libs
CommonRepo(
name='utool', branch='master', remote='Erotemic',
remotes={'Erotemic': '[email protected]:Erotemic/utool.git'},
),
CommonRepo(
name='vtool_ibeis', branch='master', remote='Erotemic',
remotes={'Erotemic': '[email protected]:Erotemic/vtool_ibeis.git'},
),
CommonRepo(
name='dtool_ibeis', branch='master', remote='Erotemic',
remotes={'Erotemic': '[email protected]:Erotemic/dtool_ibeis.git'},
),
CommonRepo(
name='plottool_ibeis', branch='master', remote='Erotemic',
remotes={'Erotemic': '[email protected]:Erotemic/plottool_ibeis.git'},
),
CommonRepo(
name='guitool_ibeis', branch='master', remote='Erotemic',
remotes={'Erotemic': '[email protected]:Erotemic/guitool_ibeis.git'},
),
CommonRepo(
name='ibeis', branch='master', remote='Erotemic',
remotes={'Erotemic': '[email protected]:Erotemic/ibeis.git'},
),
]
registery = RepoRegistry(repos)
return registery
def main():
import click
registery = make_netharn_registry()
only = ub.argval('--only', default=None)
if only is not None:
only = only.split(',')
registery.repos = [repo for repo in registery.repos if repo.name in only]
num_workers = int(ub.argval('--workers', default=8))
if ub.argflag('--serial'):
num_workers = 0
protocol = ub.argval('--protocol', None)
if ub.argflag('--https'):
protocol = 'https'
if ub.argflag('--http'):
protocol = 'http'
if ub.argflag('--ssh'):
protocol = 'ssh'
if protocol is not None:
for repo in registery.repos:
repo.set_protocol(protocol)
default_context_settings = {
'help_option_names': ['-h', '--help'],
'allow_extra_args': True,
'ignore_unknown_options': True}
@click.group(context_settings=default_context_settings)
def cli_group():
pass
@cli_group.add_command
@click.command('pull', context_settings=default_context_settings)
def pull():
registery.apply('pull', num_workers=num_workers)
@cli_group.add_command
@click.command('ensure', context_settings=default_context_settings)
def ensure():
"""
Ensure is the live run of "check".
"""
registery.apply('ensure', num_workers=num_workers)
@cli_group.add_command
@click.command('ensure_clone', context_settings=default_context_settings)
def ensure_clone():
registery.apply('ensure_clone', num_workers=num_workers)
@cli_group.add_command
@click.command('check', context_settings=default_context_settings)
def check():
"""
Check is just a dry run of "ensure".
"""
registery.apply('check', num_workers=num_workers)
@cli_group.add_command
@click.command('status', context_settings=default_context_settings)
def status():
registery.apply('status', num_workers=num_workers)
@cli_group.add_command
@click.command('develop', context_settings=default_context_settings)
def develop():
registery.apply('develop', num_workers=0)
@cli_group.add_command
@click.command('doctest', context_settings=default_context_settings)
def doctest():
registery.apply('doctest')
@cli_group.add_command
@click.command('versions', context_settings=default_context_settings)
def versions():
registery.apply('versions')
cli_group()
if __name__ == '__main__':
main()
| apache-2.0 | -6,734,773,674,303,073,000 | 34.009186 | 125 | 0.52922 | false | 4.09722 | false | false | false |
ToonTownInfiniteRepo/ToontownInfinite | toontown/toon/GroupPanel.py | 1 | 18189 | from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from otp.nametag import NametagGlobals
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.showbase import DirectObject
from toontown.toon import ToonAvatarPanel
from toontown.toontowngui import TTDialog
class GroupPanel(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('GroupPanel')
def __init__(self, boardingParty):
self.boardingParty = boardingParty
self.leaderId = self.boardingParty.getGroupLeader(localAvatar.doId)
self.elevatorIdList = self.boardingParty.getElevatorIdList()
self.frame = None
self.confirmQuitDialog = None
self.goButton = None
self.destScrollList = None
self.destFrame = None
self.goingToLabel = None
self.destIndexSelected = 0
self.__load()
self.ignore('stickerBookEntered')
self.accept('stickerBookEntered', self.__forceHide)
self.ignore('stickerBookExited')
self.accept('stickerBookExited', self.__forceShow)
return
def cleanup(self):
base.setCellsAvailable(base.leftCells, 1)
self.quitButton.destroy()
self.hideButton.destroy()
self.showButton.destroy()
self.scrollList.destroy()
if self.goButton:
self.goButton.destroy()
self.goButton = None
if self.destScrollList:
self.destScrollList.destroy()
self.destScrollList = None
if self.destFrame:
self.destFrame.destroy()
self.destFrame = None
if self.goingToLabel:
self.goingToLabel.destroy()
self.goingToLabel = None
if self.frame:
self.frame.destroy()
self.frame = None
self.leaveButton = None
self.boardingParty = None
self.ignoreAll()
return
def __load(self):
self.guiBg = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_groupListBg')
self.__defineConstants()
if self.boardingParty.maxSize == 4:
bgImage = self.guiBg.find('**/tt_t_gui_brd_memberListTop_half')
bgImageZPos = 0.14
frameZPos = -0.121442
quitButtonZPos = -0.019958
else:
bgImage = self.guiBg.find('**/tt_t_gui_brd_memberListTop')
bgImageZPos = 0
frameZPos = 0.0278943
quitButtonZPos = -0.30366
guiButtons = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_status')
self.frame = DirectFrame(parent=base.a2dLeftCenter, relief=None, image=bgImage, image_scale=(0.5, 1, 0.5), image_pos=(0, 0, bgImageZPos), textMayChange=1, pos=(0.32, 0, 0))
self.frameBounds = self.frame.getBounds()
leaveButtonGui = loader.loadModel('phase_3.5/models/gui/tt_m_gui_brd_leaveBtn')
leaveImageList = (leaveButtonGui.find('**/tt_t_gui_brd_leaveUp'),
leaveButtonGui.find('**/tt_t_gui_brd_leaveDown'),
leaveButtonGui.find('**/tt_t_gui_brd_leaveHover'),
leaveButtonGui.find('**/tt_t_gui_brd_leaveUp'))
self.leaderButtonImage = guiButtons.find('**/tt_t_gui_brd_statusLeader')
self.availableButtonImage = guiButtons.find('**/tt_t_gui_brd_statusOn')
self.battleButtonImage = guiButtons.find('**/tt_t_gui_brd_statusBattle')
if localAvatar.doId == self.leaderId:
quitText = TTLocalizer.QuitBoardingPartyLeader
else:
quitText = TTLocalizer.QuitBoardingPartyNonLeader
self.disabledOrangeColor = Vec4(1, 0.5, 0.25, 0.9)
self.quitButton = DirectButton(parent=self.frame, relief=None, image=leaveImageList, image_scale=0.065, command=self.__handleLeaveButton, text=('',
quitText,
quitText,
''), text_scale=0.06, text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), text_pos=(0.045, 0.0), text_align=TextNode.ALeft, pos=(0.223, 0, quitButtonZPos), image3_color=self.disabledOrangeColor)
arrowGui = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_arrow')
hideImageList = (arrowGui.find('**/tt_t_gui_brd_arrow_up'), arrowGui.find('**/tt_t_gui_brd_arrow_down'), arrowGui.find('**/tt_t_gui_brd_arrow_hover'))
showImageList = (arrowGui.find('**/tt_t_gui_brd_arrow_up'), arrowGui.find('**/tt_t_gui_brd_arrow_down'), arrowGui.find('**/tt_t_gui_brd_arrow_hover'))
self.hideButton = DirectButton(parent=base.a2dLeftCenter, relief=None, text_pos=(0, 0.15), text_scale=0.06, text_align=TextNode.ALeft, text_fg=Vec4(0, 0, 0, 1), text_shadow=Vec4(1, 1, 1, 1), image=hideImageList, image_scale=(-0.35, 1, 0.5), pos=(0.04, 0, 0.03), scale=1.05, command=self.hide)
self.showButton = DirectButton(parent=base.a2dLeftCenter, relief=None, text=('', TTLocalizer.BoardingGroupShow, TTLocalizer.BoardingGroupShow), text_pos=(0.03, 0), text_scale=0.06, text_align=TextNode.ALeft, text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), image=showImageList, image_scale=(0.35, 1, 0.5), pos=(0.04, 0, 0.03), scale=1.05, command=self.show)
self.showButton.hide()
self.frame.show()
self.__makeAvatarNameScrolledList()
if localAvatar.doId == self.leaderId:
self.__makeDestinationScrolledList()
else:
self.__makeDestinationFrame()
self.__makeGoingToLabel()
self.accept('updateGroupStatus', self.__checkGroupStatus)
self.accept('ToonBattleIdUpdate', self.__possibleGroupUpdate)
base.setCellsAvailable([base.leftCells[1], base.leftCells[2]], 0)
if self.boardingParty.isGroupLeader(localAvatar.doId):
base.setCellsAvailable([base.leftCells[0]], 0)
self.__addTestNames(self.boardingParty.maxSize)
self.guiBg.removeNode()
guiButtons.removeNode()
leaveButtonGui.removeNode()
arrowGui.removeNode()
return
def __defineConstants(self):
self.forcedHidden = False
self.textFgcolor = Vec4(0.0, 0.6, 0.2, 1.0)
self.textBgRolloverColor = Vec4(1, 1, 0, 1)
self.textBgDownColor = Vec4(0.5, 0.9, 1, 1)
self.textBgDisabledColor = Vec4(0.4, 0.8, 0.4, 1)
def __handleLeaveButton(self):
messenger.send('wakeup')
if not base.cr.playGame.getPlace().getState() == 'elevator':
self.confirmQuitDialog = TTDialog.TTDialog(style=TTDialog.YesNo, text=TTLocalizer.QuitBoardingPartyConfirm, command=self.__confirmQuitCallback)
self.confirmQuitDialog.show()
def __confirmQuitCallback(self, value):
if self.confirmQuitDialog:
self.confirmQuitDialog.destroy()
self.confirmQuitDialog = None
if value > 0:
if self.boardingParty:
self.boardingParty.requestLeave()
return
def __handleGoButton(self):
offset = self.destScrollList.getSelectedIndex()
elevatorId = self.elevatorIdList[offset]
self.boardingParty.requestGoToFirstTime(elevatorId)
def __handleCancelGoButton(self):
self.boardingParty.cancelGoToElvatorDest()
def __checkGroupStatus(self):
if not self.boardingParty:
return
self.notify.debug('__checkGroupStatus %s' % self.boardingParty.getGroupMemberList(localAvatar.doId))
myMemberList = self.boardingParty.getGroupMemberList(localAvatar.doId)
self.scrollList.removeAndDestroyAllItems(refresh=0)
if myMemberList:
for avId in myMemberList:
avatarButton = self.__getAvatarButton(avId)
if avatarButton:
self.scrollList.addItem(avatarButton, refresh=0)
self.scrollList.refresh()
def __possibleGroupUpdate(self, avId):
self.notify.debug('GroupPanel __possibleGroupUpdate')
if not self.boardingParty:
return
myMemberList = self.boardingParty.getGroupMemberList(localAvatar.doId)
if avId in myMemberList:
self.__checkGroupStatus()
def __makeAvatarNameScrolledList(self):
friendsListGui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
self.scrollList = DirectScrolledList(parent=self.frame, relief=None, incButton_image=(friendsListGui.find('**/FndsLst_ScrollUp'),
friendsListGui.find('**/FndsLst_ScrollDN'),
friendsListGui.find('**/FndsLst_ScrollUp_Rllvr'),
friendsListGui.find('**/FndsLst_ScrollUp')), incButton_pos=(0.0, 0.0, -0.35), incButton_image1_color=Vec4(1.0, 0.9, 0.4, 0), incButton_image3_color=Vec4(1.0, 1.0, 0.6, 0), incButton_scale=(1.0, 1.0, -1.0), incButton_relief=None, decButton_image=(friendsListGui.find('**/FndsLst_ScrollUp'),
friendsListGui.find('**/FndsLst_ScrollDN'),
friendsListGui.find('**/FndsLst_ScrollUp_Rllvr'),
friendsListGui.find('**/FndsLst_ScrollUp')), decButton_pos=(0.0, 0.0, 0.1), decButton_image1_color=Vec4(1.0, 1.0, 0.6, 0), decButton_image3_color=Vec4(1.0, 1.0, 0.6, 0), decButton_relief=None, itemFrame_pos=(-0.195, 0.0, 0.185), itemFrame_borderWidth=(0.1, 0.1), numItemsVisible=8, itemFrame_scale=1.0, forceHeight=0.07, items=[], pos=(0, 0, 0.075))
clipper = PlaneNode('clipper')
clipper.setPlane(Plane(Vec3(-1, 0, 0), Point3(0.235, 0, 0)))
clipNP = self.scrollList.attachNewNode(clipper)
self.scrollList.setClipPlane(clipNP)
friendsListGui.removeNode()
return
def __makeDestinationScrolledList(self):
arrowGui = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_gotoArrow')
incrementImageList = (arrowGui.find('**/tt_t_gui_brd_arrowL_gotoUp'),
arrowGui.find('**/tt_t_gui_brd_arrowL_gotoDown'),
arrowGui.find('**/tt_t_gui_brd_arrowL_gotoHover'),
arrowGui.find('**/tt_t_gui_brd_arrowL_gotoUp'))
if self.boardingParty.maxSize == 4:
zPos = -0.177083
else:
zPos = -0.463843
bottomImage = self.guiBg.find('**/tt_t_gui_brd_memberListBtm_leader')
self.destScrollList = DirectScrolledList(
parent=self.frame,
relief=None,
image=bottomImage,
image_scale=(0.5, 1, 0.5),
incButton_image=incrementImageList,
incButton_pos=(0.217302, 0, 0.07),
incButton_image3_color=Vec4(1.0, 1.0, 0.6, 0.5),
incButton_scale=(-0.5, 1, 0.5),
incButton_relief=None,
incButtonCallback=self.__informDestChange,
decButton_image=incrementImageList,
decButton_pos=(-0.217302, 0, 0.07),
decButton_scale=(0.5, 1, 0.5),
decButton_image3_color=Vec4(1.0, 1.0, 0.6, 0.5),
decButton_relief=None,
decButtonCallback=self.__informDestChange,
itemFrame_pos=(0, 0, 0.06),
itemFrame_borderWidth=(0.1, 0.1),
numItemsVisible=1,
itemFrame_scale=TTLocalizer.GPdestScrollList,
forceHeight=0.07,
items=[],
pos=(0, 0, zPos),
scrollSpeed=0.1)
arrowGui.removeNode()
self.__addDestNames()
self.__makeGoButton()
return
def __addDestNames(self):
for i in xrange(len(self.elevatorIdList)):
destName = self.__getDestName(i)
self.destScrollList.addItem(destName, refresh=0)
self.destScrollList.refresh()
def __getDestName(self, offset):
elevatorId = self.elevatorIdList[offset]
elevator = base.cr.doId2do.get(elevatorId)
if elevator:
destName = elevator.getDestName()
return destName
def __makeDestinationFrame(self):
destName = self.__getDestName(self.destIndexSelected)
if self.boardingParty.maxSize == 4:
zPos = -0.12
else:
zPos = -0.404267
bottomImage = self.guiBg.find('**/tt_t_gui_brd_memberListBtm_nonLeader')
self.destFrame = DirectFrame(parent=self.frame, relief=None, image=bottomImage, image_scale=(0.5, 1, 0.5), text=destName, text_align=TextNode.ACenter, text_scale=TTLocalizer.GPdestFrame, pos=(0, 0, zPos))
return
def __makeGoButton(self):
goGui = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_gotoBtn')
self.goImageList = (goGui.find('**/tt_t_gui_brd_gotoUp'),
goGui.find('**/tt_t_gui_brd_gotoDown'),
goGui.find('**/tt_t_gui_brd_gotoHover'),
goGui.find('**/tt_t_gui_brd_gotoUp'))
self.cancelGoImageList = (goGui.find('**/tt_t_gui_brd_cancelGotoUp'),
goGui.find('**/tt_t_gui_brd_cancelGotoDown'),
goGui.find('**/tt_t_gui_brd_cancelGotoHover'),
goGui.find('**/tt_t_gui_brd_cancelGotoUp'))
if self.boardingParty.maxSize == 4:
zPos = -0.028
zPos = -0.0360483
else:
zPos = -0.0353787
self.goButton = DirectButton(parent=self.destScrollList, relief=None, image=self.goImageList, image_scale=(0.48, 1, 0.48), command=self.__handleGoButton, text=('',
TTLocalizer.BoardingGo,
TTLocalizer.BoardingGo,
''), text_scale=TTLocalizer.GPgoButton, text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), text_pos=(0, -0.12), pos=(-0.003, 0, zPos))
goGui.removeNode()
return
def __getAvatarButton(self, avId):
toon = base.cr.doId2do.get(avId)
if not toon:
return None
toonName = toon.getName()
inBattle = 0
buttonImage = self.availableButtonImage
if toon.battleId:
inBattle = 1
buttonImage = self.battleButtonImage
if avId == localAvatar.doId:
self.__forceHide()
else:
if avId == self.leaderId:
buttonImage = self.leaderButtonImage
if avId == localAvatar.doId:
self.__forceShow()
return DirectButton(parent=self.frame, relief=None, image=buttonImage, image_scale=(0.06, 1.0, 0.06), text=toonName, text_align=TextNode.ALeft, text_wordwrap=16, text_scale=0.04, text_pos=(0.05, -0.015), text_fg=self.textFgcolor, text1_bg=self.textBgDownColor, text2_bg=self.textBgRolloverColor, text3_fg=self.textBgDisabledColor, pos=(0, 0, 0.2), command=self.__openToonAvatarPanel, extraArgs=[toon, avId])
def __openToonAvatarPanel(self, avatar, avId):
if avId != localAvatar.doId and avatar:
messenger.send('clickedNametag', [avatar])
def __addTestNames(self, num):
for i in xrange(num):
avatarButton = self.__getAvatarButton(localAvatar.doId)
self.scrollList.addItem(avatarButton, refresh=0)
self.scrollList.refresh()
def __isForcedHidden(self):
if self.forcedHidden and self.frame.isHidden():
return True
else:
return False
def hide(self):
self.frame.hide()
self.hideButton.hide()
self.showButton.show()
def show(self):
self.frame.show()
self.forcedHidden = False
self.showButton.hide()
self.hideButton.show()
def __forceHide(self):
if not self.frame.isHidden():
self.forcedHidden = True
self.hide()
def __forceShow(self):
if self.__isForcedHidden():
self.show()
def __informDestChange(self):
self.boardingParty.informDestChange(self.destScrollList.getSelectedIndex())
def changeDestination(self, offset):
if localAvatar.doId != self.leaderId:
self.destIndexSelected = offset
if self.destFrame:
self.destFrame['text'] = self.__getDestName(self.destIndexSelected)
def scrollToDestination(self, offset):
if localAvatar.doId == self.leaderId:
if self.destScrollList:
self.destIndexSelected = offset
self.destScrollList.scrollTo(offset)
def __makeGoingToLabel(self):
if self.boardingParty.maxSize == 4:
zPos = -0.0466546
else:
zPos = -0.331731
self.goingToLabel = DirectLabel(parent=self.frame, relief=None, text=TTLocalizer.BoardingGoingTo, text_scale=0.045, text_align=TextNode.ALeft, text_fg=Vec4(0, 0, 0, 1), pos=(-0.1966, 0, zPos))
return
def disableQuitButton(self):
if self.quitButton and not self.quitButton.isEmpty():
self.quitButton['state'] = DGG.DISABLED
def enableQuitButton(self):
if self.quitButton and not self.quitButton.isEmpty():
self.quitButton['state'] = DGG.NORMAL
def disableGoButton(self):
if self.goButton and not self.goButton.isEmpty():
self.goButton['state'] = DGG.DISABLED
self.goButton['image_color'] = Vec4(1, 1, 1, 0.4)
def enableGoButton(self):
if self.goButton and not self.goButton.isEmpty():
self.goButton['state'] = DGG.NORMAL
self.goButton['image_color'] = Vec4(1, 1, 1, 1)
def disableDestinationScrolledList(self):
if self.destScrollList and not self.destScrollList.isEmpty():
self.destScrollList.incButton['state'] = DGG.DISABLED
self.destScrollList.decButton['state'] = DGG.DISABLED
def enableDestinationScrolledList(self):
if self.destScrollList and not self.destScrollList.isEmpty():
self.destScrollList.incButton['state'] = DGG.NORMAL
self.destScrollList.decButton['state'] = DGG.NORMAL
def changeGoToCancel(self):
if self.goButton and not self.goButton.isEmpty():
self.goButton['image'] = self.cancelGoImageList
self.goButton['text'] = (TTLocalizer.BoardingCancelGo,
TTLocalizer.BoardingCancelGo,
TTLocalizer.BoardingCancelGo,
'')
self.goButton['command'] = self.__handleCancelGoButton
def changeCancelToGo(self):
if self.goButton and not self.goButton.isEmpty():
self.goButton['image'] = self.goImageList
self.goButton['text'] = ('',
TTLocalizer.BoardingGo,
TTLocalizer.BoardingGo,
'')
self.goButton['command'] = self.__handleGoButton
| mit | 558,389,024,408,593,340 | 45.164975 | 415 | 0.630601 | false | 3.357143 | false | false | false |
endlessm/chromium-browser | third_party/chromite/scripts/cros_oobe_autoconfig_unittest.py | 1 | 6578 | # -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for cros_oobe_autoconfig.py"""
from __future__ import print_function
import json
import os
import pwd
import sys
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import image_lib
from chromite.lib import osutils
from chromite.scripts import cros_oobe_autoconfig
pytestmark = [cros_test_lib.pytestmark_inside_only,
cros_test_lib.pytestmark_skip('https://crbug.com/1000761')]
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
_TEST_DOMAIN = 'test.com'
_TEST_CLI_PARAMETERS = (
'image.bin', '--x-demo-mode', '--x-network-onc', '{}',
'--x-network-auto-connect', '--x-eula-send-statistics',
'--x-eula-auto-accept', '--x-update-skip', '--x-wizard-auto-enroll',
'--enrollment-domain', _TEST_DOMAIN)
_TEST_CONFIG_JSON = {
'demo-mode': True,
'network-onc': '{}',
'network-auto-connect': True,
'eula-send-statistics': True,
'eula-auto-accept': True,
'update-skip': True,
'wizard-auto-enroll': True
}
_IMAGE_SIZE = 4 * 1024 * 1024
_BLOCK_SIZE = 4096
_SECTOR_SIZE = 512
_STATEFUL_SIZE = _IMAGE_SIZE // 2
_STATEFUL_OFFSET = 120 * _SECTOR_SIZE
class SanitizeDomainTests(cros_test_lib.TestCase):
"""Tests for SanitizeDomain()"""
def testASCII(self):
"""Tests that ASCII-only domains are not mangled."""
self.assertEqual(cros_oobe_autoconfig.SanitizeDomain('FoO.cOm'), 'foo.com')
def testUnicodeCase(self):
"""Tests that ASCII-only domains are not mangled."""
self.assertEqual(cros_oobe_autoconfig.SanitizeDomain(u'föo.com'),
'xn--fo-fka.com')
self.assertEqual(cros_oobe_autoconfig.SanitizeDomain(u'fÖo.com'),
'xn--fo-fka.com')
def testHomographs(self):
"""Tests that a Unicode domain is punycoded."""
# "tеѕt.com" looks like "test.com" but isn't!
self.assertEqual(cros_oobe_autoconfig.SanitizeDomain(
u't\u0435\u0455t.com'), 'xn--tt-nlc2k.com')
class PrepareImageTests(cros_test_lib.MockTempDirTestCase):
"""Tests for PrepareImage()"""
def setUp(self):
"""Create a small test disk image for testing."""
self.image = os.path.join(self.tempdir, 'image.bin')
state = os.path.join(self.tempdir, 'state.bin')
# Allocate space for the disk image and stateful partition.
osutils.AllocateFile(self.image, _IMAGE_SIZE)
osutils.AllocateFile(state, _STATEFUL_SIZE)
commands = (
# Format the stateful image as ext4.
['/sbin/mkfs.ext4', state],
# Create the GPT headers and entry for the stateful partition.
['cgpt', 'create', self.image],
['cgpt', 'boot', '-p', self.image],
['cgpt', 'add', self.image, '-t', 'data',
'-l', str(constants.CROS_PART_STATEFUL),
'-b', str(_STATEFUL_OFFSET // _SECTOR_SIZE),
'-s', str(_STATEFUL_SIZE // _SECTOR_SIZE), '-i', '1'],
# Copy the stateful partition into the GPT image.
['dd', 'if=%s' % state, 'of=%s' % self.image, 'conv=notrunc', 'bs=4K',
'seek=%d' % (_STATEFUL_OFFSET // _BLOCK_SIZE),
'count=%s' % (_STATEFUL_SIZE // _BLOCK_SIZE)],
['sync'])
for cmd in commands:
cros_build_lib.run(cmd, quiet=True)
# Run the preparation script on the image.
cros_oobe_autoconfig.main([self.image] + list(_TEST_CLI_PARAMETERS)[1:])
# Mount the image's stateful partition for inspection.
self.mount_tmp = os.path.join(self.tempdir, 'mount')
osutils.SafeMakedirs(self.mount_tmp)
self.mount_ctx = image_lib.LoopbackPartitions(self.image, self.mount_tmp)
self.mount = os.path.join(self.mount_tmp,
'dir-%s' % constants.CROS_PART_STATEFUL)
self.oobe_autoconf_path = os.path.join(self.mount, 'unencrypted',
'oobe_auto_config')
self.config_path = os.path.join(self.oobe_autoconf_path, 'config.json')
self.domain_path = os.path.join(self.oobe_autoconf_path,
'enrollment_domain')
def testChronosOwned(self):
"""Test that the OOBE autoconfig directory is owned by chronos."""
with self.mount_ctx:
# TODO(mikenichols): Remove unneeded mount call once context
# handling is in place, http://crrev/c/1795578
_ = self.mount_ctx.Mount((constants.CROS_PART_STATEFUL,))[0]
chronos_uid = pwd.getpwnam('chronos').pw_uid
self.assertExists(self.oobe_autoconf_path)
self.assertEqual(os.stat(self.config_path).st_uid, chronos_uid)
def testConfigContents(self):
"""Test that the config JSON matches the correct data."""
with self.mount_ctx:
# TODO(mikenichols): Remove unneeded mount call once context
# handling is in place, http://crrev/c/1795578
_ = self.mount_ctx.Mount((constants.CROS_PART_STATEFUL,))[0]
with open(self.config_path) as fp:
data = json.load(fp)
self.assertEqual(data, _TEST_CONFIG_JSON)
def testDomainContents(self):
"""Test that the domain file matches the correct data."""
with self.mount_ctx:
# TODO(mikenichols): Remove unneeded mount call once context
# handling is in place, http://crrev/c/1795578
_ = self.mount_ctx.Mount((constants.CROS_PART_STATEFUL,))[0]
self.assertEqual(osutils.ReadFile(self.domain_path), _TEST_DOMAIN)
class GetConfigContentTests(cros_test_lib.MockTestCase):
"""Tests for GetConfigContent()"""
def testBasic(self):
"""Test that config is generated correctly with all options."""
opts = cros_oobe_autoconfig.ParseArguments(_TEST_CLI_PARAMETERS)
conf = cros_oobe_autoconfig.GetConfigContent(opts)
self.assertEqual(json.loads(conf), _TEST_CONFIG_JSON)
def testUnspecified(self):
"""Test that config is generated correctly with some options missing."""
cli = list(_TEST_CLI_PARAMETERS)
cli.remove('--x-update-skip')
expected = dict(_TEST_CONFIG_JSON)
expected['update-skip'] = False
opts = cros_oobe_autoconfig.ParseArguments(cli)
conf = cros_oobe_autoconfig.GetConfigContent(opts)
self.assertEqual(json.loads(conf), expected)
class MainTests(cros_test_lib.MockTestCase):
"""Tests for main()"""
def setUp(self):
self.PatchObject(cros_oobe_autoconfig, 'PrepareImage')
def testBasic(self):
"""Simple smoke test"""
cros_oobe_autoconfig.main(_TEST_CLI_PARAMETERS)
| bsd-3-clause | 1,724,904,975,207,223,000 | 36.352273 | 79 | 0.65896 | false | 3.291938 | true | false | false |
jonberliner/keras | keras/optimizers.py | 1 | 7022 | from __future__ import absolute_import
import theano
import theano.tensor as T
import numpy as np
from .utils.theano_utils import shared_zeros, shared_scalar
from six.moves import zip
def clip_norm(g, c, n):
if c > 0:
g = T.switch(T.ge(n, c), g * c / n, g)
return g
def kl_divergence(p, p_hat):
return p_hat - p + p * T.log(p / p_hat)
class Optimizer(object):
def get_updates(self, params, constraints, loss):
raise NotImplementedError
def get_gradients(self, loss, params):
grads = T.grad(loss, params)
if hasattr(self, 'clipnorm') and self.clipnorm > 0:
norm = T.sqrt(sum([T.sum(g ** 2) for g in grads]))
grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
return grads
def get_config(self):
return {"name": self.__class__.__name__}
class SGD(Optimizer):
def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, *args, **kwargs):
self.__dict__.update(kwargs)
self.__dict__.update(locals())
self.iterations = shared_scalar(0)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
lr = self.lr * (1.0 / (1.0 + self.decay * self.iterations))
updates = [(self.iterations, self.iterations + 1.)]
for p, g, c in zip(params, grads, constraints):
m = shared_zeros(p.get_value().shape) # momentum
v = self.momentum * m - lr * g # velocity
updates.append((m, v))
if self.nesterov:
new_p = p + self.momentum * v - lr * g
else:
new_p = p + v
updates.append((p, c(new_p))) # apply constraints
return updates
def get_config(self):
return {"name": self.__class__.__name__,
"lr": self.lr,
"momentum": self.momentum,
"decay": self.decay,
"nesterov": self.nesterov}
class RMSprop(Optimizer):
def __init__(self, lr=0.001, rho=0.9, epsilon=1e-6, *args, **kwargs):
self.__dict__.update(kwargs)
self.__dict__.update(locals())
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
accumulators = [shared_zeros(p.get_value().shape) for p in params]
updates = []
for p, g, a, c in zip(params, grads, accumulators, constraints):
new_a = self.rho * a + (1 - self.rho) * g ** 2 # update accumulator
updates.append((a, new_a))
new_p = p - self.lr * g / T.sqrt(new_a + self.epsilon)
updates.append((p, c(new_p))) # apply constraints
return updates
def get_config(self):
return {"name": self.__class__.__name__,
"lr": self.lr,
"rho": self.rho,
"epsilon": self.epsilon}
class Adagrad(Optimizer):
def __init__(self, lr=0.01, epsilon=1e-6, *args, **kwargs):
self.__dict__.update(kwargs)
self.__dict__.update(locals())
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
accumulators = [shared_zeros(p.get_value().shape) for p in params]
updates = []
for p, g, a, c in zip(params, grads, accumulators, constraints):
new_a = a + g ** 2 # update accumulator
updates.append((a, new_a))
new_p = p - self.lr * g / T.sqrt(new_a + self.epsilon)
updates.append((p, c(new_p))) # apply constraints
return updates
def get_config(self):
return {"name": self.__class__.__name__,
"lr": self.lr,
"epsilon": self.epsilon}
class Adadelta(Optimizer):
'''
Reference: http://arxiv.org/abs/1212.5701
'''
def __init__(self, lr=1.0, rho=0.95, epsilon=1e-6, *args, **kwargs):
self.__dict__.update(kwargs)
self.__dict__.update(locals())
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
accumulators = [shared_zeros(p.get_value().shape) for p in params]
delta_accumulators = [shared_zeros(p.get_value().shape) for p in params]
updates = []
for p, g, a, d_a, c in zip(params, grads, accumulators, delta_accumulators, constraints):
new_a = self.rho * a + (1 - self.rho) * g ** 2 # update accumulator
updates.append((a, new_a))
# use the new accumulator and the *old* delta_accumulator
update = g * T.sqrt(d_a + self.epsilon) / T.sqrt(new_a + self.epsilon)
new_p = p - self.lr * update
updates.append((p, c(new_p))) # apply constraints
# update delta_accumulator
new_d_a = self.rho * d_a + (1 - self.rho) * update ** 2
updates.append((d_a, new_d_a))
return updates
def get_config(self):
return {"name": self.__class__.__name__,
"lr": self.lr,
"rho": self.rho,
"epsilon": self.epsilon}
class Adam(Optimizer):
'''
Reference: http://arxiv.org/abs/1412.6980
Default parameters follow those provided in the original paper
lambda is renamed kappa.
'''
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, kappa=1-1e-8, *args, **kwargs):
self.__dict__.update(kwargs)
self.__dict__.update(locals())
self.iterations = shared_scalar(0)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
updates = [(self.iterations, self.iterations+1.)]
i = self.iterations
beta_1_t = self.beta_1 * (self.kappa**i)
# the update below seems missing from the paper, but is obviously required
beta_2_t = self.beta_2 * (self.kappa**i)
for p, g, c in zip(params, grads, constraints):
m = theano.shared(p.get_value() * 0.) # zero init of moment
v = theano.shared(p.get_value() * 0.) # zero init of velocity
m_t = (beta_1_t * m) + (1 - beta_1_t) * g
v_t = (beta_2_t * v) + (1 - beta_2_t) * (g**2)
m_b_t = m_t / (1 - beta_1_t)
v_b_t = v_t / (1 - beta_2_t)
p_t = p - self.lr * m_b_t / (T.sqrt(v_b_t) + self.epsilon)
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((p, c(p_t))) # apply constraints
return updates
def get_config(self):
return {"name": self.__class__.__name__,
"lr": self.lr,
"beta_1": self.beta_1,
"beta_2": self.beta_2,
"epsilon": self.epsilon,
"kappa": self.kappa}
# aliases
sgd = SGD
rmsprop = RMSprop
adagrad = Adagrad
adadelta = Adadelta
adam = Adam
from .utils.generic_utils import get_from_module
def get(identifier, kwargs=None):
return get_from_module(identifier, globals(), 'optimizer', instantiate=True, kwargs=kwargs)
| mit | 6,855,293,725,898,082,000 | 32.122642 | 104 | 0.548277 | false | 3.44047 | true | false | false |
errikos/amtt | amtt/exporter/isograph/__init__.py | 1 | 2915 | """Exporter module for Isograph Availability Workbench."""
import logging
import networkx as nx
from itertools import count
from amtt.translator.ir import component_basename
from amtt.exporter import Exporter
from amtt.exporter.isograph.emitter.xml import XmlEmitter
from amtt.exporter.isograph.rbd import Rbd
from amtt.exporter.isograph.failure_models import fm_export
_logger = logging.getLogger(__name__)
class IsographExporter(Exporter):
"""Exporter to export the model to Isograph."""
def __init__(self, translator):
"""Initialize IsographExporter."""
self._translator = translator
self._emitter = XmlEmitter(translator.output_basedir)
@staticmethod
def normalize_block_names(ir_container):
"""Normalize the component (block) names.
Isograph imposes a 40 character limit for the component names.
In case the model uses template components, there is a big chance that
the names will grow very big in length. Therefore, we store the
base name in the description field and assign a unique integer (ID)
as the components name.
"""
g = ir_container.component_graph
if ir_container.uses_templates:
_logger.info('Template usage detected:')
_logger.info(' * Normalizing component names for Isograph')
# Create relabeling mapping.
# Each component name will be replaced with a number (ID).
relabel_mapping = {n: c for n, c in zip(g.nodes_iter(), count(1))}
del relabel_mapping['ROOT'] # We don't want to relabel ROOT
# Relabel and rename components graph
# -- copy=False means "relabel in-place"
nx.relabel_nodes(g, relabel_mapping, copy=False)
for u, v in nx.bfs_edges(g, 'ROOT'):
# -- get a hold of the associated object
vo = g.node[v]['obj']
# -- set base name as description
vo.description = component_basename(vo.name)
# -- set ID number as name
vo.name = v
# Note: No need to relabel or rename failures graph
def export(self):
"""Export the model to Isograph importable format."""
# Normalize block names, if necessary
self.normalize_block_names(self._translator.ir_container)
# Export RBD (blocks, nodes, connections)
self._export_rbd()
# Export failure model definitions
self._export_failure_models()
# Write output file
self._emitter.commit()
def _export_rbd(self):
# Create block diagram from input
rbd = Rbd()
rbd.from_ir_container(self._translator.ir_container)
# Dump reliability block diagram to output
rbd.serialize(self._emitter)
def _export_failure_models(self):
fm_export(self._translator.ir_container, self._emitter)
| gpl-3.0 | 8,075,975,394,118,118,000 | 39.486111 | 78 | 0.64048 | false | 4.236919 | false | false | false |
corredD/upy | autodeskmaya/mayaHelper.py | 1 | 118218 |
"""
Copyright (C) <2010> Autin L. TSRI
This file git_upy/autodeskmaya/mayaHelper.py is part of upy.
upy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
upy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with upy. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
"""
"""
Created on Sun Dec 5 23:30:44 2010
@author: Ludovic Autin - [email protected]
"""
import sys, os, os.path, struct, math, string
from math import *
#import numpy
from types import StringType, ListType
import maya
from maya import cmds,mel,utils
import maya.OpenMaya as om
import maya.OpenMayaAnim as oma
import maya.OpenMayaFX as omfx
import pymel.core as pm
#base helper class
from upy import hostHelper
if hostHelper.usenumpy:
import numpy
from numpy import matrix
from upy.hostHelper import Helper
#conversion between right- and left-handed coordinate systems: flip the Z axis
lefthand =[[ 1, 0, 0, 0],
           [ 0, 1, 0, 0],
           [ 0, 0,-1, 0],
           [ 0, 0, 0, 1]]
from upy.transformation import decompose_matrix
class MayaSynchro:
    """Wrap a Maya timer (om.MTimerMessage) that calls a given callback every
    `period` seconds, e.g. to keep external state in sync with the timeline.
    Changing the period requires re-registering the callback (see change_period)."""
def __init__(self,cb=None, period=0.1):
self.period = period
self.callback = None
self.timeControl = oma.MAnimControl()
if cb is not None :
self.doit = cb
def change_period(self,newP):
self.period = newP
self.remove_callback()
self.set_callback()
def set_callback(self):
self.callback = om.MTimerMessage.addTimerCallback(self.period,self.doit)
def remove_callback(self):
om.MMessage.removeCallback(self.callback)
    def doit(self,*args,**kw): #the timer passes (period, time, userData=None)
        pass
class mayaHelper(Helper):
"""
The maya helper abstract class
============================
This is the maya helper Object. The helper
give access to the basic function need for create and edit a host 3d object and scene.
"""
SPLINE = "kNurbsCurve"
INSTANCE = "kTransform"
MESH = "kTransform"
POLYGON = "kMesh"#"kTransform"
# MESH = "kMesh"
EMPTY = "kTransform"
BONES="kJoint"
PARTICULE = "kParticle"
SPHERE = "sphere"
CYLINDER = "cylinder"
CUBE = "cube"
IK="kIkHandle"
msutil = om.MScriptUtil()
pb = False
pbinited = False
host = "maya"
def __init__(self,master=None,**kw):
Helper.__init__(self)
self.updateAppli = self.update
self.Cube = self.box
self.Box = self.box
self.Geom = self.newEmpty
#self.getCurrentScene = c4d.documents.GetActiveDocument
self.IndexedPolygons = self.polygons
self.Points = self.PointCloudObject
self.pb = True
self.hext = "ma"
self.timeline_cb={}
self.LIGHT_OPTIONS = {"Area" : maya.cmds.ambientLight,
"Sun" : maya.cmds.directionalLight,
"Spot":maya.cmds.spotLight}
    def fit_view3D(self):
        #no-op placeholder: not implemented for maya
        pass
def resetProgressBar(self,max=None):
"""reset the Progress Bar, using value"""
if self.pb :
            gMainProgressBar = maya.mel.eval('$tmp = $gMainProgressBar')
maya.cmds.progressBar(gMainProgressBar, edit=True, endProgress=True)
self.pbinited = False
# self.pb = False
# maya.cmds.progressBar(maya.pb, edit=True, maxValue=max,progress=0)
def progressBar(self,progress=None,label=None):
""" update the progress bar status by progress value and label string
@type progress: Int/Float
@param progress: the new progress
@type label: string
@param label: the new message to put in the progress status
"""
if self.pb :
            gMainProgressBar = maya.mel.eval('$tmp = $gMainProgressBar')
            if not self.pbinited :
                cmds.progressBar( gMainProgressBar,
                        edit=True,
                        beginProgress=True,
                        isInterruptable=False,
                        status=label,
                        maxValue=100)
                self.pbinited = True
# if progress == 1 :
# prev = cmds.progressBar(gMainProgressBar,q=1,progress=1)
# progress = prev/100. + 0.1
# progress*=100.
if label is not None and progress is None :
cmds.progressBar(gMainProgressBar, edit=True, status = label)
elif label is not None and progress is not None:
cmds.progressBar(gMainProgressBar, edit=True, progress=progress*100.,status = label)
elif label is None and progress is not None:
cmds.progressBar(gMainProgressBar, edit=True, progress=progress*100.)
if progress == 1 or progress == 100.:
self.resetProgressBar()
#maxValue = 100
#did not work
#maya.cmds.progressBar(maya.pb, edit=True, progress=progress*100)
# cmds.progressBar(maya.pb, edit=True, step=1)
#maya.cmds.progressBar(maya.pb, edit=True, step=1)
def synchronize(self,cb):
self.timeline_cb[cb] = MayaSynchro(cb=cb,period=0.05)
self.timeline_cb[cb].set_callback()
def unsynchronize(self,cb):
self.timeline_cb[cb].remove_callback()
def update(self,):
#how do I update the redraw
cmds.refresh()
def updateAppli(self,):
#how do I update the redraw
cmds.refresh()
def checkName(self,name):
invalid=[]
        if name is None :
            print ("None name or not a string",name)
            return ""
#sometime the name is a list ie [u'name']
if type(name) is list or type(name) is tuple :
if len(name) == 1 :
name = name[0]
elif len(name) == 2 :
name = name[1]#transform node
else :
name = name[0] #?
if (type(name) is not str and type(name) is not unicode) :
print ("not a string",name,type(name))
return ""
if not len(name):
print ("empty name",name)
        for i in range(10):#Maya names cannot start with a digit, 0-9
            invalid.append(str(i))
if type(name) is list or type(name) is tuple:
name = name[0]
if type(name) is not str and type(name) is not unicode:
name = name.name()
        while len(name) and name[0] in invalid:
            name= name[1:]
#also remove some character and replace it by _
name=name.replace(":","_").replace(" ","_").replace("'","").replace("-","_")
return name
def setCurrentSelection(self,obj):
if obj is None :
return
if type (obj) is list or type (obj) is tuple :
for o in obj :
cmds.select(self.getObject(o))
else :
cmds.select(self.getObject(obj))
    def getCurrentSelection(self):
        slist = om.MSelectionList()
        om.MGlobal.getActiveSelectionList(slist)
        #check emptiness after filling the list; the original check ran before
        if slist.isEmpty() :
            return []
        selection = []
        slist.getSelectionStrings(selection)
        return selection
def checkPrimitive(self,object):
try :
cmds.polySphere(object,q=1,r=1)
return "sphere"
except :
pass
try :
cmds.sphere(object,q=1,r=1)
return "sphere"
except :
pass
try :
cmds.polyCube(object,q=1,w=1)
return "cube"
except :
pass
try :
cmds.polyCylinder(object,q=1,r=1)
return "cylinder"
except :
pass
return None
def getType(self,object):
#first tryto see if isa primitive
prim = self.checkPrimitive(object)
if prim is not None :
return prim
object = self.getNode(object)
if hasattr(object,"apiTypeStr"):
# print (object.apiTypeStr())
return object.apiTypeStr()
else :
# print (type(object))
return type(object)
# return type(object)
def getMName(self,o):
return o.name()
def setName(self,o,name):
if o is None :
return
cmds.rename( self.checkName(o), name, ignoreShape=False)
def getName(self,o):
if o is None: return ""
        if type(o) == str or type(o) == unicode :
            name = o.replace(":","_").replace(" ","_").replace("'","").replace("-","_")
elif type(o) is om.MFnMesh:
return o
elif hasattr(o,"name") :
if type(o.name) == str :
return o.name
else : return o.name()
elif type(o) is list or type(o) is tuple:
name=o[0]
else : name=o
return name
def getMObject(self,name):
# Create a selection list, get an MObject of the nodes which name is name
selectionList = om.MSelectionList()
selectionList.add( name ) #should be unic..
node = om.MObject()
selectionList.getDependNode( 0, node )
#//Create a function set, connect to it,
fnDep = om.MFnDependencyNode(node)
#print fnDep.name() #object name
#print fnDep.typeName() #type name ie mesh, transform etc..
return node,fnDep
def getObject(self,name,doit=True):
if type(name) is list or type(name) is tuple :
if len(name) == 1 :
name = name[0]
elif len(name) == 2 :
name = name[1]#transform node
else :
name = name[0] #?
name=self.checkName(name)
if name.find(":") != -1 :
name=name.replace(":","_").replace(" ","_").replace("'","").replace("-","_")
if doit :
name=cmds.ls(name)
if len(name)==0:
return None
if len(name) == 1 :
return name[0]
return name
def checkIsMesh(self,poly):
if type(poly) is str or type(poly) is unicode :
mesh = self.getMShape(poly)#dagPath
else :
            #must be an object shape node or a dagpath
mesh = poly
try :
meshnode = om.MFnMesh(mesh)
return meshnode
except :
return mesh
def getMesh(self,name):
mesh = None
if type(name) != str:
return name
# path = om.MDagPath()
try :
name = self.checkName(name)
mesh = cmds.ls(name)#NMesh.GetRaw(name)
except:
mesh = None
return mesh
def getMeshFrom(self,obj):
if type(obj) is not str and type(obj) is not unicode:
obj = self.getMName(obj)
return self.getMShape(obj)
def getTransformNode(self,name):
if type(name) is list :
name = name[0]
if type(name) is str or type(name) is unicode :
name = self.checkName(name)
node = self.getNode(name)
else :
node = name
dag = om.MFnDagNode(node)
path = om.MDagPath()
dag.getPath(path)
return path.transform(),path
def getMShape(self,name,):
# print name,type(name)
if type(name) is list :
name = name[0]
if type(name) is str or type(name) is unicode :
name = self.checkName(name)
node = self.getNode(name)
else :
node = name
dag = om.MFnDagNode(node)
path = om.MDagPath()
dag.getPath(path)
# self.msutil.createFromInt(0)
# pInt = self.msutil.asUintPtr()
# path.numberOfShapesDirectlyBelow(pInt)
try :
path.extendToShape()
return path
except :
# if self.msutil.getUint(pInt) == 0 :
node = path.child(0)
return self.getMShape(node)
#problem with primitive
# try :
# path.extendToShape()
# except :
# path = None
# return path
def deleteObject(self,obj):
sc = self.getCurrentScene()
if type(obj) is str or type(obj) is unicode:
obj=self.checkName(obj)
else :
if type(obj) is list or type(obj) is tuple :
for o in obj :
self.deleteObject(o)
else :
obj = obj.name()
try :
#print "del",obj
cmds.delete(obj)
except:
print "problem deleting ", obj
#######Special for maya#######################
def getNode( self,name ):
# print "getNode",type(name)
# if type(name) != str :
# return name
name = self.checkName(name)
selectionList = om.MSelectionList()
selectionList.add( name )
node = om.MObject()
selectionList.getDependNode( 0, node )
return node
def getNodePlug(self, attrName, nodeObject ):
"""
example:
translatePlug = nameToNodePlug( "translateX", perspNode )
print "Plug name: %s" % translatePlug.name()
print "Plug value %g" % translatePlug.asDouble()
"""
depNodeFn = om.MFnDependencyNode( nodeObject )
attrObject = depNodeFn.attribute( attrName )
plug = om.MPlug( nodeObject, attrObject )
return plug
################################################
def newLocator(self,name,location=None,**kw):
name = self.checkName(name)
if name.find(":") != -1 : name=name.replace(":","_")
empty=cmds.spaceLocator( n=name, a=True)
parent = None
if "parent" in kw :
parent = kw["parent"]
self.reParent(empty,parent)
return str(empty)
def newEmpty(self,name,location=None,**kw):
#return self.newLocator(name,location=location, **kw)
name = self.checkName(name)
if name.find(":") != -1 : name=name.replace(":","_")
empty=cmds.group( em=True, n=name)
parent = None
if "parent" in kw :
parent = kw["parent"]
self.reParent(empty,parent)
return str(empty)
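    # Usage sketch (hypothetical names; assumes a mayaHelper instance `helper`
    # running inside a Maya session):
    #   root = helper.newEmpty("assembly_root")
    #   child = helper.newEmpty("assembly_child", parent=root)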
def updateMasterInstance(self,master, newobjects,instance=True, **kw):
"""
Update the reference of the passed instance by adding/removing-hiding objects
* overwrited by children class for each host
>>> sph = helper.Sphere("sph1")
>>> instance_sph = helper.newInstance("isph1",sph,location = [10.0,0.0,0.0])
@type instance: string/hostObj
@param instance: name of the instance
@type objects: list hostObject/string
@param objects: the list of object to remove/add to the instance reference
@type add: bool
@param add: if True add the objec else remove
@type hide: bool
@param hide: hide instead of remove
@type kw: dictionary
@param kw: you can add your own keyword, but it should be interpreted by all host
"""
        #the instance should point to an empty that has the shape as child
        #what we should do is either duplicate or reParent the new object under this master parent
        #or use the replace command ? use particule ?
        #replace the mesh node of the master by the given ones....
        #hide and remove every previous children....
chs = self.getChilds(master)
for o in chs :
r=cmds.duplicate(o, renameChildren=True)
print r
cmds.delete(chs)#or move or uninstance ?
if instance :
n=[]
for o in newobjects :
name = self.getName(master)+"Instance"
i1=self.getObject(name+"1")
if i1 is not None :
cmds.delete(i1)
i=self.newInstance(name,o,parent=master)
else :
self.reParent(newobjects,master)
def newMInstance(self,name,object,location=None,
hostmatrice=None,matrice=None,parent=None,**kw):
#first create a MObject?
        #only works on meshes
name = self.checkName(name)
fnTrans = om.MFnTransform()
minstance = fnTrans.create()
fnTrans.setName(name)
#now add the child as an instance.
#print fnTrans.name()
        #will this work with any object ?
object=self.getNode(object)#or the shape ?
fnTrans.addChild(object,fnTrans.kNextPos,True)
#print name, object , fnTrans
if matrice is not None and isinstance(matrice,om.MTransformationMatrix):
hostmatrice=matrice
matrice = None
if hostmatrice is not None and not isinstance(hostmatrice,om.MTransformationMatrix):
matrice = hostmatrice
hostmatrice = None
if location is not None :
fnTrans.setTranslation(self.vec2m(location),om.MSpace.kPostTransform)
elif hostmatrice is not None :
fnTrans.set(hostmatrice)
elif matrice is not None :
#first convert
hmatrice = self.matrixp2m(matrice)
fnTrans.set(hmatrice)
if parent is not None:
mparent = self.getNode(parent)
# onode = om.MFnDagNode(mobj)
# print "name",fnTrans.name()
oparent = om.MFnDagNode(mparent)
oparent.addChild(self.getNode(fnTrans.name()),oparent.kNextPos,False)
return fnTrans.name()
def newInstance(self,name,object,location=None,hostmatrice=None,matrice=None,
parent=None,material=None,**kw):
#instance = None#
#instance parent = object
#instance name = name
# return self.newMInstance(name,object,location=location,
# hostmatrice=hostmatrice,matrice=matrice,parent=parent,**kw)
#
name = self.checkName(name)
instance = cmds.instance(object,name=name)
if location != None :
#set the position of instance with location
cmds.move(float(location[0]),float(location[1]),float(location[2]), name,
absolute=True )
if matrice is not None :
if self._usenumpy :
#matrice = numpy.array(matrice)#matrix(matrice)*matrix(lefthand)#numpy.array(matrice)
#transpose only rotation
                matrice = numpy.array(matrice).transpose()#we do the transpose here
#m = matrice.copy()
# m[0,:3]=matrice[0,:3]#thi work with numpy
# m[1,:3]=matrice[1,:3]
# m[2,:3]=matrice[2,:3]
#matrice[:3,:3] = matrice[:3,:3].transpose()
hm = matrice.reshape(16,).tolist()
                #should I apply some transformation first ?
cmds.xform(name, a=True, m=hm,roo="xyz")#a for absolute
else :
self.setTransformation(instance[0],mat=matrice)
#set the instance matrice
#self.setObjectMatrix(self,object,matrice=matrice,hostmatrice=hostmatrice)
if parent is not None:
self.reParent(instance,parent)
if material is not None:
self.assignMaterial(instance,material)
return instance
#alias
setInstance = newInstance
def matrixToParticles(self,name,matrices,vector=[0.,1.,0.],transpose=True,**kw):#edge size ?
        #blender uses the vertex normal to rotate the instance
        #quad up vector should use the input vector
axe=self.rerieveAxis(vector)
#axe="+Y"
quad=numpy.array(self.quad[axe])#*10.0
print ("matrixToParticles",axe,vector,quad)
# f=[0,1,2,3]
v=[]
f=[]
e=[]
n=[]
vi=0
#one mat is
#rot[3][:3] tr
# rot[:3,:3] rot
#create particle system
# obj = self.checkName(obj)
# partO=self.getMShape(obj) #shape..
# fnP = omfx.MFnParticleSystem(partO)
# oriPsType = fnP.renderType()
rot=om.MVectorArray()#fnP.count())
pos=om.MVectorArray()#fnP.count())
tr=[]
#set position and rotation
for i,m in enumerate(matrices):
mat = numpy.array(m)
if transpose :
mat = numpy.array(m).transpose()
# t = m[3][:3]
# rot = m[:3,:3]
scale, shear, euler, translate, perspective=decompose_matrix(mat)
tr.append(translate.tolist())
#need euler angle
# e=self.FromMat(rot).rotation().asEulerRotation()
p = om.MVector( float(translate[0]),float(translate[1]),float(translate[2]) )
pos.append(p)
r = om.MVector( float(euler[0]),float(euler[1]),float(euler[2]) )/(math.pi) *180
rot.append(r)
# fnP.setPerParticleAttribute("rotationPP",rot)
# fnP.setPerParticleAttribute("position",pos)
part,partShape= pm.nParticle(n=name+"_ps",position = tr)
# part,partShape=cmds.particle(n=name+"_ps",p=list(tr))
pm.setAttr('nucleus1.gravity', 0.0)#?
# cmds.setAttr(partShape+'.computeRotation',1)
partShape.computeRotation.set(True)
pm.addAttr(partShape, ln = 'rotationPP', dt = 'vectorArray')
pm.addAttr(partShape, ln = 'rotationPP0', dt = 'vectorArray')
particle_fn = omfx.MFnParticleSystem(partShape.__apimobject__())
particle_fn.setPerParticleAttribute('rotationPP', rot)
particle_fn.setPerParticleAttribute('rotationPP0', rot)
if 'parent' in kw and kw['parent'] is not None:
parent = self.getObject(kw['parent'])
self.reParent(name+"_ps",parent)
return part,partShape
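    # Usage sketch (hypothetical data; `mats` is a list of 4x4 transformation
    # matrices, e.g. built with numpy):
    #   part, partShape = helper.matrixToParticles("inst", mats, vector=[0.,1.,0.])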
#particleInstancer -addObject
#-object locator1 -cycle None -cycleStep 1 -cycleStepUnits Frames
#-levelOfDetail Geometry -rotationUnits Degrees
#-rotationOrder XYZ -position worldPosition -age age crn_A_clouddsShape;
def instancePolygon(self,name, matrices=None,hmatrices=None, mesh=None,parent=None,
transpose=False,globalT=True,**kw):
hm = False
if hmatrices is not None :
matrices = hmatrices
hm = True
if matrices == None : return None
if mesh == None : return None
instance = []
#print len(matrices)#4,4 mats
if self.instance_dupliFace:
v=[0.,1.,0.]
if "axis" in kw and kw["axis"] is not None:
v=kw["axis"]
print ("axis",v)
o = self.getObject(name+"_pis")
if o is None :
# o,m=self.matrixToVNMesh(name,matrices,vector=v)
particle,partShape=self.matrixToParticles(name,matrices,vector=v,
transpose=transpose,parent=parent)
p_instancer = pm.PyNode(pm.particleInstancer(
partShape, addObject=True, object=pm.ls(mesh),name=name+"_pis",
cycle='None', cycleStep=1, cycleStepUnits='Frames',
levelOfDetail='Geometry', rotationUnits='Degrees',
rotationOrder='XYZ', position='worldPosition', age='age'))
pm.particleInstancer(partShape, name = p_instancer, edit = True, rotation = "rotationPP")
if parent is not None :
self.reParent(name+"_pis",parent)
# cmds.particleInstancer(
# partShape, addObject=True, object=self.getMShape(mesh),
# cycle='None', cycleStep=1, cycleStepUnits='Frames',
# levelOfDetail='Geometry', rotationUnits='Degrees',
# rotationOrder='XYZ', position='worldPosition', age='age')
# cmds.particleInstancer(partShape, name = "p_instancer",
# edit = True, rotation = "rotationPP")
else :
#update
pass
return name+"_pis"
#rotation checkbox->use normal
else :
for i,mat in enumerate(matrices):
inst = self.getObject(name+str(i))
if inst is None :
#Minstance?
if hm :
inst=self.newInstance(name+str(i),mesh,hostmatrice=mat,
parent=parent,globalT=globalT)
else :
inst=self.newInstance(name+str(i),mesh,matrice=mat,
parent=parent,globalT=globalT)
instance.append(inst)
return instance
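    # Usage sketch (hypothetical names; "pCube1" must already exist). With
    # instance_dupliFace enabled this goes through the particle instancer,
    # otherwise one cmds.instance is created per matrix:
    #   insts = helper.instancePolygon("copies", matrices=mats, mesh="pCube1")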
    def resetTransformation(self,name):
        #identity matrix; the last element must be 1.0 for a valid homogeneous transform
        m= [1.,0.,0.,0.,
            0.,1.,0.,0.,
            0.,0.,1.,0.,
            0.,0.,0.,1.]
        cmds.xform(name, a=True, m=m)
def setObjectMatrix(self,object,matrice,hostmatrice=None,**kw):
"""
set a matrix to an hostObject
@type object: hostObject
@param object: the object who receive the transformation
@type hostmatrice: list/Matrix
@param hostmatrice: transformation matrix in host format
@type matrice: list/Matrix
@param matrice: transformation matrix in epmv/numpy format
"""
#have to manipulate the DAG/upper transform node...
        #let's just take the owner transform node of the shape
#we should be able to setAttr either 'matrix' or 'worldMatrix'
object = self.getObject(object)
if hostmatrice !=None :
#set the instance matrice
matrice=hostmatrice
if matrice != None:
#convert the matrice in host format
#set the instance matrice
pass
transpose = True
if "transpose" in kw :
transpose = kw["transpose"]
if matrice is not None :
if self._usenumpy :
#matrice = numpy.array(matrice)#matrix(matrice)*matrix(lefthand)#numpy.array(matrice)
#transpose only rotation
matrice = numpy.array(matrice)
if transpose :
                    matrice=matrice.transpose()#we do the transpose here
#m = matrice.copy()
# m[0,:3]=matrice[0,:3]#thi work with numpy
# m[1,:3]=matrice[1,:3]
# m[2,:3]=matrice[2,:3]
#matrice[:3,:3] = matrice[:3,:3].transpose()
hm = matrice.reshape(16,).tolist()
                #should I apply some transformation first ?
cmds.xform(object, a=True, m=hm,roo="xyz")#a for absolute
else :
self.setTransformation(object,mat=matrice)
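    # Usage sketch (assumes numpy is available and "pSphere1" exists); the
    # identity matrix leaves the object untransformed:
    #   import numpy
    #   helper.setObjectMatrix("pSphere1", numpy.identity(4).tolist())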
def concatObjectMatrix(self,object,matrice,hostmatrice=None):
"""
apply a matrix to an hostObject
@type object: hostObject
@param object: the object who receive the transformation
@type hostmatrice: list/Matrix
@param hostmatrice: transformation matrix in host format
@type matrice: list/Matrix
@param matrice: transformation matrix in epmv/numpy format
"""
#get current transformation
if hostmatrice !=None :
#compute the new matrix: matrice*current
#set the new matrice
pass
if matrice != None:
#convert the matrice in host format
#compute the new matrix: matrice*current
#set the new matrice
pass
def addObjectToScene(self,doc,obj,parent=None,**kw):
        #this just puts the object under a parent
#return
if obj == None : return
if parent is not None :
if type(obj) is list or type(obj) is tuple :
if len(obj) == 1 :
obj = obj[0]
elif len(obj) == 2 :
obj = obj[1]#transform node
else :
obj = obj[0] #?
obj=self.checkName(obj)
parent=self.checkName(parent)
#print obj,parent
# cmds.parent( obj, parent)
self.parent(obj, parent)
def parent(self,obj,parent,instance=False):
if type(parent) == unicode :
parent = str(parent)
if type(parent) != str :
print ("parent is not String ",type(parent))
return
# print ("parenting ", obj,parent, instance )
mobj = self.getNode(obj)
mparent = self.getNode(parent)
# onode = om.MFnDagNode(mobj)
oparent = om.MFnDagNode(mparent)
# print ("parenting dag node", obj,parent, mobj,oparent.kNextPos,instance )
oparent.addChild(mobj,oparent.kNextPos,instance)
def reParent(self,obj,parent,instance=False):
if parent == None :
print ("parent is None")
return
if type(obj) is not list and type(obj) is not tuple :
obj = [obj,]
try :
[self.parent(o,parent,instance=instance) for o in obj]
        except :
            print ("reParent failed for",obj)
def getChilds(self,obj):
if type(obj) is str or type(obj) is unicode:
o = self.checkName(obj)
else :
o = self.getName(obj)
childs= cmds.listRelatives(o, c=True)
if childs is None :
return []
else :
return childs
def addCameraToScene(self,name,Type='persp',focal=30.0,center=[0.,0.,0.],sc=None):
# Create a camera and get the shape name.
cameraName = cmds.camera(n=name)
cameraShape = cameraName[1]
# Set the focal length of the camera.
cmds.camera(cameraShape, e=True, fl=focal)
#change the location
cmds.move(float(center[0]),float(center[1]),float(center[2]), cameraName[0], absolute=True )
#should I rotate it
cmds.rotate( 0, '0', '360deg',cameraName[0] )
# Change the film fit type.
#cmds.camera( cameraShape, e=True, ff='overscan' )
return cameraName
def addLampToScene(self,name,Type='Area',rgb=[1.,1.,1.],dist=25.0,energy=1.0,
soft=1.0,shadow=False,center=[0.,0.,0.],sc=None,**kw):
#print Type
#each type have a different cmds
lcmd = self.LIGHT_OPTIONS[Type]
light = lcmd(n=name)
# light = cmds.pointLight(n=name)
#cmds.pointLight(light,e=1,i=energy,rgb=rgb,ss=soft,drs=dist)
lcmd(light,e=1,i=energy)
lcmd(light,e=1,ss=soft)
# cmds.pointLight(light,e=1,drs=dist)
lcmd(light,e=1,rgb=rgb)
cmds.move(float(center[0]),float(center[1]),float(center[2]), light, absolute=True )
return light
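    # Usage sketch (hypothetical values; Type must be one of the
    # LIGHT_OPTIONS keys: "Area", "Sun" or "Spot"):
    #   lamp = helper.addLampToScene("key_light", Type='Spot', rgb=[1.,1.,1.],
    #                                energy=1.0, center=[0.,10.,0.])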
def toggleDisplay(self,ob,display,**kw):
# ob = self.getObject(ob)
# if ob is None :
# return
# ob=self.checkName(ob)
# if display :
# cmds.showHidden(ob)
# else :
# cmds.hide(ob)
if ob is None :
return
node = self.getNode(self.checkName(ob))
if node is None :
return
attrDis = self.getNodePlug("visibility",node)
attrDis.setBool(bool(display))
# def toggleXray(self,object,xray):
# o = self.getObject(object)
# cmds.select(o)
# cmds.displySurface(xRay = True)
def getVisibility(self,obj,editor=True, render=False, active=False):
        #0 off, 1 on, 2 undef
node = self.getNode(self.checkName(obj))
attrDis = self.getNodePlug("visibility",node)
if editor and not render and not active:
return attrDis.asBool()
elif not editor and render and not active:
return attrDis.asBool()
elif not editor and not render and active:
return attrDis.asBool()
        else :
            #MPlug has no get(); use asBool() for all three flags
            return attrDis.asBool(),attrDis.asBool(),attrDis.asBool()
def getTranslation(self,name,absolue=True):
name = self.checkName(name)
return self.FromVec(cmds.xform(name,q=1,ws=int(absolue),t=1))
def getTranslationOM(self,name):
node = self.getNode(name)
fnTrans = om.MFnTransform(node,)
return fnTrans.getTranslation(om.MSpace.kWorld)#kPostTransform)
def setTranslation(self,name,pos):
node = self.getNode(name)
fnTrans = om.MFnTransform(node,)
newT = self.vec2m(pos)
fnTrans.setTranslation(newT,om.MSpace.kPostTransform)
def translateObj(self,obj,position,use_parent=False):
#is om would be faster ?
if len(position) == 1 : c = position[0]
else : c = position
#print "upadteObj"
newPos=c#c=c4dv(c)
o=self.getObject(obj)
if use_parent :
parentPos = self.getPosUntilRoot(obj)#parent.get_pos()
c = newPos - parentPos
cmds.move(float(c[0]),float(c[1]),float(c[2]), o, absolute=True )
else :
cmds.move(float(c[0]),float(c[1]),float(c[2]), o, absolute=True )
def scaleObj(self,obj,sc):
obj = self.checkName(obj)
if type(sc) is float :
sc = [sc,sc,sc]
cmds.scale(float(sc[0]),float(sc[1]),float(sc[2]), obj,absolute=True )
def getScale(self,name,absolue=True,**kw):
node = self.getNode(name)
fnTrans = om.MFnTransform(node,)
# First create an array and a pointer to it
scaleDoubleArray = om.MScriptUtil()
scaleDoubleArray.createFromList( [0.0, 0.0, 0.0], 3 )
scaleDoubleArrayPtr = scaleDoubleArray.asDoublePtr()
# Now get the scale
fnTrans.getScale( scaleDoubleArrayPtr )
# Each of these is a decimal number reading from the pointer's reference
x_scale = om.MScriptUtil().getDoubleArrayItem( scaleDoubleArrayPtr, 0 )
y_scale = om.MScriptUtil().getDoubleArrayItem( scaleDoubleArrayPtr, 1 )
z_scale = om.MScriptUtil().getDoubleArrayItem( scaleDoubleArrayPtr, 2 )
return [x_scale,y_scale,z_scale]#kPostTransform) or om.MVector(v[0], v[1], v[2])?
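    # Usage sketch; the MScriptUtil dance above is how the C++-style
    # double* output argument of MFnTransform.getScale is read from Python:
    #   sx, sy, sz = helper.getScale("pSphere1")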
def getSize(self,obj):
        #return the shape's width/height/depth
obj = self.checkName(obj)
meshnode = self.getMShape(obj)
try :
mesh = om.MFnMesh(meshnode)
except :
return [1,1,1]
obj = self.getMName(mesh)
x=cmds.getAttr(obj+'.width')
y=cmds.getAttr(obj+'.height')
z=cmds.getAttr(obj+'.depth')
return [x,y,z]
def rotateObj(self,obj,rot):
#take degree
obj = self.checkName(obj)
cmds.setAttr(obj+'.rx',degrees(float(rot[0])))
cmds.setAttr(obj+'.ry',degrees(float(rot[1])))
cmds.setAttr(obj+'.rz',degrees(float(rot[2])))
def getTransformation(self,name):
node = self.getNode(name)
fnTrans = om.MFnTransform(node)
mmat = fnTrans.transformation()
#maya matrix
return mmat
def setTransformation(self,name,mat=None,rot=None,scale=None,trans=None,order="str",**kw):
node = self.getNode(name)
fnTrans = om.MFnTransform(node)
if mat is not None :
if isinstance(mat,om.MTransformationMatrix):
fnTrans.set(mat)
else :
fnTrans.set(self.matrixp2m(mat))
if trans is not None :
fnTrans.setTranslation(self.vec2m(trans),om.MSpace.kPostTransform)
if rot is not None :
rotation = om.MEulerRotation (rot[0], rot[1], rot[2])
fnTrans.setRotation(rotation)
if scale is not None :
fnTrans.setScale(self.arr2marr(scale))
def ObjectsSelection(self,listeObjects,typeSel="new"):
"""
Modify the current object selection.
@type listeObjects: list
@param listeObjects: list of object to joins
@type typeSel: string
@param listeObjects: type of modification: new,add,...
"""
dic={"add":True,"new":False}
sc = self.getCurrentScene()
for obj in listeObjects:
cmds.select(self.getObject(obj),add=dic[typeSel])
#Put here the code to add/set an object to the current slection
#[sc.SetSelection(x,dic[typeSel]) for x in listeObjects]
def JoinsObjects(self,listeObjects):
"""
Merge the given liste of object in one unique geometry.
@type listeObjects: list
@param listeObjects: list of object to joins
"""
sc = self.getCurrentScene()
#put here the code to add the liste of object to the selection
cmds.select(self.getObject(listeObjects[0]))
for i in range(1,len(listeObjects)):
cmds.select(listeObjects[i],add=True)
cmds.polyUnite()
#no need to joins? but maybe better
#then call the command/function that joins the object selected
# c4d.CallCommand(CONNECT)
#need face indice
def color_mesh_perVertex(self,mesh,colors,faces=None,perVertex=True,
facesSelection=None,faceMaterial=False):
        if type(colors[0]) is not list and len(colors) == 3 :
colors = [colors,]
if not isinstance(mesh,maya.OpenMaya.MFnMesh):
if self.getType(mesh) != self.POLYGON and self.getType(mesh) != self.MESH:
return False
mcolors=om.MColorArray()
iv=om.MIntArray()
meshnode = mesh
# print mesh
if type(mesh) is str or type(mesh) is unicode :
meshnode = self.getMShape(mesh)
try :
mesh = om.MFnMesh(meshnode)
except:
return False
mesh.findPlug('displayColors').setBool(True)
if not isinstance(mesh,maya.OpenMaya.MFnMesh):
return
nv=mesh.numVertices()
nf=mesh.numPolygons()
mfaces = self.getMeshFaces(meshnode)
if facesSelection is not None :
if type(facesSelection) is bool :
fsel,face_sel_indice = self.getMeshFaces(mesh,selected=True)
else :
face_sel_indice = facesSelection
fsel=[]
for i in face_sel_indice:
fsel.append(mfaces[i])
vsel=[]
for f in fsel:
for v in f:
if v not in vsel:
vsel.append(v)
mfaces = fsel
nf = len(fsel)
nv = len(vsel)
# print "selected ",face_sel_indice
#check if its ok
if len(colors) == nv:
perVertex = True
elif len(colors) == nf:
perVertex = False
if perVertex:
N=range(nv)
else :
N=range(nf)
if facesSelection is not None :
N = face_sel_indice
perVertex = False
for k,i in enumerate(N) :
if len(colors) == 1 : ncolor = colors[0]
else :
if k >= len(colors) :
ncolor = [0.,0.,0.] #problem
else :
ncolor = colors[i]
#print ncolor
#if max(ncolor) < 1 : ncolor = map( lambda x: x*255, ncolor)
col=om.MColor(float(ncolor[0]),float(ncolor[1]),float(ncolor[2]))
#print ncolor
mcolors.append(col)
iv.append(int(i))
# print "i",i,ncolor
#mesh.setVertexColor(col,int(i))
if perVertex:
mesh.setVertexColors(mcolors,iv)
else :
# print iv#should be the fdace index
mesh.setFaceColors(mcolors,iv)
return True
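    # Usage sketch (hypothetical mesh name); pass one RGB triple per vertex
    # for per-vertex coloring, or one per face for per-face coloring:
    #   ok = helper.color_mesh_perVertex("pSphere1", [[1.,0.,0.]] * nverts)
    # where nverts is the vertex count of the mesh.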
###################MATERIAL CODE FROM Rodrigo Araujo#####################################################################################
#see http://linil.wordpress.com/2008/01/31/python-maya-part-2/
def createMaterial(self, name, color, type ):
name = self.checkName(name)
mat=cmds.ls(name, mat=True)
if len(mat)==0: #create only if mat didnt exist already
#shading group
shaderSG = cmds.sets(renderable=True, noSurfaceShader=True, empty=True,
name=name+"SG" )
#material
cmds.shadingNode( type, asShader=True, name=name )
#phong ?
#cmds.setAttr((shader+ '.reflectivity'), 0)# no rayTrace
#cmds.setAttr((shader+ '.cosinePower'), 3)
cmds.setAttr( name+".color", color[0], color[1], color[2],
type="double3")
cmds.connectAttr(name+".outColor", shaderSG+".surfaceShader")
def createTexturedMaterial(self,name,filename):
name = self.checkName(name)
mat=cmds.ls(name, mat=True)
if len(mat)==0: #create only if mat didnt exist already
#shading group
shaderSG = cmds.sets(renderable=True, noSurfaceShader=True, empty=True,
name=name+"SG" )
#material
cmds.shadingNode("lambert", asShader=True, name=name )
cmds.connectAttr(name+".outColor", shaderSG+".surfaceShader")
#create the texture and connect it
texture = cmds.shadingNode('file', asTexture=True,name=name+"Texture")
cmds.connectAttr(name+"Texture"+'.outColor', name+".color")
cmds.setAttr(name+"Texture"+'.fileTextureName', filename, type='string')
return name
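    # Usage sketch (hypothetical file path):
    #   mat = helper.createTexturedMaterial("woodMat", "/path/to/texture.jpg")
    #   helper.assignMaterial("pCube1", mat)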
def create_mMayaMaterials(self):
existingSGs = cmds.ls(type = 'shadingEngine')
shaderHits = 0;
shaderSG, shaderSGAmbOcc, ambOcc, ramp = '', '', '', ''
for existingSG in existingSGs:
if mel.eval('attributeExists mMaya_atomShaderSG ' +existingSG):
shaderSG = existingSG
shaderHits += 1
if mel.eval('attributeExists mMaya_atomShaderSGAmbOcc ' +existingSG):
shaderSGAmbOcc = existingSG
shaderHits += 1
existingAmbOccs = cmds.ls(type = 'mib_amb_occlusion')
for existingAmbOcc in existingAmbOccs:
if mel.eval('attributeExists mMaya_atomShaderAmbOcc ' +existingAmbOcc):
ambOcc = existingAmbOcc
shaderHits += 1
existingRamps = cmds.ls(type = 'ramp')
for existingRamp in existingRamps:
if mel.eval('attributeExists mMaya_atomShaderRGBRamp ' +existingRamp):
ramp = existingRamp
shaderHits += 1
if shaderHits == 4:
return shaderSG, shaderSGAmbOcc, ambOcc, ramp
elif shaderHits == 0:
shader = cmds.shadingNode('phong', asShader = 1, name = ("atomShader"))
cmds.setAttr((shader+ '.reflectivity'), 0)# no rayTrace
cmds.setAttr((shader+ '.cosinePower'), 3)
shaderSG = cmds.sets(renderable = 1, noSurfaceShader = 1, empty = 1)
cmds.addAttr(shaderSG, ln = 'mMaya_atomShaderSG', at = 'bool', h = 1)
cmds.connectAttr((shader+ '.outColor'), (shaderSG+ '.surfaceShader'))
shaderAmbOcc = cmds.shadingNode('phong', asShader = 1, name = ("atomShaderAmbOcc"))
cmds.setAttr((shaderAmbOcc+ '.reflectivity'), 0)
cmds.setAttr((shaderAmbOcc+ '.cosinePower'), 3)
cmds.setAttr((shaderAmbOcc+ '.ambientColor'), 0.7, 0.7, 0.7)
cmds.setAttr((shaderAmbOcc+ '.diffuse'), 0.2)
ambOcc = cmds.createNode('mib_amb_occlusion')
cmds.addAttr(ambOcc, ln = 'mMaya_atomShaderAmbOcc', at = 'bool', h = 1)
cmds.connectAttr((ambOcc+ '.outValue'), (shaderAmbOcc+ '.color'))
cmds.connectAttr((shaderAmbOcc+ '.color'), (shaderAmbOcc+ '.specularColor'))
partySampler = cmds.createNode('particleSamplerInfo')
cmds.connectAttr((partySampler+ '.outTransparency'), (shader+ '.transparency'))
cmds.connectAttr((partySampler+ '.outIncandescence'), (shader+ '.incandescence'))
cmds.connectAttr((partySampler+ '.outColor'), (shader+ '.color'))
cmds.connectAttr((partySampler+ '.outTransparency'), (shaderAmbOcc+ '.transparency'))
cmds.connectAttr((partySampler+ '.outIncandescence'), (shaderAmbOcc+ '.incandescence'))
cmds.connectAttr((partySampler+ '.outColor'), (ambOcc+ '.bright'))
shaderSGAmbOcc = cmds.sets(renderable = 1, noSurfaceShader = 1, empty = 1)
cmds.addAttr(shaderSGAmbOcc, ln = 'mMaya_atomShaderSGAmbOcc', at = 'bool', h = 1)
cmds.connectAttr((shaderAmbOcc+ '.outColor'), (shaderSGAmbOcc+ '.surfaceShader'))
ramp = cmds.createNode('ramp')
cmds.setAttr((ramp + '.interpolation'), 0)
cmds.addAttr(ramp, ln = 'mMaya_atomShaderRGBRamp', at = 'bool', h = 1)
valChangePMA = cmds.createNode('plusMinusAverage')
cmds.addAttr(valChangePMA, ln = 'mMaya_atomShaderRGBRampPMA', at = 'bool', h = 1)
cmds.connectAttr((ramp+ '.mMaya_atomShaderRGBRamp'), (valChangePMA+ '.mMaya_atomShaderRGBRampPMA'))
            indexDivFactor = 1000.0
            #NOTE: 'elems' and 'vanRad_CPK' (element -> index / CPK color tables)
            #are expected to come from the enclosing module; they are not defined here.
            for elem in elems:
indexElem = vanRad_CPK[elem][4]
col = vanRad_CPK[elem][1:-1]
cmds.setAttr((ramp + '.colorEntryList[' +str(indexElem)+ '].position'), (indexElem/indexDivFactor))
#cmds.setAttr((ramp + '.colorEntryList[' +str(indexElem)+ '].color'), col[0], col[1], col[2], type = 'double3')
shade = cmds.shadingNode('surfaceShader', asTexture = 1)
cmds.setAttr((shade + '.outColor'), col[0], col[1], col[2], type = 'double3')
cmds.connectAttr((shade+ '.outColor'), (ramp+ '.colorEntryList[' +str(indexElem)+ '].color'))
cmds.connectAttr((shade+ '.outColor'), (valChangePMA+ '.input3D[' +str(indexElem)+ ']'))
cmds.rename(shade, elems[elem])
return shaderSG, shaderSGAmbOcc, ambOcc, ramp
else:
mel.eval('error "a mMaya default shader has been deleted"')
def addMaterial(self, name, color ):
if color is None :
color = (1.,0.,0.)
name = self.checkName(name)
mat=cmds.ls(name, mat=True)
if len(mat)==0: #create only if mat didnt exist already
#shading group
cmds.sets( renderable=True, noSurfaceShader=True, empty=True, name=name+"SG" )
#material
# = name[1:]
cmds.shadingNode( 'lambert', asShader=True, name=name )
cmds.setAttr( name+".color", color[0], color[1], color[2], type="double3")
cmds.connectAttr(name+".outColor", name+"SG.surfaceShader")
mat = cmds.ls(name, mat=True)
return mat
def assignMaterial(self,object,matname,texture = True,**kw):
object = self.getObject(object,doit=True)
#print "assign " , matname
#print matname
if type(matname) != list :
# name = name.replace(":","_")
matname = self.checkName(matname)
mat=cmds.ls(matname, mat=True)
else :
if type(matname[0]) is list :
mat = matname[0]
matname = str(matname[0][0])
else :
mat = matname
matname = str(matname[0])
#print "find " ,mat
matname = self.checkName(matname)
# if not mat:
# self.createMaterial (matname, (1.,1.,1.), 'lambert')
# conn = cmds.listConnections(cmds.listHistory(object))
## if len(conn) >= 2:
# shade = cmds.listHistory(object)[0].split('|')[1]
# cmds.hyperShade( matname,o=shade,assign=True )
#print 'assign ',object,matname
# print mat,matname
try :
cmds.sets(object, edit=True, forceElement=matname+"SG")
except :
print "problem assigning mat" + matname + " to object "+object
def assignNewMaterial(self, matname, color, type, object):
print matname, color, type, object
self.createMaterial (matname, color, type)
self.assignMaterial (object,matname)
def colorMaterial(self,matname, color):
matname=self.getMaterial(matname)
if len(matname)==1:
matname=matname[0]
cmds.setAttr( str(matname)+".color", color[0], color[1], color[2], type="double3")
def getMaterial(self,matname):
if type(matname) != str :
return matname
matname = self.checkName(matname)
mat=cmds.ls(matname, mat=True)
if len(mat)==0:
return None
else :
return mat
def getMaterialName(self,mat):
return str(mat)
def getAllMaterials(self):
#return unicode list of material
#mat=getMaterials()
matlist=cmds.ls(mat=True)#[]
return matlist
def getMaterialObject(self,obj):
obj = self.getObject(obj)
matnames = cmds.listConnections(cmds.listHistory(obj,f=1),type='lambert')
return matnames
def changeObjColorMat(self,obj,color):
#obj should be the object name, in case of mesh
#in case of spher/cylinder etc...atom name give the mat name
#thus matname should be 'mat_'+obj
obj = self.checkName(obj)
matname = "mat_"+str(obj)
self.colorMaterial(matname,color)
def changeColor(self,mesh,colors,perVertex=True,perObjectmat=None,pb=False,
facesSelection=None,faceMaterial=False):
#if hasattr(geom,'obj'):obj=geom.obj
#else : obj=geom
#mesh = self.getMesh(mesh)
        if type(colors[0]) is not list and len(colors) == 3 :
colors = [colors,]
print "change color",type(mesh),mesh
res = self.color_mesh_perVertex(mesh,colors,perVertex=perVertex,
facesSelection=facesSelection,
faceMaterial=faceMaterial)
if not res or len(colors) == 1:
#simply apply the color/material to mesh
#get object material, if none create one
# print "material assign"
mats = self.getMaterialObject(mesh)
# print mats
if not mats :
self.assignNewMaterial("mat"+self.getName(mesh), colors[0],
'lambert', mesh)
else :
self.colorMaterial(mats[0],colors[0])
def getMaterialProperty(self,material, **kw):
"""
        Get the given material's properties.
        * overwritten by children class for each host
        @type material: string/Material
        @param material: the material to query
- color
- specular
- ...
"""
mat =self.getMaterial(material)
if len(mat)==1:
mat=mat[0]
res = {}
if mat is None :
return
if "specular" in kw :
res["specular"] = True#mat[c4d.MATERIAL_USE_SPECULAR]
if "specular_color" in kw :
res["specular_color"] = [0,0,0]#self.ToVec(mat[c4d.MATERIAL_SPECULAR_COLOR],pos=False)
if "specular_width" in kw :
res["specular_width"] = 0#mat[c4d.MATERIAL_SPECULAR_WIDTH]
if "color" in kw :
res["color"] = cmds.getAttr( str(mat)+".color")[0]
if "diffuse" in kw :
res["diffuse"] = cmds.getAttr( str(mat)+".diffuse")[0]
return res
###################Meshs and Objects#####################################################################################
def Sphere(self,name,res=16.,radius=1.0,pos=None,color=None,
mat=None,parent=None,type="nurb"):
# iMe[atn],node=cmds.sphere(name=name+"Atom_"+atn,r=rad)
name = self.checkName(name)
t=res/100.
if type == "nurb" :
transform_node,shape = cmds.sphere(name=name,r=radius,sections=int(res),
spans=int(res)) #NurbSphere
elif type == "poly":
transform_node,shape = cmds.polySphere( n=name, r=radius,sx=int(res), sy=int(res))
#shape is name+"Shape"
if pos is not None :
cmds.move(float(pos[0]),float(pos[1]),float(pos[2]),
transform_node,absolute=True )
if mat is not None :
mat = self.getMaterial(mat)
if mat is not None :
self.assignMaterial(transform_node,mat)
else :
if color is not None :
mat = self.addMaterial("mat"+name,color)
else :
mat = self.addMaterial("mat"+name,[1.,1.,0.])
# mat = self.getMaterial(name)
self.assignMaterial(transform_node,mat)
if parent is not None :
self.reParent(transform_node,parent)
return transform_node,shape
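    # Usage sketch (hypothetical values); type can be "nurb" or "poly":
    #   sph, shape = helper.Sphere("atom0", radius=1.5, pos=[0.,0.,0.],
    #                              color=[1.,0.,0.], type="poly")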
def updateSphereMesh(self,mesh,verts=None,faces=None,basemesh=None,
scale=None,typ=True,**kw):
#scale or directly the radius..Try the radius
#scale is actualy the radius
# name = self.getObject(mesh)
#would it be faster with openMaya
mesh = self.checkName(mesh)
if typ:
cmds.sphere(mesh,e=1,r=scale)
else :
cmds.polySphere(mesh,e=1,r=scale)
def updateSphereObj(self,obj,coords=None):
if obj is None or coords is None: return
obj = self.getObject(obj)
#would it be faster we transform action
self.setTranslation(obj,coords)
# cmds.move(float(coords[0]),float(coords[1]),float(coords[2]), obj, absolute=True )
# def updateSphereObjs(self,g,coords=None):
# if not hasattr(g,'obj') : return
# if coords == None :
# newcoords=g.getVertices()
# else :
# newcoords=coords
# #print "upadteObjSpheres"
# #again map function ?
# for i,nameo in enumerate(g.obj):
# c=newcoords[i]
# o=getObject(nameo)
# cmds.move(float(c[0]),float(c[1]),float(c[2]), o, absolute=True )
def instancesCylinder(self,name,points,faces,radii,
mesh,colors,scene,parent=None):
cyls=[]
mat = None
if len(colors) == 1:
mat = self.retrieveColorMat(colors[0])
if mat == None and colors[0] is not None:
mat = self.addMaterial('mat_'+name,colors[0])
for i in range(len(faces)):
cyl = self.oneCylinder(name+str(i),points[faces[i][0]],
points[faces[i][1]],radius=radii[i],
instance=mesh,material=mat,parent = parent)
cyls.append(cyl)
return cyls
def updateInstancesCylinder(self,name,cyls,points,faces,radii,
mesh,colors,scene,parent=None,delete = True):
mat = None
if len(colors) == 1:
mat = self.retrieveColorMat(colors[0])
if mat == None and colors[0] is not None:
mat = self.addMaterial('mat_'+name,colors[0])
for i in range(len(faces)):
col=None
if i < len(colors):
col = colors[i]
if i < len(cyls):
self.updateOneCylinder(cyls[i],points[faces[i][0]],
points[faces[i][1]],radius=radii[i],
material=mat,color=col)
self.toggleDisplay(cyls[i],True)
else :
cyl = self.oneCylinder(name+str(i),points[faces[i][0]],
points[faces[i][1]],radius=radii[i],
instance=mesh,material=mat,parent = parent)
cyls.append(cyl)
        if len(faces) < len(cyls) :
            #hide or delete the extra ones; iterate backward so pop() keeps indices valid
            for i in range(len(cyls)-1,len(faces)-1,-1):
                if delete :
                    obj = cyls.pop(i)
                    self.deleteObject(obj)
                else :
                    self.toggleDisplay(cyls[i],False)
        return cyls
def instancesSphere(self,name,centers,radii,meshsphere,colors,scene,parent=None):
name = self.checkName(name)
sphs=[]
mat = None
if len(colors) == 1:
print (colors)
mat = self.retrieveColorMat(colors[0])
if mat == None:
mat = self.addMaterial('mat_'+name,colors[0])
for i in range(len(centers)):
sphs.append(cmds.instance(meshsphere,name=name+str(i)))
#local transformation ?
cmds.move(float(centers[i][0]),float(centers[i][1]),float(centers[i][2]),name+str(i))
cmds.scale(float(radii[i]),float(radii[i]),float(radii[i]), name+str(i),absolute=True )
if mat == None : mat = self.addMaterial("matsp"+str(i),colors[i])
self.assignMaterial(name+str(i),mat)#mat[bl.retrieveColorName(sphColors[i])]
self.addObjectToScene(scene,sphs[i],parent=parent)
return sphs
def updateInstancesSphere(self,name,sphs,centers,radii,meshsphere,
colors,scene,parent=None,delete=True):
mat = None
if len(colors) == 1:
mat = self.retrieveColorMat(colors[0])
if mat == None and colors[0] is not None:
mat = self.addMaterial('mat_'+name,colors[0])
for i in range(len(centers)):
if len(radii) == 1 :
rad = radii[0]
elif i >= len(radii) :
rad = radii[0]
else :
rad = radii[i]
if i < len(sphs):
cmds.move(float(centers[i][0]),float(centers[i][1]),float(centers[i][2]),sphs[i])#name+str(i))
cmds.scale(float(rad),float(rad),float(rad), sphs[i],absolute=True )
# sphs[i].SetAbsPos(self.FromVec(centers[i]))
# sphs[i][905]=c4d.Vector(float(rad),float(rad),float(rad))
if mat == None :
if colors is not None and i < len(colors) and colors[i] is not None :
mat = self.addMaterial("matsp"+str(i),colors[i])
if colors is not None and i < len(colors) and colors[i] is not None :
self.colorMaterial(mat,colors[i])
self.toggleDisplay(sphs[i],True)
else :
sphs.append(cmds.instance(meshsphere,name=name+str(i)))
#local transformation ?
cmds.move(float(centers[i][0]),float(centers[i][1]),float(centers[i][2]),name+str(i))
cmds.scale(float(rad),float(rad),float(rad), name+str(i),absolute=True )
if mat == None : mat = self.addMaterial("matsp"+str(i),colors[i])
self.assignMaterial(name+str(i),mat)#mat[bl.retrieveColorName(sphColors[i])]
self.addObjectToScene(scene,sphs[i],parent=parent)
        if len(centers) < len(sphs) :
            #hide or delete the extra ones; iterate backward so pop() keeps indices valid
            for i in range(len(sphs)-1,len(centers)-1,-1):
                if delete :
                    obj = sphs.pop(i)
                    print "delete",obj
                    self.deleteObject(obj)
                else :
                    self.toggleDisplay(sphs[i],False)
        return sphs
def constraintLookAt(self,object):
"""
        Constrain a host object to look at the camera
        @type object: Hostobject
        @param object: object to constrain
"""
self.getObject(object)
cmds.orientConstraint( 'persp', object )
def updateText(self,text,string="",parent=None,size=None,pos=None,font=None):
text = self.checkName(text)
if string : cmds.textCurves(text, e=1, t=string )
# if size is not None : text[c4d.PRIM_TEXT_HEIGHT]= size
# if pos is not None : self.setTranslation(text,pos)
# if parent is not None : self.reParent(text,parent)
def extrudeText(self,text,**kw):
tr,parent = self.getTransformNode(text)
nChild = parent.childCount()
print nChild
#dag = om.MFnDagNode(node)
dnode = om.MFnDependencyNode(parent.transform())
child_path = om.MDagPath()
cmd ="constructionHistory=True,normalsOutwards=True,range=False,polygon=1,\
tolerance=0.01,numberOfSides=4 ,js=True,width=0 ,depth=0 ,extrudeDepth=0.5,\
capSides=4 ,bevelInside=0 ,outerStyle=0 ,innerStyle=0 ,\
polyOutMethod=0,polyOutCount=200,polyOutExtrusionType=2 ,\
polyOutExtrusionSamples=3,polyOutCurveType=2 ,\
polyOutCurveSamples=3,polyOutUseChordHeightRatio=0)"
for i in range(nChild):
#get all curve
node_child = parent.child(i)
child_tr,child_path = self.getTransformNode(node_child)
dnode = om.MFnDependencyNode(node_child)
nChildChild = child_path.childCount()
for j in range(nChildChild):
cmdchilds="cmds.bevelPlus("
node_child_child = child_path.child(j)
dnode = om.MFnDependencyNode(node_child_child)
cmdchilds+='"'+dnode.name()+'",'
cmdchilds+="n='bevel_"+dnode.name()+str(j)+"',"+cmd
cmdbis = 'cmds.bevel("'+dnode.name()+'",n="bevel_'+dnode.name()+str(j)+'", ed=0.5)'
eval(cmdbis)
cmds.bevel(e=1,w=0,d=0)
def Text(self,name="",string="",parent=None,size=5.,pos=None,font='Courier',
lookAt=False,**kw):
return_extruder = False
name = self.checkName(name)
if "extrude" in kw :
extruder = None
if type(kw["extrude"]) is bool and kw["extrude"]:
pass
text = cmds.textCurves( n= name, f=font, t=string )
## Result: [u'testShape', u'makeTextCurves2'] #
if pos is not None :
#should add -14
pos[0] = pos[0]-14.0#not center
self.setTranslation(name+'Shape',pos)
# if parent is not None:
self.addObjectToScene(self.getCurrentScene(),name+'Shape',parent=parent)
if lookAt:
self.constraintLookAt(name)
self.scaleObj(text[0],[size,size,size])
if "extrude" in kw :
extruder = None
#create an extruder
if type(kw["extrude"]) is bool and kw["extrude"]:
self.extrudeText(text)
# extruder = cmds.bevelPlus( text[1], ed=0.5)
# extruder = cmds.bevel( text, ed=0.5,w=0.0,d=0.0)
#reparent the extruder ?
# self.reParent(extruder,parent)
#po=1, cap=4,
# extruded=cmds.extrude( extrude_obj,self.checkName(name)+"_spline",
# et = 2, ucp = 1,n=name, fpt=1,upn=1)
return_extruder = True
        else :
                self.extrudeText(text)
#                extruder = cmds.bevel( text, ed=0.5,w=0.0,d=0.0)
#                self.reParent(extruder,parent)#extruder is always None here; nothing to reparent
# if extruder is not None :
# pass
self.addObjectToScene(self.getCurrentScene(),name+'Shape',parent=parent)
if return_extruder :
return text,None
return text
def getBoxSize(self,name):
#kPolyCube
# cmds.select(name)
# print(name)
sx = cmds.polyCube(name, q=True,w=True)
sy = cmds.polyCube(name, q=True,h=True)
sz = cmds.polyCube(name, q=True,d=True)
return [sx,sy,sz]
def box(self,name,center=[0.,0.,0.],size=[1.,1.,1.],cornerPoints=None,visible=1,
mat=None,**kw):
if cornerPoints != None :
for i in range(3):
size[i] = cornerPoints[1][i]-cornerPoints[0][i]
for i in range(3):
center[i]=(cornerPoints[0][i]+cornerPoints[1][i])/2.
res = 15.
name = self.checkName(name)
box,shape = cmds.polyCube(name=name,w=float(size[0]),h=float(size[1]),
d=float(size[2]), sx=res, sy=res, sz=res )
        if mat is None :
            mat = self.addMaterial("mat"+name,[1.,1.,0.])
        self.assignMaterial(box,mat)
cmds.move(float(center[0]),float(center[1]),float(center[2]),box)
parent = None
if "parent" in kw :
parent = kw["parent"]
self.addObjectToScene(self.getCurrentScene(),box,parent=parent)
return box,shape
def updateBox(self,box,center=[0.,0.,0.],size=[1.,1.,1.],cornerPoints=None,
visible=1, mat = None):
box=self.getObject(box)
if cornerPoints != None :
for i in range(3):
size[i] = cornerPoints[1][i]-cornerPoints[0][i]
for i in range(3):
center[i]=(cornerPoints[0][i]+cornerPoints[1][i])/2.
cmds.move(float(center[0]),float(center[1]),float(center[2]),box)
cmds.polyCube(box,e=1,w=float(size[0]),h=float(size[1]),
d=float(size[2]))
def Cone(self,name,radius=1.0,length=1.,res=16,pos = None,parent=None):
name = self.checkName(name)
diameter = 2*radius
cone,mesh=cmds.cone(name=name,axis=[0.0,1.0,0.0],hr=length,
r=radius,s=res,nsp=res)
if pos != None : cmds.move(float(pos[0]),float(pos[1]),float(pos[2]),cone)
if parent is not None:
self.reParent(cone,parent)
# self.addObjectToScene(self.getCurrentScene(),instance)
return str(cone),mesh
def Cylinder(self,name,radius=1.,length=1.,res=16,pos = None,parent=None,**kw):
#import numpy
name = self.checkName(name)
diameter = 2*radius
axis = [0.0,0.0,1.0]
if "axis" in kw : #orientation
dic = {"+X":[1.,0.,0.],"-X":[-1.,0.,0.],"+Y":[0.,1.,0.],"-Y":[0.,-1.,0.],
"+Z":[0.,0.,1.],"-Z":[0.,0.,-1.]}
if type(kw["axis"]) is str :
axis = dic[kw["axis"]]
else :
axis = kw["axis"]
cyl,mesh=cmds.polyCylinder(name=name,axis=axis,
r=radius, sx=res, sy=res, sz=5, h=length)
if pos != None : cmds.move(float(pos[0]),float(pos[1]),float(pos[2]),cyl)
if parent is not None:
self.reParent(cyl,parent)
# self.addObjectToScene(self.getCurrentScene(),instance)
return str(cyl),mesh#,mesh
def oneCylinder(self,name,head,tail,radius=None,instance=None,material=None,
parent = None,color=None):
name = self.checkName(name)
laenge,wsz,wz,coord=self.getTubeProperties(head,tail)
# print "oneCylinder instance",instance
if instance == None :
obj = self.Cylinder(name)
else :
obj = self.newMInstance(name,instance,parent=parent)
# obj = name
# self.translateObj(name,coord)
# self.setTranslation(name,coord)
# #obj.setLocation(float(coord[0]),float(coord[1]),float(coord[2]))
# cmds.setAttr(name+'.ry',float(degrees(wz)))
# cmds.setAttr(name+'.rz',float(degrees(wsz)))
# cmds.scale( 1, 1, laenge, name,absolute=True )
if radius is None :
radius= 1.0
self.setTransformation(obj,trans=coord,scale=[radius, radius, laenge],
rot=[0.,wz,wsz])
if material is not None :
self.assignMaterial(obj,material)
elif color is not None :
mats = self.getMaterialObject(obj)
if not mats :
mat = self.addMaterial("mat_"+name,color)
self.assignMaterial(obj,mat)
else :
self.colorMaterial(mats[0],color)
return obj
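    # Usage sketch (hypothetical values); head/tail are the cylinder's two
    # end points, from which getTubeProperties derives length and rotation:
    #   cyl = helper.oneCylinder("bond0", [0.,0.,0.], [0.,0.,5.], radius=0.5,
    #                            color=[0.,1.,0.])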
def updateOneCylinder(self,name,head,tail,radius=None,material=None,color=None):
name = self.checkName(name)
laenge,wsz,wz,coord=self.getTubeProperties(head,tail)
obj = self.getObject(name)
if radius is None :
radius= 1.0
self.setTransformation(obj,trans=coord,scale=[radius, radius, laenge],
rot=[0.,wz,wsz])
if material is not None :
self.assignMaterial(obj,material)
elif color is not None :
mats = self.getMaterialObject(obj)
if not mats :
mat = self.addMaterial("mat_"+name,color)
self.assignMaterial(obj,mat)
else :
self.colorMaterial(mats[0],color)
return obj
def updateTubeObj(self,o,coord1,coord2):
laenge,wsz,wz,pos=self.getTubeProperties(coord1,coord2)
self.setTransformation(o,trans=pos,scale=[1., 1., laenge],
rot=[0.,wz,wsz])
# cmds.scale( 1., 1., laenge, o,absolute=True )
# self.setTranslation(o,pos)
## cmds.move(float(pos[0]),float(pos[1]),float(pos[2]), o, absolute=True )
# cmds.setAttr(o+'.ry',float(degrees(wz)))
# cmds.setAttr(o+'.rz',float(degrees(wsz)))
    def updateTubeMeshold(self,atm1,atm2,bicyl=False,cradius=1.0,quality=0):
        #NOTE: legacy wrapper; updateTubeObj() only accepts (o,coord1,coord2),
        #so the extra keywords below would raise a TypeError if this is called.
        self.updateTubeObj(atm1,atm2,bicyl=bicyl,cradius=cradius)
def updateTubeMesh(self,mesh,basemesh=None,cradius=1.0,quality=0):
# print mesh
# print cradius, mesh
mesh = self.getObject(str(mesh))
# print mesh
maya.cmds.polyCylinder(mesh,e=True,r=cradius)
# def updateTubeObjs(self,g):
# if not hasattr(g,'obj') : return
# newpoints=g.getVertices()
# newfaces=g.getFaces()
# #print "upadteObjTubes"
# for i,o in enumerate(g.obj):
# laenge,wsz,wz,pos=self.getTubeProperties(points[f[0]],points[f[1]])
# cmds.scale( 1, 1, laenge, o,absolute=True )
# cmds.move(float(pos[0]),float(pos[1]),float(pos[2]), o, absolute=True )
# cmds.setAttr(o+'.ry',float(degrees(wz)))
# cmds.setAttr(o+'.rz',float(degrees(wsz)))
def plane(self,name,center=[0.,0.,0.],size=[1.,1.],cornerPoints=None,visible=1,**kw):
#polyPlane([axis=[linear, linear, linear]], [
# constructionHistory=boolean], [createUVs=int], [height=linear],
# [name=string], [object=boolean], [subdivisionsX=int],
# [subdivisionsY=int], [texture=int], [width=linear])
plane,shape = cmds.polyPlane(name=name,w=float(size[0]),h=float(size[1]),
ax=[0.,0.,1.])
        if cornerPoints != None :
            #size holds only width/height; range(3) would overrun the 2-element list
            for i in range(2):
                size[i] = cornerPoints[1][i]-cornerPoints[0][i]
            for i in range(3):
                center[i]=(cornerPoints[0][i]+cornerPoints[1][i])/2.
            #the plane was created above with the old size; apply the new one
            cmds.polyPlane(plane,e=1,w=float(size[0]),h=float(size[1]))
        cmds.move(float(center[0]),float(center[1]),float(center[2]),plane)
if "subdivision" in kw :
cmds.polyPlane(plane,e=1,
sx=kw["subdivision"][0],sy=kw["subdivision"][1])
if "axis" in kw : #orientation
dic = { "+X":[1.,0.,0.],"-X":[-1.,0.,0.],
"+Y":[0.,1.,0.],"-Y":[0.,-1.,0.],
"+Z":[0.,0.,1.],"-Z":[0.,0.,-1.]}
idic = { 0:[1.,0.,0.],1:[-1.,0.,0.],
2:[0.,1.,0.],3:[0.,-1.,0.],
4:[0.,0.,1.],5:[0.,0.,-1.]}
if type(kw["axis"]) is str :
axis = dic[kw["axis"]]
else : #int
axis = idic[kw["axis"]]
cmds.polyPlane(plane,e=1,ax=axis)
# if "material" in kw :
# texture = plane.MakeTag(c4d.Ttexture)
# if type(kw["material"]) is c4d.BaseMaterial :
# texture[1010] = kw["material"]
# else :
# texture[1010] = self.addMaterial("plane",[1.,1.,0.])
parent = None
if "parent" in kw :
parent = kw["parent"]
self.addObjectToScene(self.getCurrentScene(),plane,parent=parent)
return plane,shape
def PointCloudObject(self,name,**kw):
#print "cloud", len(coords)
name = self.checkName(name)
coords=kw['vertices']
# nface = 0
# if kw.has_key("faces"):
# nface = len(kw['faces'])
# obj = self.createsNmesh(name+'ds',coords,None,[])
# return obj[0]
partShape,part = self.particule(name+"ds", coords)
return part,partShape
def getJointPosition(self,jointname):
return self.getTranslation(jointname)
#return self.getTranslationOM(jointname)
# fnJt=oma.MFnIkJoint()
# mobj = self.getNode(jointname)
# if not fnJt.hasObj(mobj ) :
# print "no joint provided!"
# return None
# fnJt.setObject(mobj)
# cvs = om.MPointArray()
# ncurve.getCVs(cvs,om.MSpace.kPostTransform)
# return cvs
def updateArmature(self,basename,coords,listeName=None,scn=None,root=None,**kw):
for j in range(len(coords)):
atC=coords[j]
name = basename+'bone'+str(j)
if listeName is not None:
name = listeName[j]
relativePos=[atC[0],atC[1],atC[2]]
cmds.joint(self.checkName(name),e=1, p=relativePos)
def armature(self,basename,coords,listeName=None,scn=None,root=None,**kw):
#bones are called joint in maya
#they can be position relatively or globally
basename = self.checkName(basename)
bones=[]
# center = self.getCenter(coords)
parent = self.newEmpty(basename)
self.addObjectToScene(scn,parent,parent=root)
for j in range(len(coords)):
atC=coords[j]
#bones.append(c4d.BaseObject(BONE))
relativePos=[atC[0],atC[1],atC[2]]
name = basename+'bone'+str(j)
if listeName is not None:
name = listeName[j]
joint=cmds.joint(n=self.checkName(name), p=relativePos) #named "joint1"
bones.append(joint)
if scn != None :
if j==0 : self.addObjectToScene(scn,bones[j],parent=parent)
else : self.addObjectToScene(scn,bones[j],parent=bones[j-1])
return parent,bones
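    # Usage sketch (hypothetical coordinates); each coordinate becomes one joint:
    #   parent, bones = helper.armature("chainA",
    #                                   [[0.,0.,0.],[0.,2.,0.],[0.,4.,0.]])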
def bindGeom2Bones(self,listeObject,bones):
"""
        Make a skinning, i.e. bind the given bones to the given list of geometry.
        This function joins the list of geometry into one geometry.
        @type listeObjects: list
        @param listeObjects: list of objects to join
        @type bones: list
        @param bones: list of joints
"""
if len(listeObject) >1:
self.JoinsObjects(listeObject)
else :
self.ObjectsSelection(listeObject,"new")
#2- add the joins to the selection
self.ObjectsSelection(bones,"add")
#3- bind the bones / geoms
cmds.bindSkin()
#IK:cmds.ikHandle( sj='joint1', ee='joint5', p=2, w=.5 )
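    # Usage sketch (hypothetical names), continuing from armature() above:
    #   helper.bindGeom2Bones(["skinMesh"], bones)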
def getParticulesPosition(self,name):
name = self.checkName(name)
partO=self.getMShape(name) #shape..
fnP = omfx.MFnParticleSystem(partO)
pos=om.MVectorArray(fnP.count())
oriPsType = fnP.renderType()
if(oriPsType == omfx.MFnParticleSystem.kTube):
fnP.position0(pos);
else:
fnP.position(pos);
return pos
def setParticulesPosition(self,newPos,PS=None):
if PS == None :
return
obj = self.checkName(PS)
partO=self.getMShape(obj) #shape..
fnP = omfx.MFnParticleSystem(partO)
oriPsType = fnP.renderType()
pos=om.MVectorArray(fnP.count())
#pts = om.MPointArray(fnP.count())
for v in newPos:
p = om.MVector( float(v[0]),float(v[1]),float(v[2]) )
pos.append(p)
# pts.append(p)
#fnP.emit(pts)
fnP.setPerParticleAttribute("position",pos)
def getParticles(self,name,**kw):
PS = self.getObject(name)
return PS
def updateParticles(self,newPos,PS=None,**kw):
if PS == None :
return
obj = self.checkName(PS)
partO=self.getMShape(obj) #shape..
fnP = omfx.MFnParticleSystem(partO)
oriPsType = fnP.renderType()
currentN = fnP.count()
N = len(newPos)
fnP.setCount(N)
pos=om.MVectorArray(fnP.count())
#pts = om.MPointArray(fnP.count())
for v in newPos:
p = om.MVector( float(v[0]),float(v[1]),float(v[2]) )
pos.append(p)
fnP.setPerParticleAttribute("position",pos)
#this update the particle position not the particle number
def updateParticleRotation(self,obj,rotation):
obj = self.checkName(obj)
partO=self.getMShape(obj) #shape..
fnP = omfx.MFnParticleSystem(partO)
oriPsType = fnP.renderType()
rot=om.MVectorArray(fnP.count())
#euler angle?
        for v in rotation:
            p = om.MVector( float(v[0]),float(v[1]),float(v[2]) )
            rot.append(p)#was pos.append(p); 'pos' is undefined in this method
        fnP.setPerParticleAttribute("rotationPP",rot)
#this update the particle position not the particle number
def updateParticle(self,obj,vertices,faces):
obj = self.checkName(obj)
partO=self.getMShape(obj) #shape..
fnP = omfx.MFnParticleSystem(partO)
oriPsType = fnP.renderType()
if(oriPsType == omfx.MFnParticleSystem.kTube):
if faces is None :
return
position0 = om.MVectorArray()
position1 = om.MVectorArray()
            for i,f in enumerate(faces):
coord1 = c = vertices[f[0]]
coord2 = vertices[f[1]]
p = om.MVector( float(c[0]),float(c[1]),float(c[2]) )
#print 'point:: %f, %f, %f' % (p.x, p.y, p.z)
position0.append(p)
c= coord2
p = om.MVector( float(c[0]),float(c[1]),float(c[2]) )
#print 'point:: %f, %f, %f' % (p.x, p.y, p.z)
position1.append(p)
fnP.setPerParticleAttribute("position0",position0)
fnP.setPerParticleAttribute("position1",position1)
else :
pos=om.MVectorArray(fnP.count())
#pts = om.MPointArray(fnP.count())
for v in vertices:
p = om.MVector( float(v[0]),float(v[1]),float(v[2]) )
pos.append(p)
# pts.append(p)
#fnP.emit(pts)
fnP.setPerParticleAttribute("position",pos)
#fnP.setPerParticleAttribute? position
#stat = resultPs.emit(finalPos);
def particule(self,name, coord,**kw):
name = self.checkName(name)
if coord is not None :
try :
coord = numpy.array(coord).tolist()
except :
pass
part,partShape=cmds.particle(n=name,p=list(coord))
else :
part,partShape=cmds.particle(n=name)
# instant = cmds.particleInstancer(part, a = 1, object = cyl[0],
# position = 'bondPos', aimDirection = 'velocity',
# scale = 'bondScaler',
# name = (chainName+ '_geoBondsInstances'))
return partShape,part
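    # Usage sketch (hypothetical coordinates); returns (shape, transform):
    #   partShape, part = helper.particule("cloud", [[0.,0.,0.],[1.,1.,1.]])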
def updateMetaball(self,name,vertices=None):
if vertices is None :
return
self.updateParticle(name,vertices=vertices,faces=None)
def metaballs(self,name,coords,radius,scn=None,root=None,**kw):
# atoms=selection.findType(Atom)
        #no native metaball in maya; use a particle system set to blobby surface
        #use of the point cloud polygon object as the emitter
# name is on the form 'metaballs'+mol.name
# if scn == None:
# scn = self.getCurrentScene()
#molname = name.split("balls")[1]
#emiter = molname+"_cloud"
name = self.checkName(name)
partShape,part = self.particule(name, coords)
#need to change the rep
node = self.getNode(partShape)
plug = self.getNodePlug("particleRenderType",node)
        plug.setInt(7) # 7 = blobby surface (s/w) render type
return part,partShape
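    # Editor's sketch (hypothetical helper instance and names): the
    # particle-based metaball trick above could be driven like this:
    #
    #   coords = [[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]
    #   part, partShape = helper.metaballs("metaballs_demo", coords, None)
    #   helper.updateMetaball("metaballs_demo", vertices=new_coords)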
def splinecmds(self,name,coords,type="",extrude_obj=None,scene=None,parent=None):
#Type : "sBezier", "tBezier" or ""
name = self.checkName(name)
if scene is None :
scene = self.getCurrentScene()
#parent=newEmpty(name)
curve = cmds.curve(n=name,p=coords)
#return the name only, but create a transform node with name : name
#and create a curveShape named curveShape1
        objName=cmds.ls("curveShape1")[0] # ls returns a list; rename needs a single name
cmds.rename(objName,name+"Shape")
cmds.setAttr(name+"Shape"+".dispEP",1)
if parent is not None :
cmds.parent( name, parent)
return name,None
def extrudeSpline(self,spline,**kw):
extruder = None
shape = None
spline_clone = None
if "shape" in kw:
if type(kw["shape"]) == str :
shape = self.build_2dshape("sh_"+kw["shape"]+"_"+str(spline),
kw["shape"])[0]
else :
shape = kw["shape"]
if shape is None :
            shape = self.build_2dshape("sh_circle"+str(spline))[0] # fixed: was assigned to an unused 'shapes'
if "extruder" in kw:
extruder = kw["extruder"]
# if extruder is None :
# extruder=self.sweepnurbs("ex_"+spline.GetName())
if "clone" in kw and kw["clone"] :
spline_clone = cmds.duplicate(spline,n="exd"+str(spline))
self.resetTransformation(spline_clone)
extruder=cmds.extrude( shape[0],spline_clone,
et = 2, ucp = 1,n="ex_"+str(spline), fpt=1,upn=1)
self.toggleDisplay(spline_clone,False)
return extruder,shape,spline_clone
else :
extruder=cmds.extrude( shape[0],spline,
et = 2, ucp = 1,n="ex_"+str(spline), fpt=1,upn=1)
return extruder,shape
#setAttr "extrudedSurfaceShape1.simplifyMode" 1;
def build_2dshape(self,name,type="circle",**kw):
shapedic = {"circle":{"obj":cmds.circle,"size":["r",]},
# "rectangle":{"obj":None,"size":[0,0]}
}
shape = shapedic[type]["obj"](n=name, nr=(1, 0, 0), c=(0, 0, 0),r=0.3)
dopts = [1.,1.]
if "opts" in kw :
dopts = kw["opts"]
if len(shapedic[type]["size"]) == 1 :
pass
# shape[shapedic[type]["size"][0]] = dopts[0]
else :
for i in range(len(shapedic[type]["size"])) :
pass
# shape[shapedic[type]["size"][i]] = dopts[i]
self.addObjectToScene(None,shape)
return shape,name+"Shape"
def spline(self,name,coords,type="",extrude_obj=None,scene=None,
parent=None,**kw):
#Type :
name = self.checkName(name)
if scene is None :
scene = self.getCurrentScene()
#parent=newEmpty(name)
if extrude_obj is not None:
shape,curve = self.omCurve(name+"_spline",coords)
#return the name only, but create a transform node with name : name
#and create a curveShape named curveShape1
if parent is not None :
cmds.parent( self.checkName(name)+"_spline", parent)
# extrude profile curve along path curve using "flat" method
# The extrude type can be distance-0, flat-1, or tube-2
extruded=cmds.extrude( extrude_obj,self.checkName(name)+"_spline",
et = 2, ucp = 1,n=name, fpt=1,upn=1)
#setAttr "extrudedSurfaceShape1.simplifyMode" 1;
return name,shape,extruded
shape,curve = self.omCurve(name,coords)
#return the name only, but create a transform node with name : name
#and create a curveShape named curveShape1
if parent is not None :
cmds.parent( self.checkName(name), parent)
return name,shape
def getSplinePoints(self,name,convert=False):
name = self.checkName(name)
ncurve = om.MFnNurbsCurve()
mobj = self.getNode(self.checkName(name))
if not ncurve.hasObj(mobj ) :
mobj = self.getNode(self.checkName(name)+"Shape")
if not ncurve.hasObj(mobj) :
print "no curve shape provided!"
return None
ncurve.setObject(mobj)
cvs = om.MPointArray()
ncurve.getCVs(cvs,om.MSpace.kPostTransform)
return cvs
def update_spline(self,name,coords):
#need to provide the object shape name
name = self.checkName(name)
ncurve = om.MFnNurbsCurve()
mobj = self.getNode(self.checkName(name))
if not ncurve.hasObj(mobj ) :
mobj = self.getNode(self.checkName(name)+"Shape")
if not ncurve.hasObj(mobj) :
print "no curve shape provided!"
return None
ncurve.setObject(mobj)
deg = 3; #Curve Degree
ncvs = len(coords); #Number of CVs
spans = ncvs - deg # Number of spans
nknots = spans+2*deg-1 # Number of knots
controlVertices = om.MPointArray()
knotSequences = om.MDoubleArray()
# point array of plane vertex local positions
for c in coords:
p = om.MPoint(om.MFloatPoint( float(c[0]),float(c[1]),float(c[2]) ))
#print 'point:: %f, %f, %f' % (p.x, p.y, p.z)
controlVertices.append(p)
# for i in range(nknots):
# knotSequences.append(i)
# create(controlVertices,knotSequences, deg,
# om.MFnNurbsCurve.kOpen, False, False
ncurve.setCVs(controlVertices,om.MSpace.kPostTransform)
# ncurve.setKnots(knotSequences)
ncurve.updateCurve()
def omCurve(self,name,coords,**kw):
#default value
name = self.checkName(name)
deg = 3; #Curve Degree
ncvs = len(coords); #Number of CVs
if kw.has_key("deg"):
deg = kw['deg']
spans = ncvs - deg # Number of spans
nknots = spans+2*deg-1 # Number of knots
controlVertices = om.MPointArray()
knotSequences = om.MDoubleArray()
# point array of plane vertex local positions
for c in coords:
p = om.MPoint(om.MFloatPoint( float(c[0]),float(c[1]),float(c[2]) ))
#print 'point:: %f, %f, %f' % (p.x, p.y, p.z)
controlVertices.append(p)
for i in range(nknots):
knotSequences.append(i)
curveFn=om.MFnNurbsCurve()
curve = curveFn.create(controlVertices,knotSequences, deg,
om.MFnNurbsCurve.kOpen, False, False)
# curveFn.setName(name)
print (curveFn.partialPathName())
print (curveFn.name())
shapename = curveFn.name()
objName = shapename.split("Shape")[0]
n = shapename.split("Shape")[1]
# objName=cmds.ls("curve1")[0]
cmds.rename(objName+n,name)
nodeName = curveFn.name() #curveShape
cmds.rename(nodeName, name+"Shape")
return curveFn, curve
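    # Editor's sketch (hypothetical names): create a NURBS curve through a
    # set of points and later deform it in place with the same CV count:
    #
    #   curveFn, curve = helper.omCurve("demo_curve", coords, deg=3)
    #   helper.update_spline("demo_curve", new_coords)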
def createLines(self,name,coords,normal,faces):
partShape,part = self.linesAsParticles(name,coords,faces)
return part
def linesAsParticles(self,name,coords,face):
#what about omfx to create the system...
name = self.checkName(name)
partShape,part = self.particule(name, None)
path = self.getMShape(part)
node = path.node()
depNodeFn = om.MFnDependencyNode( node )
plug = self.getNodePlug("particleRenderType", node )
plug.setInt(9); #Tube s/w
fnP = omfx.MFnParticleSystem(path)
pts = om.MPointArray()
position0 = om.MVectorArray()
position1 = om.MVectorArray()
for i,f in enumerate(face):
coord1 = c = coords[f[0]]
coord2 = coords[f[1]]
p = om.MVector( float(c[0]),float(c[1]),float(c[2]) )
#print 'point:: %f, %f, %f' % (p.x, p.y, p.z)
position0.append(p)
c= coord2
p = om.MVector( float(c[0]),float(c[1]),float(c[2]) )
#print 'point:: %f, %f, %f' % (p.x, p.y, p.z)
position1.append(p)
laenge,wsz,wz,c=self.getTubeProperties(coord1,coord2)
p = om.MPoint(om.MFloatPoint( float(c[0]),float(c[1]),float(c[2]) ))
pts.append(p)
# fnP.emit(pts)
fnP.setPerParticleAttribute("position0",position0)
fnP.setPerParticleAttribute("position1",position1)
fnP.emit(pts)
return partShape,part
def mayaVec(self,v):
return om.MFloatPoint( float(v[0]),float(v[1]),float(v[2]) )
def getFaces(self,obj,**kw):
# import numpy
node = self.getNode('mesh_'+obj)
meshnode = om.MFnMesh(node)
triangleCounts =om.MIntArray()
triangleVertices= om.MIntArray()
meshnode.getTriangles(triangleCounts,triangleVertices)
if self._usenumpy :
return numpy.array(triangleVertices).reshape((len(triangleVertices)/3,3))
else :
return triangleVertices
def polygons(self,name,proxyCol=False,smooth=False,color=[[1,0,0],], material=None, **kw):
normals = kw["normals"]
name,meshFS = self.createsNmesh(name,kw['vertices'],normals,kw['faces'],color=color,
smooth=smooth,material=material)
return name
def createsNmesh(self,name,vertices,normal,faces,color=[[1,0,0],],smooth=False,
material=None,proxyCol=False,**kw):
"""
This is the main function that create a polygonal mesh.
@type name: string
@param name: name of the pointCloud
@type vertices: array
@param vertices: list of x,y,z vertices points
@type vnormals: array
@param vnormals: list of x,y,z vertex normals vector
@type faces: array
@param faces: list of i,j,k indice of vertex by face
@type smooth: boolean
@param smooth: smooth the mesh
@type material: hostApp obj
@param material: material to apply to the mesh
@type proxyCol: booelan
@param proxyCol: do we need a special object for color by vertex (ie C4D)
@type color: array
@param color: r,g,b value to color the mesh
@rtype: hostApp obj
@return: the polygon object
"""
if len(color) == 3 :
if type(color[0]) is not list :
color = [color,]
outputMesh = om.MObject()
#print outputMesh.name()
#cmds.rename(outputMesh.name(), name)
#test=cmds.createNode( 'transform', n='transform1' )
name=name.replace(":","_")
name=name.replace("-","_")
name=name.replace("'","")
name=name.replace('"',"")
name=self.checkName(name)
#print "NMesh ",name
numFaces = 0
if faces is not None :
numFaces = len(faces)
numVertices = len(vertices)
# point array of plane vertex local positions
points = om.MFloatPointArray()
for v in vertices:
points.append(self.mayaVec(v))
#mayaVertices=map(mayaVec,vertices)
#map(points.append,mayaVertices)
# vertex connections per poly face in one array of indexs into point array given above
faceConnects = om.MIntArray()
for f in faces:
for i in f :
faceConnects.append(int(i))
# an array to hold the total number of vertices that each face has
faceCounts = om.MIntArray()
        for f in faces:
            faceCounts.append(int(len(f))) # per-face vertex count (was reusing the loop-carried 'f')
#create mesh object using arrays above and get name of new mesh
meshFS = om.MFnMesh()
newMesh = meshFS.create(numVertices, numFaces, points, faceCounts,
faceConnects, outputMesh)
# meshFS.updateSurface()
nodeName = meshFS.name()
cmds.rename(nodeName, "mesh_"+name)
#print 'Mesh node name is: %s' % nodeName
objName=cmds.ls("polySurface1")[0]
cmds.rename(objName,name)
#newName should bydefault polySurface something
# assign new mesh to default shading group
if color is not None and len(color) > 1:
self.color_mesh_perVertex(meshFS,color)
doMaterial = True
if type(material) is bool :
doMaterial = material
if doMaterial:
if material == None :
if len(name.split("_")) == 1 : splitname = name
else :
splitname = name.split("_")[1]
#print name,name[:4],splitname,splitname[:4]
self.assignNewMaterial( "mat_"+name, color[0],'lambert' ,"mesh_"+name)
else :
self.assignMaterial("mesh_"+name,material)
if "parent" in kw :
parent = kw["parent"]
# print "reparent ", name,parent
self.reParent(name,parent)
return name,meshFS#,outputMesh
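    # Editor's sketch (hypothetical helper instance): build a single red
    # triangle with the OpenMaya mesh path above:
    #
    #   verts = [[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]
    #   faces = [[0, 1, 2]]
    #   name, meshFS = helper.createsNmesh("demo_tri", verts, None, faces,
    #                                      color=[[1., 0., 0.]])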
def updatePoly(self,obj,vertices=None,faces=None):
if type(obj) is str:
obj = self.getObject(obj)
if obj is None : return
node = self.getMShape(self.checkName(obj))
if node.hasFn(om.MFn.kMesh):
self.updateMesh(obj,vertices=vertices,faces=faces)
elif node.hasFn(om.MFn.kParticle):
self.updateParticle(obj,vertices=vertices,faces=faces)
def updateMesh(self,meshnode,vertices=None,faces=None, smooth=False,**kw):#chains.residues.atoms.coords,indices
# print meshnode,type(meshnode)
if type(meshnode) is str or type(meshnode) is unicode:
node = self.getMShape(self.checkName(meshnode))#self.getNode(self.checkName(meshnode))
meshnode = om.MFnMesh(node)
# meshnode = self.getObject(meshnode,doit=True)
if meshnode is None:
return
nv = meshnode.numVertices()
nf = meshnode.numPolygons()
if vertices is not None :
numVertices = len(vertices)
# point array of plane vertex local positions
points = om.MFloatPointArray()
for v in vertices:
points.append(self.mayaVec(v))
else :
return
#numVertices = nv
if faces is not None :
numFaces = len(faces)
else :
numFaces = nf
faces = []
faceConnects = om.MIntArray()
for f in faces:
for i in f :
faceConnects.append(int(i))
# an array to hold the total number of vertices that each face has
faceCounts = om.MIntArray()
        for f in faces:
            faceCounts.append(int(len(f))) # per-face vertex count (was reusing the loop-carried 'f')
#newMesh = meshFS.create(numVertices, numFaces, points, faceCounts, faceConnects, outputMesh)
result = meshnode.createInPlace(numVertices, numFaces, points, faceCounts, faceConnects)
meshnode.updateSurface()
def ToVec(self,v,**kw):
if hasattr(v,"x") :
return [v.x,v.y,v.z]
else :
return v
def arr2marr(self,v):
#from http://www.rtrowbridge.com/blog/2009/02/maya-api-docs-demystified-for-python-users/
self.msutil.createFromList( v, len(v) )
doubleArrayPtr = self.msutil.asDoublePtr()
return doubleArrayPtr
# def vecp2m(self,v):
# #from http://www.rtrowbridge.com/blog/2009/02/maya-api-docs-demystified-for-python-users/
# doubleArrayPtr = self.arr2marr(v)
# vec = om.MVector( doubleArrayPtr )
# return vec
def FromVec(self,v,pos=True):
if isinstance(v,om.MVector):
return v
else :
return om.MVector(v[0], v[1], v[2])
def vec2m(self,v):
if isinstance(v,om.MVector):
return v
else :
return om.MVector(float(v[0]), float(v[1]), float(v[2]))
def ToMat(self,mat,**kw):
#maya - > python
return self.m2matrix(mat)
def FromMat(self,mat,**kw):
#pythn->maya
return self.matrixp2m(mat)
def matrixp2m(self,mat):
#from http://www.rtrowbridge.com/blog/2009/02/python-api-mtransformationmatrixgetrotation-bug/
if isinstance(mat,om.MTransformationMatrix) :
return mat
getMatrix = om.MMatrix()
matrixList = mat#mat.transpose().reshape(16,)
om.MScriptUtil().createMatrixFromList(matrixList, getMatrix)
mTM = om.MTransformationMatrix( getMatrix )
rotOrder = om.MTransformationMatrix().kXYZ
return mTM
def m2matrix(self,mMat):
#return mMat
#do we use numpy
if isinstance(mMat,om.MTransformationMatrix) :
matrix = mMat.asMatrix()
elif isinstance(mMat,om.MMatrix):
matrix = mMat
else :
return mMat
us=om.MScriptUtil()
out_mat = [0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0,0.0]
us.createFromList( out_mat, len(out_mat) )
ptr1 = us.asFloat4Ptr()
matrix.get(ptr1)
res_mat = [[0.0, 0.0, 0.0,0.0],
[0.0, 0.0, 0.0,0.0],
[0.0, 0.0, 0.0,0.0],
[0.0, 0.0, 0.0,0.0]]
for i in range(4):
for j in range(4):
val = us.getFloat4ArrayItem(ptr1, i,j)
res_mat[i][j]=val
return res_mat
def alignNormal(self,poly):
pass
def triangulate(self,poly):
#select poly
doc = self.getCurrentScene()
mesh = self.getMShape(poly)
meshname= mesh.partialPathName()
#checkType
if self.getType(meshname) != self.MESH :
return
cmds.polyTriangulate(meshname)
def getMeshVertices(self,poly,transform=False,selected = False):
meshnode = self.checkIsMesh(poly)
if selected :
mverts_indice = []
verts =[]
v = om.MIntArray()
vertsComponent = om.MObject()
meshDagPath = om.MDagPath()
activeList = om.MSelectionList()
om.MGlobal.getActiveSelectionList(activeList)
selIter = om.MItSelectionList(activeList,om.MFn.kMeshVertComponent)
            while not selIter.isDone():
                selIter.getDagPath(meshDagPath, vertsComponent)
                if not vertsComponent.isNull():
                    # ITERATE THROUGH EACH VERTEX IN THE CURRENT VERTEX COMPONENT:
                    vertIter = om.MItMeshVertex(meshDagPath,vertsComponent)
                    while not vertIter.isDone():
                        mverts_indice.append(vertIter.index()) # index of the vertex
                        pts = vertIter.position(om.MSpace.kWorld)
                        verts.append(self.ToVec(pts))
                        vertIter.next()
                selIter.next()
            return verts,mverts_indice
else :
nv = meshnode.numVertices()
points = om.MFloatPointArray()
meshnode.getPoints(points)
vertices = [self.ToVec(points[i]) for i in range(nv)]
return vertices
def getMeshNormales(self,poly,selected = False):
meshnode = self.checkIsMesh(poly)
nv = meshnode.numNormals()
normals = om.MFloatVectorArray()
meshnode.getVertexNormals(False,normals)
vnormals = [self.ToVec(normals[i]) for i in range(nv)]
if selected :
v,indice = self.getMeshVertices(poly,selected = selected)
vn=[]
for i in indice:
vn.append(vnormals[i])
return vn,indice
return vnormals
def getMeshEdges(self,poly,selected = False):
#to be tested
meshnode = self.checkIsMesh(poly)
ne= meshnode.numEdges()
edges = []
edgeConnects = om.MIntArray()
for i in range(ne):
meshnode.getEdgeVertices(i,edgeConnects)
edges.append(edgeConnects)
return edges
def getMeshFaces(self,poly,selected = False):
meshnode = self.checkIsMesh(poly)
faceConnects = om.MIntArray()
faceCounts = om.MIntArray()
meshnode.getTriangles(faceCounts,faceConnects)
if selected :
mfaces_indice = []
faces =[]
v = om.MIntArray()
faceComponent = om.MObject()
meshDagPath = om.MDagPath()
activeList = om.MSelectionList()
om.MGlobal.getActiveSelectionList(activeList)
selIter = om.MItSelectionList(activeList,om.MFn.kMeshPolygonComponent)
# print "itersel",selIter.isDone()
while 1:
selIter.getDagPath(meshDagPath, faceComponent);
# print "faces ?",faceComponent.isNull()
if not faceComponent.isNull():
# print ' ITERATE THROUGH EACH "FACE" IN THE CURRENT FACE COMPONENT:'
faceIter = om.MItMeshPolygon(meshDagPath,faceComponent)
while 1:
                        mfaces_indice.append(faceIter.index()) #index of the face
faceIter.getVertices(v)
faces.append([v[0],v[1],v[2]])
faceIter.next()
if faceIter.isDone() : break
selIter.next()
if selIter.isDone() : break
return faces,mfaces_indice
if self._usenumpy :
return numpy.array(faceConnects).reshape((len(faceConnects)/3,3))
else :
return faceConnects
def DecomposeMesh(self,poly,edit=True,copy=True,tri=True,transform=True,**kw):
# import numpy
if tri:
self.triangulate(poly)
if type(poly) is str or type(poly) is unicode or type(poly) is list:
mesh = self.getMShape(poly)#dagPath
else :
#have to a object shape node or dagpath
mesh = poly
print ("mesh ", mesh)
if self.getType(mesh.partialPathName()) != self.POLYGON :
if self.getType(mesh.partialPathName()) == self.PARTICULE:
v = self.getParticulesPosition(mesh.partialPathName())
return None,v,None
return None,None,None
#again problem with instance.....
meshnode = om.MFnMesh(mesh)
print ("meshnode",meshnode)
fnTrans = om.MFnTransform(self.getTransformNode(poly)[0])
print ("fnTrans",fnTrans)
# fnTrans = om.MFnTransform(mesh.transform())
#get infos
nv = meshnode.numVertices()
nf = meshnode.numPolygons()
# m = om.MFloatMatrix()
points = om.MFloatPointArray()
normals = om.MFloatVectorArray()
faceConnects = om.MIntArray()
faceCounts = om.MIntArray()
meshnode.getPoints(points)
#meshnode.getNormals(normals)
meshnode.getVertexNormals(False,normals)
meshnode.getTriangles(faceCounts,faceConnects)
fnormals=[]
if self._usenumpy :
faces = numpy.array(faceConnects).reshape((len(faceConnects)/3,3))
else :
faces = faceConnects
vertices = [self.ToVec(points[i]) for i in range(nv)]
vnormals = [self.ToVec(normals[i]) for i in range(nv)]
#remove the copy if its exist? or keep it ?
#need to apply the transformation
if transform :
#node = self.getNode(mesh)
#fnTrans = om.MFnTransform(mesh)
mmat = fnTrans.transformation()
if self._usenumpy :
mat = self.m2matrix(mmat)
vertices = self.ApplyMatrix(vertices,numpy.array(mat).transpose())
vnormals = self.ApplyMatrix(vnormals,numpy.array(mat).transpose())#??
else :
out_mat = [0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0,0.0]
self.msutil.createFromList( out_mat, len(out_mat) )
ptr1 = self.msutil.asFloat4Ptr()
mmat.asMatrix().get(ptr1)
m = om.MFloatMatrix(ptr1)
vertices = []
for i in range(nv) :
v = points[i]*m
vertices.append(self.ToVec(v))
# vertices = [self.ToVec(p*m) for p in points]
# if edit and copy :
# self.getCurrentScene().SetActiveObject(poly)
# c4d.CallCommand(100004787) #delete the obj
print ("ok",len(faces),len(vertices),len(vnormals))
if "fn" in kw and kw["fn"] :
fnormals = []
p = om.MVector( 0.,0.,0. )
for i in range(len(faces)) :
meshnode.getPolygonNormal(i,p,om.MSpace.kWorld)#kPostTransform
fnormals.append(self.ToVec(p))
return faces,vertices,vnormals,fnormals
else :
return faces,vertices,vnormals
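    # Editor's sketch (hypothetical names): extract triangulated, world-space
    # geometry from a mesh, e.g. for export:
    #
    #   faces, vertices, vnormals = helper.DecomposeMesh("demo_tri",
    #                                                    tri=True, transform=True)
    #   # pass fn=True to also get the per-face normals as a fourth list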
def connectAttr(self,shape,i=0,mat=None):
if mat is not None :
#print shape
#print mat+"SG"
            cmds.isConnected( shape+'.instObjGroups['+str(i)+']', mat+'SG.dagSetMembers')
#need to get the shape : name+"Shape"
def rotation_matrix(self,angle, direction, point=None,trans=None):
"""
Return matrix to rotate about axis defined by point and direction.
"""
if self._usenumpy:
return Helper.rotation_matrix(angle, direction, point=point,trans=trans)
else :
direction = self.FromVec(direction)
direction.normalize()
out_mat = [1.0, 0.0, 0.0,0.0,
0.0, 1.0, 0.0,0.0,
0.0, 0.0, 1.0,0.0,
0.0, 0.0, 0.0,1.0]
m = self.matrixp2m(out_mat)
# m = om.MTransformationMatrix()
m.setToRotationAxis (direction,angle)
if point is not None:
point = self.FromVec(point)
m.setTranslation(point,om.MSpace.kPostTransform)# = point - (point * m)self.vec2m(trans),om.MSpace.kPostTransform
if trans is not None :
trans = self.FromVec(trans)
m.setTranslation(trans,om.MSpace.kPostTransform)
# M = m2matrix(m)
return m
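    # Editor's sketch (assumes math is imported): a transformation for a 90
    # degree turn about the z axis through the origin:
    #
    #   m = helper.rotation_matrix(math.pi / 2., [0., 0., 1.])
    #   mat = helper.m2matrix(m)  # back to a plain 4x4 python list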
#==============================================================================
# properties objec
#==============================================================================
def getPropertyObject(self, obj, key=["radius"]):
"""
Return the property "key" of the object obj
* overwrited by children class for each host
@type obj: host Obj
@param obj: the object that contains the property
@type key: string
@param key: name of the property
@rtype : int, float, str, dict, list
@return : the property value
"""
res = []
if "pos" in key :
res.append(self.ToVec(self.getTranslation(obj)))
if "scale" in key :
res.append(self.ToVec(self.getScale(obj)))
if "rotation" in key :
mo = self.getTransformation(obj)
m = self.ToMat(mo)#.transpose()
mws = m.transpose()
rotMatj = mws[:]
rotMatj[3][:3]*=0.0
res.append(rotMatj)
if self.getType(obj) == self.SPHERE :
for k in key :
if k == "radius" :
try :
r=cmds.polySphere(obj,q=1,r=1)
except :
r=cmds.sphere(obj,q=1,r=1)
res.append(r)
if self.getType(obj) == self.CYLINDER :
for k in key :
if k == "radius" :
r=cmds.polyCylinder(obj,q=1,r=1)
res.append(r)
elif k == "length" :
h=cmds.polyCylinder(obj,q=1,h=1)
res.append(h)
elif k == "axis" :
ax = cmds.polyCylinder(obj,q=1,axis=1)
res.append(ax)
if self.getType(obj) == self.CUBE :
for k in key :
if k == "length" :
l = self.getBoxSize(obj)#cmds.polyCube(obj, q=True,h=True)
res.append(l)
return res
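    # Editor's sketch (hypothetical object names): query primitive properties
    # in one call per object:
    #
    #   radius, = helper.getPropertyObject(sphere_obj, key=["radius"])
    #   length, axis = helper.getPropertyObject(cyl_obj, key=["length", "axis"])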
#===============================================================================
# Texture Mapping / UV
#===============================================================================
def getUV(self,object,faceIndex,vertexIndex,perVertice=True):
mesh = self.getMShape(object)
meshnode = om.MFnMesh(mesh)
#uv=[]
u_util = maya.OpenMaya.MScriptUtil()
u_util.createFromDouble(0.0)
u_ptr = u_util.asFloatPtr()
v_util = maya.OpenMaya.MScriptUtil()
v_util.createFromDouble(0.0)
v_ptr = v_util.asFloatPtr()
if perVertice :
meshnode.getUV(vertexIndex, u_ptr, v_ptr)
u = u_util.getFloat(u_ptr)
v = v_util.getFloat(v_ptr)
return [u,v]
else :
def getuv(faceIndex,iv,u_ptr,v_ptr):
meshnode.getPolygonUV(faceIndex,iv,u_ptr,v_ptr)
u = u_util.getFloat(u_ptr)
v = v_util.getFloat(v_ptr)
return [u,v]
#uv of the face
return [getuv(faceIndex,iv,u_ptr,v_ptr) for iv in range(3)]
#
#
##meshFn = maya.OpenMaya.MFnMesh(node)
##
#u_util = maya.OpenMaya.MScriptUtil()
#u_util.createFromDouble(0.0)
#u_ptr = u_util.asFloatPtr()
#v_util = maya.OpenMaya.MScriptUtil()
#v_util.createFromDouble(0.0)
#v_ptr = v_util.asFloatPtr()
#
#meshFn.getUV(0, u_ptr, v_ptr)
#
#u = u_util.getFloat(u_ptr)
#v = v_util.getFloat(v_ptr))
##getPolygonUVid
##getPolygonUV
#
#should be faster ?
def setUVs(self,object,uvs):
        #uvs is a dictionary: keys are face indices, values are the uv pairs for that face's 3-4 vertices
ob = self.getObject(object)
node = self.getNode('mesh_'+ob)
meshnode = om.MFnMesh(node)
meshnode.clearUVs()
u = om.MFloatArray()
v = om.MFloatArray()
uvCounts = om.MIntArray()
uvIds = om.MIntArray()
i = 0
for f in uvs:
for k,uv in enumerate(uvs[f]):
uvIds.append(i)
uvCounts.append(len(uvs[f]))
u.append(uv[0])
v.append(uv[1])
#meshnode.setUV(i,uv[0],uv[1])
#meshnode.assignUV(f,k,i)
i = i +1
meshnode.setUVs(u,v)
meshnode.assignUVs(uvCounts,uvIds)
def setUV(self,object,faceIndex,vertexIndex,uv,perVertice=True,uvid=0):
ob = self.getObject(object)
node = self.getNode('mesh_'+ob)
meshnode = om.MFnMesh(node)
for k in range(3):
luv = uv[k]
meshnode.setUV(uvid,luv[0],luv[1])
meshnode.assignUV(faceIndex,k,uvid)
uvid = uvid +1
return uvid
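    # Editor's sketch: assign one uv triple per triangular face; uvs maps a
    # face index to its three [u, v] pairs:
    #
    #   uvs = {0: [[0., 0.], [1., 0.], [0., 1.]]}
    #   helper.setUVs("demo_tri", uvs)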
def hyperShade_meVertCol(self):
#mel command : nodeReleaseCallback graph1HyperShadeEd mentalrayVertexColors1 none;
# nodeOutlinerInputsCmd connectWindow|tl|cwForm|connectWindowPane|leftSideCW connectWindow|tl|cwForm|connectWindowPane|rightSideCW; nodeOutliner -e -r connectWindow|tl|cwForm|connectWindowPane|rightSideCW;
# connectAttr -f mesh_MSMS_MOL1crn.colorSet[0].colorName mentalrayVertexColors1.cpvSets[0];
# // Result: Connected mesh_MSMS_MOL1crn.colorSet.colorName to mentalrayVertexColors1.cpvSets. //
# // Result: connectWindow|tl|cwForm|connectWindowPane|rightSideCW //
pass
#==============================================================================
# import / expor / read load / save
#==============================================================================
def readFile(self,filename,**kw):
fileName, fileExtension = os.path.splitext(filename)
fileExtension=fileExtension.replace(".","")
fileExtension=fileExtension.upper()
if fileExtension == "MA":
fileExtension = "mayaAscii"
elif fileExtension == "DAE":
fileExtension = "DAE_FBX"
elif fileExtension == "FBX":
pass
else :
print ("not supported by uPy, contact us!")
return
# doc = self.getCurrentScene()
cmds.file(filename ,type=fileExtension,loadReferenceDepth="all", i=True ) #merge the documets
# c4d.documents.MergeDocument(doc,filename,c4d.SCENEFILTER_OBJECTS|c4d.SCENEFILTER_MATERIALS)
def read(self,filename,**kw):
fileName, fileExtension = os.path.splitext(filename)
fileExtension=fileExtension.replace(".","")
fileExtension=fileExtension.upper()
if fileExtension == "MA":
fileExtension = "mayaAscii"
cmds.file(filename ,type=fileExtension,loadReferenceDepth="all", i=True )
elif fileExtension == "DAE" or fileExtension == "FBX":
import maya.mel as mel
#mel.eval('FBXImportMode -v exmerge;')
filename = filename.replace("\\","\\\\")
mel.eval('FBXImport -f "%s" -t 0;' % filename)#FBXGetTakeName ?
else :
print ("not supported by uPy, contact us!")
return
def write(self,listObj,**kw):
pass
#==============================================================================
# raycasting
#==============================================================================
def raycast(self,obj,start, end, length, **kw ):
#posted on cgtalk.com
#part of http://code.google.com/p/dynamica/
mo = self.getTransformation(obj)
mi = mo.asMatrixInverse()
mat = self.ToMat(mi)#.transpose()
point = self.ApplyMatrix([start],numpy.array(mat).transpose())[0]
direction = self.ApplyMatrix([end],numpy.array(mat).transpose())[0]
#om.MGlobal.clearSelectionList()
om.MGlobal.selectByName(obj)
sList = om.MSelectionList()
#Assign current selection to the selection list object
om.MGlobal.getActiveSelectionList(sList)
item = om.MDagPath()
sList.getDagPath(0, item)
item.extendToShape()
fnMesh = om.MFnMesh(item)
raySource = om.MFloatPoint(float(point[0]), float(point[1]), float(point[2]), 1.0)
rayDir = om.MFloatVector(float(direction[0]-point[0]), float(direction[1]-point[1]), float(direction[2]-point[2]))
faceIds = None
triIds = None
idsSorted = False
testBothDirections = False
worldSpace = om.MSpace.kWorld
maxParam = length#999999
accelParams = None
sortHits = True
hitPoints = om.MFloatPointArray()
#hitRayParams = om.MScriptUtil().asFloatPtr()
hitRayParams = om.MFloatArray()
hitFaces = om.MIntArray()
hitTris = None
hitBarys1 = None
hitBarys2 = None
tolerance = 0.0001
#http://download.autodesk.com/us/maya/2010help/API/class_m_fn_mesh.html#114943af4e75410b0172c58b2818398f
hit = fnMesh.allIntersections(raySource, rayDir, faceIds, triIds, idsSorted, worldSpace,
maxParam, testBothDirections, accelParams, sortHits,
hitPoints, hitRayParams, hitFaces, hitTris, hitBarys1,
hitBarys2, tolerance)
om.MGlobal.clearSelectionList()
#print hit, len(hitFaces)
if "count" in kw :
#result = int(fmod(len(hitFaces), 2))
return hit, len(hitFaces)
        #clear selection as may cause problem if the function is called multiple times in succession
        #an odd number of intersections means the ray start point lies inside the mesh
        result = len(hitFaces) % 2
        return result | gpl-3.0 | -7,838,186,313,154,218,000 | 39.361557 | 212 | 0.542971 | false | 3.67479 | false | false | false |
phenoxim/nova | nova/tests/json_ref.py | 1 | 2271 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_serialization import jsonutils
def _resolve_ref(ref, base_path):
file_path, _, json_path = ref.partition('#')
if json_path:
raise NotImplementedError('JSON refs with JSON path after the "#" is '
'not yet supported')
path = os.path.join(base_path, file_path)
# binary mode is needed due to bug/1515231
with open(path, 'r+b') as f:
ref_value = jsonutils.load(f)
base_path = os.path.dirname(path)
res = resolve_refs(ref_value, base_path)
return res
def resolve_refs(obj_with_refs, base_path):
if isinstance(obj_with_refs, list):
for i, item in enumerate(obj_with_refs):
obj_with_refs[i] = resolve_refs(item, base_path)
elif isinstance(obj_with_refs, dict):
if '$ref' in obj_with_refs.keys():
ref = obj_with_refs.pop('$ref')
resolved_ref = _resolve_ref(ref, base_path)
# the rest of the ref dict contains overrides for the ref. Apply
# those overrides recursively here.
_update_dict_recursively(resolved_ref, obj_with_refs)
return resolved_ref
else:
for key, value in obj_with_refs.items():
obj_with_refs[key] = resolve_refs(value, base_path)
else:
# scalar, nothing to do
pass
return obj_with_refs
def _update_dict_recursively(d, update):
"""Update dict d recursively with data from dict update"""
for k, v in update.items():
if k in d and isinstance(d[k], dict) and isinstance(v, dict):
_update_dict_recursively(d[k], v)
else:
d[k] = v
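# Editor's sketch (hypothetical paths): given api/base.json containing
# {"server": {"$ref": "server.json", "name": "override"}}, the reference is
# inlined from api/server.json and the sibling keys are applied on top:
#
#   with open('api/base.json', 'r+b') as f:
#       data = jsonutils.load(f)
#   resolved = resolve_refs(data, base_path='api')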
| apache-2.0 | 7,998,802,605,201,372,000 | 35.047619 | 78 | 0.625716 | false | 3.772425 | false | false | false |
claytantor/coinbase4py | webapp/settings.py | 1 | 4533 | import os
from ConfigParser import RawConfigParser
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_DIR = os.path.dirname(__file__)
CONF_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
here = lambda x: os.path.join(os.path.abspath(os.path.dirname(__file__)), x)
# you will need to copy the example and make custom
# settings for the environment
config = RawConfigParser()
#place in a dir that is not managed in the code base
# print 'config dir: {0}/conf/gitpatron_settings.ini'.format(CONF_DIR)
config.read('{0}/conf/coinbase4py_settings.ini'.format(CONF_DIR))
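# A minimal sketch of the expected ini layout (editor's addition; values are
# placeholders, section/key names taken from the config.get calls below):
#
#   [secrets]
#   DJANGO_SECRET_KEY = change-me
#   [debug]
#   DEBUG = True
#   TEMPLATE_DEBUG = True
#   [base]
#   ENVIRONMENT = dev
#   [coinbase4py]
#   USER_ONE = alice
#   [coinbase]
#   COINBASE_OAUTH_CLIENT_ID = your-client-id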
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config.get('secrets','DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config.get('debug','DEBUG')
TEMPLATE_DEBUG = config.get('debug','TEMPLATE_DEBUG')
ENVIRONMENT = config.get('base','ENVIRONMENT')
ALLOWED_HOSTS = []
#the database for the app
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_DIR, 'coinbase4py.db'),
}
}
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.humanize',
'django.contrib.staticfiles',
'coinbase4py',
'webapp',
'webapp.templatetags',
)
TEMPLATE_CONTEXT_PROCESSORS = ("django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages")
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'webapp.urls'
WSGI_APPLICATION = 'webapp.wsgi.application'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# Additional locations of static files
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, '../', 'static/'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_DIRS = (
PROJECT_DIR + '/../webapp/templates/',
)
USER_ONE=config.get('coinbase4py','USER_ONE')
USER_TWO=config.get('coinbase4py','USER_TWO')
TEST_STATE_DIR=config.get('coinbase4py','TEST_STATE_DIR')
COINBASE4PY_PW_SECRET_KEY=config.get('coinbase4py','COINBASE4PY_PW_SECRET_KEY')
COINBASE_OAUTH_CLIENT_APP=config.get('coinbase','COINBASE_OAUTH_CLIENT_APP')
COINBASE_OAUTH_CLIENT_ID=config.get('coinbase','COINBASE_OAUTH_CLIENT_ID')
COINBASE_OAUTH_CLIENT_SECRET=config.get('coinbase','COINBASE_OAUTH_CLIENT_SECRET')
COINBASE_OAUTH_CLIENT_CALLBACK=config.get('coinbase','COINBASE_OAUTH_CLIENT_CALLBACK')
COINBASE4PY_APP_URL=config.get('coinbase','COINBASE4PY_APP_URL')
COINBASE_ORDER_CALLBACK='{0}/{1}'.format(
config.get('coinbase','COINBASE4PY_APP_URL'),
config.get('coinbase','COINBASE_ORDER_CALLBACK'))
| apache-2.0 | 6,435,432,027,036,842,000 | 33.869231 | 86 | 0.701302 | false | 3.591918 | true | false | false |
rddim/Notepad-plus-plus | scintilla/qt/ScintillaEdit/WidgetGen.py | 5 | 8222 | #!/usr/bin/env python3
# WidgetGen.py - regenerate the ScintillaWidgetCpp.cpp and ScintillaWidgetCpp.h files
# Check that API includes all gtkscintilla2 functions
import sys
import os
import getopt
scintillaDirectory = "../.."
scintillaScriptsDirectory = os.path.join(scintillaDirectory, "scripts")
sys.path.append(scintillaScriptsDirectory)
import Face
from FileGenerator import GenerateFile
def underscoreName(s):
# Name conversion fixes to match gtkscintilla2
irregular = ['WS', 'EOL', 'AutoC', 'KeyWords', 'BackSpace', 'UnIndents', 'RE', 'RGBA']
for word in irregular:
replacement = word[0] + word[1:].lower()
s = s.replace(word, replacement)
out = ""
for c in s:
if c.isupper():
if out:
out += "_"
out += c.lower()
else:
out += c
return out
def normalisedName(s, options, role=None):
if options["qtStyle"]:
if role == "get":
s = s.replace("Get", "")
return s[0].lower() + s[1:]
else:
return underscoreName(s)
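# Editor's note -- doctest-style examples of the two naming schemes:
#
#   >>> normalisedName("GetUseTabs", {"qtStyle": True}, role="get")
#   'useTabs'
#   >>> normalisedName("AutoCSetIgnoreCase", {"qtStyle": False})
#   'autoc_set_ignore_case'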
typeAliases = {
"position": "int",
"line": "int",
"pointer": "int",
"colour": "int",
"keymod": "int",
"string": "const char *",
"stringresult": "const char *",
"cells": "const char *",
}
def cppAlias(s):
if s in typeAliases:
return typeAliases[s]
elif Face.IsEnumeration(s):
return "int"
else:
return s
understoodTypes = ["", "void", "int", "bool", "position", "line", "pointer",
"colour", "keymod", "string", "stringresult", "cells"]
def understoodType(t):
return t in understoodTypes or Face.IsEnumeration(t)
def checkTypes(name, v):
understandAllTypes = True
if not understoodType(v["ReturnType"]):
#~ print("Do not understand", v["ReturnType"], "for", name)
understandAllTypes = False
if not understoodType(v["Param1Type"]):
#~ print("Do not understand", v["Param1Type"], "for", name)
understandAllTypes = False
if not understoodType(v["Param2Type"]):
#~ print("Do not understand", v["Param2Type"], "for", name)
understandAllTypes = False
return understandAllTypes
def arguments(v, stringResult, options):
ret = ""
p1Type = cppAlias(v["Param1Type"])
if p1Type == "int":
p1Type = "sptr_t"
if p1Type:
ret = ret + p1Type + " " + normalisedName(v["Param1Name"], options)
p2Type = cppAlias(v["Param2Type"])
if p2Type == "int":
p2Type = "sptr_t"
if p2Type and not stringResult:
if p1Type:
ret = ret + ", "
ret = ret + p2Type + " " + normalisedName(v["Param2Name"], options)
return ret
def printPyFile(f, options):
out = []
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["val"]:
out.append(name + "=" + v["Value"])
if feat in ["evt"]:
out.append("SCN_" + name.upper() + "=" + v["Value"])
if feat in ["fun"]:
out.append("SCI_" + name.upper() + "=" + v["Value"])
return out
def printHFile(f, options):
out = []
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["fun", "get", "set"]:
if checkTypes(name, v):
constDeclarator = " const" if feat == "get" else ""
returnType = cppAlias(v["ReturnType"])
if returnType == "int":
returnType = "sptr_t"
stringResult = v["Param2Type"] == "stringresult"
if stringResult:
returnType = "QByteArray"
out.append("\t" + returnType + " " + normalisedName(name, options, feat) + "(" +
arguments(v, stringResult, options)+
")" + constDeclarator + ";")
return out
def methodNames(f, options):
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["fun", "get", "set"]:
if checkTypes(name, v):
yield normalisedName(name, options)
def printCPPFile(f, options):
out = []
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["fun", "get", "set"]:
if checkTypes(name, v):
constDeclarator = " const" if feat == "get" else ""
featureDefineName = "SCI_" + name.upper()
returnType = cppAlias(v["ReturnType"])
if returnType == "int":
returnType = "sptr_t"
stringResult = v["Param2Type"] == "stringresult"
if stringResult:
returnType = "QByteArray"
returnStatement = ""
if returnType != "void":
returnStatement = "return "
out.append(returnType + " ScintillaEdit::" + normalisedName(name, options, feat) + "(" +
arguments(v, stringResult, options) +
")" + constDeclarator + " {")
returns = ""
if stringResult:
returns += " " + returnStatement + "TextReturner(" + featureDefineName + ", "
if "*" in cppAlias(v["Param1Type"]):
returns += "(sptr_t)"
if v["Param1Name"]:
returns += normalisedName(v["Param1Name"], options)
else:
returns += "0"
returns += ");"
else:
returns += " " + returnStatement + "send(" + featureDefineName + ", "
if "*" in cppAlias(v["Param1Type"]):
returns += "(sptr_t)"
if v["Param1Name"]:
returns += normalisedName(v["Param1Name"], options)
else:
returns += "0"
returns += ", "
if "*" in cppAlias(v["Param2Type"]):
returns += "(sptr_t)"
if v["Param2Name"]:
returns += normalisedName(v["Param2Name"], options)
else:
returns += "0"
returns += ");"
out.append(returns)
out.append("}")
out.append("")
return out
def gtkNames():
# The full path on my machine: should be altered for anyone else
p = "C:/Users/Neil/Downloads/wingide-source-4.0.1-1/wingide-source-4.0.1-1/external/gtkscintilla2/gtkscintilla.c"
with open(p) as f:
for l in f.readlines():
if "gtk_scintilla_" in l:
name = l.split()[1][14:]
if '(' in name:
name = name.split('(')[0]
yield name
def usage():
print("WidgetGen.py [-c|--clean][-h|--help][-u|--underscore-names]")
print("")
print("Generate full APIs for ScintillaEdit class and ScintillaConstants.py.")
print("")
print("options:")
print("")
print("-c --clean remove all generated code from files")
print("-h --help display this text")
print("-u --underscore-names use method_names consistent with GTK+ standards")
def readInterface(cleanGenerated):
f = Face.Face()
if not cleanGenerated:
f.ReadFromFile("../../include/Scintilla.iface")
return f
def main(argv):
# Using local path for gtkscintilla2 so don't default to checking
checkGTK = False
cleanGenerated = False
qtStyleInterface = True
# The --gtk-check option checks for full coverage of the gtkscintilla2 API but
# depends on a particular directory so is not mentioned in --help.
opts, args = getopt.getopt(argv, "hcgu", ["help", "clean", "gtk-check", "underscore-names"])
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-c", "--clean"):
cleanGenerated = True
elif opt in ("-g", "--gtk-check"):
checkGTK = True
elif opt in ("-u", "--underscore-names"):
qtStyleInterface = False
options = {"qtStyle": qtStyleInterface}
f = readInterface(cleanGenerated)
try:
GenerateFile("ScintillaEdit.cpp.template", "ScintillaEdit.cpp",
"/* ", True, printCPPFile(f, options))
GenerateFile("ScintillaEdit.h.template", "ScintillaEdit.h",
"/* ", True, printHFile(f, options))
GenerateFile("../ScintillaEditPy/ScintillaConstants.py.template",
"../ScintillaEditPy/ScintillaConstants.py",
"# ", True, printPyFile(f, options))
if checkGTK:
names = set(methodNames(f))
#~ print("\n".join(names))
namesGtk = set(gtkNames())
for name in namesGtk:
if name not in names:
print(name, "not found in Qt version")
for name in names:
if name not in namesGtk:
print(name, "not found in GTK+ version")
except:
raise
if cleanGenerated:
for file in ["ScintillaEdit.cpp", "ScintillaEdit.h", "../ScintillaEditPy/ScintillaConstants.py"]:
try:
os.remove(file)
except OSError:
pass
if __name__ == "__main__":
main(sys.argv[1:])
| gpl-3.0 | -2,808,446,952,764,096,500 | 28.679104 | 114 | 0.610071 | false | 3.096798 | false | false | false |
neocogent/electrum | setup.py | 1 | 3017 | #!/usr/bin/env python3
# python setup.py sdist --format=zip,gztar
import os
import sys
import platform
import importlib.util
import argparse
import subprocess
from setuptools import setup, find_packages
from setuptools.command.install import install
MIN_PYTHON_VERSION = "3.6.1"
_min_python_version_tuple = tuple(map(int, (MIN_PYTHON_VERSION.split("."))))
if sys.version_info[:3] < _min_python_version_tuple:
sys.exit("Error: Electrum requires Python version >= %s..." % MIN_PYTHON_VERSION)
with open('contrib/requirements/requirements.txt') as f:
requirements = f.read().splitlines()
with open('contrib/requirements/requirements-hw.txt') as f:
requirements_hw = f.read().splitlines()
# load version.py; needlessly complicated alternative to "imp.load_source":
version_spec = importlib.util.spec_from_file_location('version', 'electrum/version.py')
version_module = version = importlib.util.module_from_spec(version_spec)
version_spec.loader.exec_module(version_module)
data_files = []
if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
parser = argparse.ArgumentParser()
parser.add_argument('--root=', dest='root_path', metavar='dir', default='/')
opts, _ = parser.parse_known_args(sys.argv[1:])
usr_share = os.path.join(sys.prefix, "share")
icons_dirname = 'pixmaps'
if not os.access(opts.root_path + usr_share, os.W_OK) and \
not os.access(opts.root_path, os.W_OK):
icons_dirname = 'icons'
if 'XDG_DATA_HOME' in os.environ.keys():
usr_share = os.environ['XDG_DATA_HOME']
else:
usr_share = os.path.expanduser('~/.local/share')
data_files += [
(os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
(os.path.join(usr_share, icons_dirname), ['electrum/gui/icons/electrum.png']),
]
extras_require = {
'hardware': requirements_hw,
'fast': ['pycryptodomex'],
'gui': ['pyqt5'],
}
extras_require['full'] = [pkg for sublist in list(extras_require.values()) for pkg in sublist]
setup(
name="Electrum",
version=version.ELECTRUM_VERSION,
python_requires='>={}'.format(MIN_PYTHON_VERSION),
install_requires=requirements,
extras_require=extras_require,
packages=[
'electrum',
'electrum.gui',
'electrum.gui.qt',
'electrum.plugins',
] + [('electrum.plugins.'+pkg) for pkg in find_packages('electrum/plugins')],
package_dir={
'electrum': 'electrum'
},
package_data={
'': ['*.txt', '*.json', '*.ttf', '*.otf'],
'electrum': [
'wordlist/*.txt',
'locale/*/LC_MESSAGES/electrum.mo',
],
'electrum.gui': [
'icons/*',
],
},
scripts=['electrum/electrum'],
data_files=data_files,
description="Lightweight Bitcoin Wallet",
author="Thomas Voegtlin",
author_email="[email protected]",
license="MIT Licence",
url="https://electrum.org",
long_description="""Lightweight Bitcoin Wallet""",
)
| mit | 6,636,443,891,689,976,000 | 31.095745 | 94 | 0.640371 | false | 3.378499 | false | false | false |
M4rtinK/tsubame | core/platform/base_platform_module.py | 1 | 8584 | # -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Base class for Tsubame platform modules.
#----------------------------------------------------------------------------
# Copyright 2017, Martin Kolman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
from core import constants
from core.signal import Signal
class PlatformModule(object):
"""A Tsubame base platform module."""
def __init__(self):
self.internet_connectivity_changed = Signal()
@property
def platform_id(self):
"""Return an unique string identifying the device module."""
return None
@property
def device_name(self):
"""Return a human readable name of the device."""
return "unknown device"
@property
def preferred_window_wh(self):
"""Return the preferred application window size in pixels."""
# we'll use VGA as a default value
return 640, 480
@property
def start_in_fullscreen(self):
"""Return if Tsubame should be started fullscreen.
NOTE: this is a default value and can be overridden by a
user-set options key, etc.
"""
return False
@property
def fullscreen_only(self):
"""Report if the platform is fullscreen-only.
Some platforms are basically fullscreen-only (Harmattan),
as applications only switch between fullscreen and a task switcher.
"""
return False
@property
def screen_blanking_control_supported(self):
"""There is no universal way to control screen blanking, so its off by default.
NOTE: Screen blanking can be implemented and enabled in the corresponding
device or gui module.
"""
return False
def pause_screen_blanking(self):
"""Pause screen blanking controlled by device module.
calling this method should pause screen blanking
* on mobile devices, screen balking needs to be paused every n seconds
* on desktop, one call might be enough, still, several calls should
be handled without issues
* also what about restoring the screen blanking on Desktop
once Tsubame exits ?
"""
pass
@property
def supported_gui_module_ids(self):
"""Supported GUI module IDs, ordered by preference from left to right.
THE ":" NOTATION
single GUI modules might support different subsets, the usability of
these subsets can vary based on the current platform
-> this functions enabled device modules to report which GUI subsets
are most suitable for the given platform
-> the string starts with the module id prefix, is separated by : and
continues with the subset id
EXAMPLE: ["QML:harmattan","QML:indep","GTK"]
-> QML GUI with Harmattan Qt Components is preferred,
QML GUI with platform independent Qt Components is less preferred
and the GTK GUI is set as a fallback if everything else fails
CURRENT USAGE
there are different incompatible native Qt Component sets
on various platforms (Harmattan QTC, Plasma Active QTC, Jolla QTC,...)
the QML GUI aims to support most of these components sets to provide
native look & feel and the subset id is used by the device module
to signal the GUI module which QTC component to use
"""
return ["qt5"] # the Qt 5 GUI is the default
@property
def has_notification_support(self):
"""Report if the device provides its own notification method."""
return False
def notify(self, message, msTimeout=0, icon=""):
"""Send a notification using platform/device specific API."""
pass
@property
def has_keyboard(self):
"""Report if the device has a hardware keyboard."""
return True
@property
def has_buttons(self):
"""Report if the device has some usable buttons other than a hardware keyboard."""
if self.has_volume_keys:
return True
else:
return False
@property
def has_volume_keys(self):
"""Report if the device has application-usable volume control keys or their equivalent.
        Basically just two nearby buttons that can be used for zooming up/down,
skipping to next/previous and similar actions.
"""
return False
def enable_volume_keys(self):
pass
@property
def profile_path(self):
"""Return path to the main profile folder or None if default path should be used.
:returns: path to the profile folder or None
:rtype: str or None
"""
return None
@property
def needs_quit_button(self):
"""On some platforms applications need to provide their own shutdown buttons."""
return False
@property
def needs_back_button(self):
"""Some platforms (Sailfish OS) don't need a in-UI back button."""
return True
@property
def needs_page_background(self):
"""Some platforms (Sailfish OS) don't need a page background."""
return True
@property
def handles_url_opening(self):
"""Some platform provide specific APIs for URL opening.
For example, on the N900 a special DBUS command not available
elsewhere needs to be used.
"""
return False
def open_url(self, url):
"""Open a URL."""
import webbrowser
webbrowser.open(url)
@property
def connectivity_status(self):
"""Report the current status of internet connectivity on the device.
None - status reporting not supported or status unknown
True - connected to the Internet
False - disconnected from the Internet
"""
connected = constants.InternetConnectivityStatus.OFFLINE
# open the /proc/net/route file
with open('/proc/net/route', 'r') as f:
for line in f:
# the line is delimited by tabulators
lineSplit = line.split('\t')
# check if the length is valid
if len(lineSplit) >= 11:
if lineSplit[1] == '00000000' and lineSplit[7] == '00000000':
# if destination and mask are 00000000,
# it is probably an Internet connection
connected = constants.InternetConnectivityStatus.ONLINE
break
return connected
def enable_internet_connectivity(self):
"""Try to make sure that the device connects to the Internet."""
pass
@property
def device_type(self):
"""Returns type of the current device.
The device can currently be either a PC
(desktop or laptop/notebook),
smartphone or a tablet.
This is currently used mainly for rough
DPI estimation.
Example:
* high resolution & PC -> low DPI
* high resolution & smartphone -> high DPI
        * low resolution & smartphone -> low DPI
This could also be used in the future to
use different PC/smartphone/tablet GUI styles.
By default, the device type is unknown.
"""
return None
@property
def qmlscene_command(self):
"""What should be called to start the qmlscene.
:returns: command to run to start qmlscene
:rtype: str
"""
return "qmlscene"
@property
def universal_components_backend(self):
"""Path to a Universal Components backend suitable for the given platform.
We default to the Controls UC backend.
:returns: path to suitable UC backend
:rtype: str
"""
return "controls"
| gpl-3.0 | -3,431,817,031,588,530,700 | 33.894309 | 95 | 0.612069 | false | 4.846979 | false | false | false |
avlach/univbris-ocf | vt_manager/src/python/vt_manager/communication/sfa/util/callids.py | 2 | 2280 | #!/usr/bin/python
import threading
import time
from vt_manager.communication.sfa.util.sfalogging import logger # needed below by logger.debug/logger.warning
"""
Callids: a simple mechanism to remember the call ids served so fas
memory-only for now - thread-safe
implemented as a (singleton) hash 'callid'->timestamp
"""
debug=False
class _call_ids_impl (dict):
_instance = None
# 5 minutes sounds amply enough
purge_timeout=5*60
# when trying to get a lock
retries=10
# in ms
wait_ms=100
def __init__(self):
self._lock=threading.Lock()
# the only primitive
# return True if the callid is unknown, False otherwise
def already_handled (self,call_id):
# if not provided in the call...
if not call_id: return False
has_lock=False
for attempt in range(_call_ids_impl.retries):
if debug: logger.debug("Waiting for lock (%d)"%attempt)
if self._lock.acquire(False):
has_lock=True
if debug: logger.debug("got lock (%d)"%attempt)
break
time.sleep(float(_call_ids_impl.wait_ms)/1000)
# in the unlikely event where we can't get the lock
if not has_lock:
logger.warning("_call_ids_impl.should_handle_call_id: could not acquire lock")
return False
# we're good to go
if self.has_key(call_id):
self._purge()
self._lock.release()
return True
self[call_id]=time.time()
self._purge()
self._lock.release()
if debug: logger.debug("released lock")
return False
def _purge(self):
now=time.time()
o_keys=[]
for (k,v) in self.iteritems():
if (now-v) >= _call_ids_impl.purge_timeout: o_keys.append(k)
for k in o_keys:
if debug: logger.debug("Purging call_id %r (%s)"%(k,time.strftime("%H:%M:%S",time.localtime(self[k]))))
del self[k]
if debug:
logger.debug("AFTER PURGE")
for (k,v) in self.iteritems(): logger.debug("%s -> %s"%(k,time.strftime("%H:%M:%S",time.localtime(v))))
def Callids ():
if not _call_ids_impl._instance:
_call_ids_impl._instance = _call_ids_impl()
return _call_ids_impl._instance
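# Editor's sketch: drop duplicate deliveries of the same remote call:
#
#   if call_id and Callids().already_handled(call_id):
#       return  # duplicate delivery, ignore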
| bsd-3-clause | 5,228,022,733,388,344,000 | 30.666667 | 115 | 0.578509 | false | 3.653846 | false | false | false |
live-clones/dolfin-adjoint | timestepping/python/timestepping/pre_assembled_equations.py | 1 | 21818 | #!/usr/bin/env python2
# Copyright (C) 2011-2012 by Imperial College London
# Copyright (C) 2013 University of Oxford
# Copyright (C) 2014-2016 University of Edinburgh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3 of the License
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import copy
import dolfin
import ufl
from .caches import *
from .equation_solvers import *
from .exceptions import *
from .fenics_overrides import *
from .fenics_utils import *
from .pre_assembled_forms import *
from .statics import *
__all__ = \
[
"PAEquationSolver",
"pa_solve"
]
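# Editor's sketch (FEniCS-style names assumed; see the class docstring below
# for the extra keyword arguments accepted on top of dolfin.solve):
#
#   pa_solve(inner(test, trial) * dx == inner(test, f) * dx, x,
#            solver_parameters={"linear_solver": "cg"},
#            initial_guess=x0)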
class PAEquationSolver(EquationSolver):
"""
An EquationSolver applying additional pre-assembly and linear solver caching
optimisations. This utilises pre-assembly of static terms. The arguments match
those accepted by the DOLFIN solve function, with the following differences:
Argument 1: May be a general equation. Linear systems are detected
automatically.
initial_guess: The initial guess for an iterative solver.
adjoint_solver_parameters: A dictionary of linear solver parameters for an
adjoint equation solve.
"""
def __init__(self, *args, **kwargs):
args, kwargs = copy.copy(args), copy.copy(kwargs)
# Process arguments not to be passed to _extract_args
if "initial_guess" in kwargs:
if not kwargs["initial_guess"] is None and not isinstance(kwargs["initial_guess"], dolfin.Function):
raise InvalidArgumentException("initial_guess must be a Function")
initial_guess = kwargs["initial_guess"]
del(kwargs["initial_guess"])
else:
initial_guess = None
if "adjoint_solver_parameters" in kwargs:
if not kwargs["adjoint_solver_parameters"] is None and not isinstance(kwargs["adjoint_solver_parameters"], dict):
raise InvalidArgumentException("adjoint_solver_parameters must be a dictionary")
adjoint_solver_parameters = kwargs["adjoint_solver_parameters"]
del(kwargs["adjoint_solver_parameters"])
else:
adjoint_solver_parameters = None
if "pre_assembly_parameters" in kwargs:
pre_assembly_parameters = kwargs["pre_assembly_parameters"]
del(kwargs["pre_assembly_parameters"])
else:
pre_assembly_parameters = {}
# Process remaining arguments
if "form_compiler_parameters" in kwargs:
raise NotImplementedException("form_compiler_parameters argument not supported")
eq, x, bcs, J, tol, goal, form_parameters, solver_parameters = dolfin.fem.solving._extract_args(*args, **kwargs)
# Relax requirements on equation syntax
eq_lhs_rank = form_rank(eq.lhs)
if eq_lhs_rank == 1:
form = eq.lhs
if not is_zero_rhs(eq.rhs):
form -= eq.rhs
if x in ufl.algorithms.extract_coefficients(form):
if J is None:
J = derivative(form, x)
if x in ufl.algorithms.extract_coefficients(J):
# Non-linear solve
is_linear = False
else:
# Linear solve, rank 2 LHS
cache_info("Detected that solve for %s is linear" % x.name())
form = dolfin.replace(form, {x:dolfin.TrialFunction(x.function_space())})
eq = dolfin.lhs(form) == dolfin.rhs(form)
eq_lhs_rank = form_rank(eq.lhs)
assert(eq_lhs_rank == 2)
is_linear = True
else:
# Linear solve, rank 1 LHS
is_linear = True
elif eq_lhs_rank == 2:
form = eq.lhs
if not is_zero_rhs(eq.rhs):
form -= eq.rhs
if not x in ufl.algorithms.extract_coefficients(form):
# Linear solve, rank 2 LHS
eq = dolfin.lhs(form) == dolfin.rhs(form)
eq_lhs_rank = form_rank(eq.lhs)
assert(eq_lhs_rank == 2)
is_linear = True
else:
# A rank 2 LHS that still depends on the unknown x is neither a valid
# linear nor a valid non-linear problem
raise InvalidArgumentException("Invalid equation")
# Initial guess sanity checking
if is_linear:
if not "krylov_solver" in solver_parameters:
solver_parameters["krylov_solver"] = {}
def initial_guess_enabled():
return solver_parameters["krylov_solver"].get("nonzero_initial_guess", False)
def initial_guess_disabled():
return not solver_parameters["krylov_solver"].get("nonzero_initial_guess", True)
def enable_initial_guess():
solver_parameters["krylov_solver"]["nonzero_initial_guess"] = True
return
if initial_guess is None:
if initial_guess_enabled():
initial_guess = x
elif eq_lhs_rank == 1:
# Supplied an initial guess for a linear solve with a rank 1 LHS -
# ignore it
initial_guess = None
elif "linear_solver" in solver_parameters and not solver_parameters["linear_solver"] in ["direct", "lu"] and not dolfin.has_lu_solver_method(solver_parameters["linear_solver"]):
# Supplied an initial guess with a Krylov solver - check the
# initial_guess solver parameter
if initial_guess_disabled():
raise ParameterException("initial_guess cannot be set if nonzero_initial_guess solver parameter is False")
enable_initial_guess()
elif is_linear:
# Supplied an initial guess for a linear solve with an LU solver -
# ignore it
initial_guess = None
# Initialise
EquationSolver.__init__(self, eq, x, bcs,
solver_parameters = solver_parameters,
adjoint_solver_parameters = adjoint_solver_parameters,
pre_assembly_parameters = pre_assembly_parameters)
self.__args = args
self.__kwargs = kwargs
self.__J = J
self.__tol = tol
self.__goal = goal
self.__form_parameters = form_parameters
self.__initial_guess = initial_guess
# Assemble
self.reassemble()
return
def reassemble(self, *args):
"""
Reassemble the PAEquationSolver. If no arguments are supplied, reassemble
both the LHS and RHS. Otherwise, only reassemble the LHS or RHS if they
depend upon the supplied Constant s or Function s. Note that this does
not clear the assembly or linear solver caches -- hence if a static
Constant, Function, or DirichletBC is modified then one should clear the
caches before calling reassemble on the PAEquationSolver.
"""
x, eq, bcs, linear_solver_parameters, pre_assembly_parameters = self.x(), \
self.eq(), self.bcs(), self.linear_solver_parameters(), \
self.pre_assembly_parameters()
x_deps = self.dependencies()
a, L, linear_solver = None, None, None
if self.is_linear():
for dep in x_deps:
if dep is x:
raise DependencyException("Invalid non-linear solve")
def assemble_lhs():
eq_lhs_rank = form_rank(eq.lhs)
if eq_lhs_rank == 2:
static_bcs = n_non_static_bcs(bcs) == 0
static_form = is_static_form(eq.lhs)
if not pre_assembly_parameters["equations"]["symmetric_boundary_conditions"] and len(bcs) > 0 and static_bcs and static_form:
a = assembly_cache.assemble(eq.lhs,
bcs = bcs, symmetric_bcs = False)
cache_info("Pre-assembled LHS terms in solve for %s : 1" % x.name())
cache_info("Non-pre-assembled LHS terms in solve for %s: 0" % x.name())
linear_solver = linear_solver_cache.linear_solver(eq.lhs,
linear_solver_parameters,
bcs = bcs, symmetric_bcs = False,
a = a)
linear_solver.set_operator(a)
elif len(bcs) == 0 and static_form:
a = assembly_cache.assemble(eq.lhs)
cache_info("Pre-assembled LHS terms in solve for %s : 1" % x.name())
cache_info("Non-pre-assembled LHS terms in solve for %s: 0" % x.name())
linear_solver = linear_solver_cache.linear_solver(eq.lhs,
linear_solver_parameters,
a = a)
linear_solver.set_operator(a)
else:
a = PAForm(eq.lhs, pre_assembly_parameters = pre_assembly_parameters["bilinear_forms"])
cache_info("Pre-assembled LHS terms in solve for %s : %i" % (x.name(), a.n_pre_assembled()))
cache_info("Non-pre-assembled LHS terms in solve for %s: %i" % (x.name(), a.n_non_pre_assembled()))
linear_solver = linear_solver_cache.linear_solver(eq.lhs,
linear_solver_parameters, pre_assembly_parameters["bilinear_forms"],
static = a.is_static() and static_bcs,
bcs = bcs, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
else:
assert(eq_lhs_rank == 1)
a = PAForm(eq.lhs, pre_assembly_parameters = pre_assembly_parameters["linear_forms"])
cache_info("Pre-assembled LHS terms in solve for %s : %i" % (x.name(), a.n_pre_assembled()))
cache_info("Non-pre-assembled LHS terms in solve for %s: %i" % (x.name(), a.n_non_pre_assembled()))
linear_solver = None
return a, linear_solver
def assemble_rhs():
L = PAForm(eq.rhs, pre_assembly_parameters = pre_assembly_parameters["linear_forms"])
cache_info("Pre-assembled RHS terms in solve for %s : %i" % (x.name(), L.n_pre_assembled()))
cache_info("Non-pre-assembled RHS terms in solve for %s: %i" % (x.name(), L.n_non_pre_assembled()))
return L
if len(args) == 0:
a, linear_solver = assemble_lhs()
L = assemble_rhs()
else:
a, linear_solver = self.__a, self.__linear_solver
L = self.__L
lhs_cs = ufl.algorithms.extract_coefficients(eq.lhs)
rhs_cs = ufl.algorithms.extract_coefficients(eq.rhs)
for dep in args:
if dep in lhs_cs:
a, linear_solver = assemble_lhs()
break
for dep in args:
if dep in rhs_cs:
L = assemble_rhs()
break
elif self.solver_parameters().get("nonlinear_solver", "newton") == "newton":
J, hbcs = self.J(), self.hbcs()
def assemble_lhs():
a = PAForm(J, pre_assembly_parameters = pre_assembly_parameters["bilinear_forms"])
cache_info("Pre-assembled LHS terms in solve for %s : %i" % (x.name(), a.n_pre_assembled()))
cache_info("Non-pre-assembled LHS terms in solve for %s: %i" % (x.name(), a.n_non_pre_assembled()))
linear_solver = linear_solver_cache.linear_solver(J,
linear_solver_parameters, pre_assembly_parameters["bilinear_forms"],
static = False,
bcs = hbcs, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
return a, linear_solver
def assemble_rhs():
L = -eq.lhs
if not is_zero_rhs(eq.rhs):
L += eq.rhs
L = PAForm(L, pre_assembly_parameters = pre_assembly_parameters["linear_forms"])
cache_info("Pre-assembled RHS terms in solve for %s : %i" % (x.name(), L.n_pre_assembled()))
cache_info("Non-pre-assembled RHS terms in solve for %s: %i" % (x.name(), L.n_non_pre_assembled()))
return L
if len(args) == 0:
a, linear_solver = assemble_lhs()
L = assemble_rhs()
else:
a, linear_solver = self.__a, self.__linear_solver
L = self.__L
lhs_cs = set(ufl.algorithms.extract_coefficients(J))
rhs_cs = set(ufl.algorithms.extract_coefficients(eq.lhs))
if not is_zero_rhs(eq.rhs):
rhs_cs.update(ufl.algorithms.extract_coefficients(eq.rhs))
for dep in args:
if dep in lhs_cs:
a, linear_solver = assemble_lhs()
break
for dep in args:
if dep in rhs_cs:
L = assemble_rhs()
break
self.__dx = x.vector().copy()
self.__a, self.__L, self.__linear_solver = a, L, linear_solver
return
def dependencies(self, non_symbolic = False):
"""
Return equation dependencies. If non_symbolic is true, also return any
other dependencies which could alter the result of a solve, such as the
initial guess.
"""
if not non_symbolic:
return EquationSolver.dependencies(self, non_symbolic = False)
elif not self.__initial_guess is None:
deps = copy.copy(EquationSolver.dependencies(self, non_symbolic = True))
deps.add(self.__initial_guess)
return deps
else:
return EquationSolver.dependencies(self, non_symbolic = True)
def linear_solver(self):
"""
Return the linear solver.
"""
return self.__linear_solver
def solve(self):
"""
Solve the equation
"""
x, pre_assembly_parameters = self.x(), self.pre_assembly_parameters()
if not self.__initial_guess is None and not self.__initial_guess is x:
x.assign(self.__initial_guess)
if self.is_linear():
bcs, linear_solver = self.bcs(), self.linear_solver()
if isinstance(self.__a, dolfin.GenericMatrix):
L = assemble(self.__L, copy = len(bcs) > 0)
enforce_bcs(L, bcs)
linear_solver.solve(x.vector(), L)
elif self.__a.rank() == 2:
a = assemble(self.__a, copy = len(bcs) > 0)
L = assemble(self.__L, copy = len(bcs) > 0)
apply_bcs(a, bcs, L = L, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
linear_solver.set_operator(a)
linear_solver.solve(x.vector(), L)
else:
assert(self.__a.rank() == 1)
assert(linear_solver is None)
a = assemble(self.__a, copy = False)
L = assemble(self.__L, copy = False)
assert(L.local_range() == a.local_range())
x.vector().set_local(L.array() / a.array())
x.vector().apply("insert")
enforce_bcs(x.vector(), bcs)
elif self.solver_parameters().get("nonlinear_solver", "newton") == "newton":
# Newton solver, intended to have near identical behaviour to the Newton
# solver supplied with DOLFIN. See
# http://fenicsproject.org/documentation/tutorial/nonlinear.html for
# further details.
default_parameters = dolfin.NewtonSolver.default_parameters()
solver_parameters = self.solver_parameters()
if "newton_solver" in solver_parameters:
parameters = solver_parameters["newton_solver"]
else:
parameters = {}
linear_solver = self.linear_solver()
atol = default_parameters["absolute_tolerance"]
rtol = default_parameters["relative_tolerance"]
max_its = default_parameters["maximum_iterations"]
omega = default_parameters["relaxation_parameter"]
err = default_parameters["error_on_nonconvergence"]
r_def = default_parameters["convergence_criterion"]
for key in parameters.keys():
if key == "absolute_tolerance":
atol = parameters[key]
elif key == "convergence_criterion":
r_def = parameters[key]
elif key == "error_on_nonconvergence":
err = parameters[key]
elif key == "maximum_iterations":
max_its = parameters[key]
elif key == "relative_tolerance":
rtol = parameters[key]
elif key == "relaxation_parameter":
omega = parameters[key]
elif key in ["linear_solver", "preconditioner", "lu_solver", "krylov_solver"]:
pass
elif key in ["method", "report"]:
raise NotImplementedException("Unsupported Newton solver parameter: %s" % key)
else:
raise ParameterException("Unexpected Newton solver parameter: %s" % key)
eq, bcs, hbcs = self.eq(), self.bcs(), self.hbcs()
a, L = self.__a, self.__L
x_name = x.name()
x = x.vector()
enforce_bcs(x, bcs)
dx = self.__dx
if not isinstance(linear_solver, dolfin.GenericLUSolver):
dx.zero()
if r_def == "residual":
l_L = assemble(L, copy = len(hbcs) > 0)
enforce_bcs(l_L, hbcs)
r_0 = l_L.norm("l2")
it = 0
if r_0 >= atol:
l_a = assemble(a, copy = len(hbcs) > 0)
apply_bcs(l_a, hbcs, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
linear_solver.set_operator(l_a)
linear_solver.solve(dx, l_L)
x.axpy(omega, dx)
it += 1
atol = max(atol, r_0 * rtol)
while it < max_its:
l_L = assemble(L, copy = len(hbcs) > 0)
enforce_bcs(l_L, hbcs)
r = l_L.norm("l2")
if r < atol:
break
l_a = assemble(a, copy = len(hbcs) > 0)
apply_bcs(l_a, hbcs, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
linear_solver.set_operator(l_a)
linear_solver.solve(dx, l_L)
x.axpy(omega, dx)
it += 1
elif r_def == "incremental":
l_a = assemble(a, copy = len(hbcs) > 0)
l_L = assemble(L, copy = len(hbcs) > 0)
apply_bcs(l_a, hbcs, L = l_L, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
linear_solver.set_operator(l_a)
linear_solver.solve(dx, l_L)
x.axpy(omega, dx)
it = 1
r_0 = dx.norm("l2")
if r_0 >= atol:
atol = max(atol, rtol * r_0)
while it < max_its:
l_a = assemble(a, copy = len(hbcs) > 0)
l_L = assemble(L, copy = len(hbcs) > 0)
apply_bcs(l_a, hbcs, L = l_L, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
linear_solver.set_operator(l_a)
linear_solver.solve(dx, l_L)
x.axpy(omega, dx)
it += 1
if dx.norm("l2") < atol:
break
else:
raise ParameterException("Invalid convergence criterion: %s" % r_def)
if it == max_its:
if err:
raise StateException("Newton solve for %s failed to converge after %i iterations" % (x_name, it))
else:
dolfin.warning("Newton solve for %s failed to converge after %i iterations" % (x_name, it))
# dolfin.info("Newton solve for %s converged after %i iterations" % (x_name, it))
else:
problem = dolfin.NonlinearVariationalProblem(self.eq().lhs - self.eq().rhs, x, bcs = self.bcs(), J = self.J())
nl_solver = dolfin.NonlinearVariationalSolver(problem)
nl_solver.parameters.update(self.solver_parameters())
nl_solver.solve()
return
def pa_solve(*args, **kwargs):
"""
Instantiate a PAEquationSolver using the supplied arguments and call its solve
method.
"""
PAEquationSolver(*args, **kwargs).solve()
return
| lgpl-3.0 | 5,529,168,736,928,071,000 | 45.619658 | 189 | 0.534788 | false | 4.178893 | false | false | false |
trnewman/VT-USRP-daughterboard-drivers_python | gnuradio-core/src/lib/filter/generate_gr_fir_sysconfig.py | 1 | 3066 | #!/usr/bin/env python
# -*- python -*-
#
# Copyright 2003 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from generate_utils import *
# ----------------------------------------------------------------
def make_gr_fir_sysconfig_h ():
out = open_and_log_name ('gr_fir_sysconfig.h', 'w')
out.write (copyright)
out.write (
'''
/*
* WARNING: This file is automatically generated by generate_gr_fir_sysconfig.py
* Any changes made to this file will be overwritten.
*/
#ifndef INCLUDED_GR_FIR_SYSCONFIG_H
#define INCLUDED_GR_FIR_SYSCONFIG_H
#include <gr_types.h>
''')
# for sig in fir_signatures:
# out.write ('class gr_fir_' + sig + ';\n')
out.write ('#include <gr_fir_util.h>\n')
out.write (
'''
/*!
* \\brief abstract base class for configuring the automatic selection of the
* fastest gr_fir for your platform.
*
* This is used internally by gr_fir_util.
*/
class gr_fir_sysconfig {
public:
virtual ~gr_fir_sysconfig ();
''')
for sig in fir_signatures:
out.write ((' virtual gr_fir_%s *create_gr_fir_%s (const std::vector<%s> &taps) = 0;\n' %
(sig, sig, tap_type (sig))))
out.write ('\n')
for sig in fir_signatures:
out.write ((' virtual void get_gr_fir_%s_info (std::vector<gr_fir_%s_info> *info) = 0;\n' %
(sig, sig)))
out.write (
'''
};
/*
* This returns the single instance of the appropriate derived class.
* This function must be defined only once in the system, and should be defined
* in the platform specific code.
*/
gr_fir_sysconfig *gr_fir_sysconfig_singleton ();
#endif /* INCLUDED_GR_FIR_SYSCONFIG_H */
''')
out.close ()
# ----------------------------------------------------------------
def make_gr_fir_sysconfig_cc ():
out = open_and_log_name ('gr_fir_sysconfig.cc', 'w')
out.write (copyright)
out.write (
'''
/*
* WARNING: This file is automatically generated by generate_gr_fir_sysconfig.py
* Any changes made to this file will be overwritten.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <gr_fir_sysconfig.h>
gr_fir_sysconfig::~gr_fir_sysconfig ()
{
}
''')
out.close ()
# ----------------------------------------------------------------
def generate ():
make_gr_fir_sysconfig_h ()
make_gr_fir_sysconfig_cc ()
if __name__ == '__main__':
generate ()
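# Running this script directly (e.g. `python generate_gr_fir_sysconfig.py`)
# regenerates gr_fir_sysconfig.h and gr_fir_sysconfig.cc via open_and_log_name;
# both outputs carry the "automatically generated" warning and should not be
# edited by hand.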
| gpl-3.0 | 2,011,145,555,511,358,200 | 23.141732 | 98 | 0.616765 | false | 3.480136 | true | false | false |
Hiestaa/3D-Lsystem | Vector.py | 1 | 1792 | class Vector:
"""represente un vecteur 3d"""
def __init__(self, arg = (0, 0, 0)):
self.x = float(arg[0])
self.y = float(arg[1])
self.z = float(arg[2])
def set(self, val):
if isinstance(val, self.__class__):
self.x = val.x
self.y = val.y
self.z = val.z
else:
self.x = val[0]
self.y = val[1]
self.z = val[2]
return self;
def toString(self):
return "(" + str(self.x) + ", " + str(self.y) + ", " + str(self.z) + ")"
def __mul__(self, other):
if isinstance(other, self.__class__):
return Vector((self.x * other.x, self.y * other.y, self.z * other.z))
else:
return Vector((self.x * other, self.y * other, self.z * other))
def __rmul__(self, other):
if isinstance(other, self.__class__):
return Vector((self.x * other.x, self.y * other.y, self.z * other.z))
else:
return Vector((self.x * other, self.y * other, self.z * other))
def __imul__(self, other):
if isinstance(other, self.__class__):
self.x *= other.x
self.y *= other.y
self.z *= other.z
else:
self.x *= other
self.y *= other
self.z *= other
return self
def __add__(self, other):
if isinstance(other, self.__class__):
return Vector((self.x + other.x, self.y + other.y, self.z + other.z))
else:
return Vector((self.x + other, self.y + other, self.z + other))
def __radd__(self, other):
if isinstance(other, self.__class__):
return Vector((self.x + other.x, self.y + other.y, self.z + other.z))
else:
return Vector((self.x + other, self.y + other, self.z + other))
def __iadd__(self, other):
if isinstance(other, self.__class__):
self.x += other.x
self.y += other.y
self.z += other.z
else:
self.x += other
self.y += other
self.z += other
return self
def toTuple(self):
return (self.x, self.y, self.z) | mit | -1,565,978,637,445,939,500 | 24.985507 | 74 | 0.582589 | false | 2.623719 | false | false | false |
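# Usage sketch:
#   a = Vector((1, 2, 3))
#   a += Vector((4, 5, 6))
#   a.toString()  # -> "(5.0, 7.0, 9.0)"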
SymbiFlow/prjxray | minitests/litex/uart_ddr/arty/scripts/arty.py | 1 | 4274 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
# This file is Copyright (c) 2015-2020 Florent Kermarrec <[email protected]>
# License: BSD
import argparse
from migen import *
from litex_boards.platforms import arty
from litex.build.xilinx import VivadoProgrammer
from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict
from litex.soc.cores.clock import *
from litex.soc.integration.soc_sdram import *
from litex.soc.integration.builder import *
from litedram.init import get_sdram_phy_py_header
from litedram.modules import MT41K128M16
from litedram.phy import s7ddrphy
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
self.clock_domains.cd_sys4x_dqs = ClockDomain(reset_less=True)
self.clock_domains.cd_clk200 = ClockDomain()
# # #
self.submodules.pll = pll = S7PLL(speedgrade=-1)
self.comb += pll.reset.eq(~platform.request("cpu_reset"))
pll.register_clkin(platform.request("clk100"), 100e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
pll.create_clkout(self.cd_sys4x, 4 * sys_clk_freq)
pll.create_clkout(self.cd_sys4x_dqs, 4 * sys_clk_freq, phase=90)
pll.create_clkout(self.cd_clk200, 200e6)
self.submodules.idelayctrl = S7IDELAYCTRL(self.cd_clk200)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCSDRAM):
def __init__(self):
platform = arty.Platform()
sys_clk_freq = int(50e6)
# SoCSDRAM ---------------------------------------------------------------------------------
SoCSDRAM.__init__(
self,
platform,
clk_freq=sys_clk_freq,
ident="Minimal Arty DDR3 Design for tests with Project X-Ray",
ident_version=True,
cpu_type=None,
l2_size=16,
uart_name="bridge")
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq)
# DDR3 SDRAM -------------------------------------------------------------------------------
if not self.integrated_main_ram_size:
self.submodules.ddrphy = s7ddrphy.A7DDRPHY(
platform.request("ddram"),
memtype="DDR3",
nphases=4,
sys_clk_freq=sys_clk_freq)
self.add_csr("ddrphy")
sdram_module = MT41K128M16(sys_clk_freq, "1:4")
self.register_sdram(
self.ddrphy,
geom_settings=sdram_module.geom_settings,
timing_settings=sdram_module.timing_settings)
def generate_sdram_phy_py_header(self):
f = open("sdram_init.py", "w")
f.write(
get_sdram_phy_py_header(
self.sdram.controller.settings.phy,
self.sdram.controller.settings.timing))
f.close()
# Load ---------------------------------------------------------------------------------------------
def load():
prog = VivadoProgrammer()
prog.load_bitstream("build/gateware/top.bit")
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(
description="Minimal Arty DDR3 Design for tests with Project X-Ray")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--load", action="store_true", help="Load bitstream")
args = parser.parse_args()
if args.load:
load()
soc = BaseSoC()
builder = Builder(soc, output_dir="build", csr_csv="csr.csv")
builder.build(run=args.build)
soc.generate_sdram_phy_py_header()
if __name__ == "__main__":
main()
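# Typical invocations (both assume Vivado is installed and on the PATH):
#   python3 arty.py --build   # synthesize; bitstream lands in build/gateware/
#   python3 arty.py --load    # program the board with build/gateware/top.bit
# Each run also writes sdram_init.py with the generated DDR3 initialization.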
| isc | -8,789,639,174,733,908,000 | 32.920635 | 100 | 0.535564 | false | 3.546888 | false | false | false |
kapil1garg/eecs338-chris-jones | show_query.py | 1 | 3440 | import json
import elastic
from operator import itemgetter
from default_query import DefaultQuery
class ShowQuery(DefaultQuery):
"""
Handles ES queries related to shows
"""
def __init__(self):
DefaultQuery.__init__(self)
def generate_response_best_show(self, query, annotated_query):
# find document id with max polarity
payload = {
'_source': ['documentSentiment.polarity'],
'query': {
'bool': {
'must': [{
'match': {
'Full text:': p
}}
for p in annotated_query.shows]
}
}
}
r = json.loads(elastic.search(elastic.ES_URL, '/flattened-articles/_search', payload))['hits']['hits']
polarities = [(i['_id'], i['_source']['documentSentiment']['polarity']) for i in r]
id_max_polarity = max(polarities, key=itemgetter(1))[0]
# return sentence from document id that contains show in a sentence
payload = {
'_source': ['sentences.content', 'Full text:', 'ProQ:'],
'query': {
'bool': {
'must': [{
'ids': {
'values': [id_max_polarity]
}},
{'nested': {
'path': 'sentences',
'query': {
'bool': {
'must': [{'match': {'sentences.content': p}} for p in annotated_query.shows]
}
},
'inner_hits': {}
}}]
}
}
}
r = json.loads(elastic.search(elastic.ES_URL, '/flattened-articles/_search', payload))['hits']['hits']
r = [(i['inner_hits']['sentences']['hits'], i['_source']['ProQ:'], i['_source']['Full text:']) for i in r]
return self.format_response(r[0])
def generate_response_person_in_show(self, query, annotated_query):
match_queries = [{
'match': {
'Full text:': show
}
}
for show in annotated_query.shows
]
match_queries.append({
'nested': {
'path': 'sentences',
'query': {
'bool': {
'must': [{
'match': {
'sentences.content': p
}
}
for p in annotated_query.people
]
}
},
'inner_hits': {}
}
})
payload = {
'_source': ['sentences.content', 'Full text:', 'ProQ:'],
'query': {
'bool': {
'must': match_queries
}
}
}
r = json.loads(elastic.search(elastic.ES_URL, '/flattened-articles/_search', payload))
print r
r = r['hits']['hits']
r = [(i['inner_hits']['sentences']['hits'], i['_source']['ProQ:'], i['_source']['Full text:']) for i in r]
return self.format_response(r[0])
| mit | 2,222,587,426,113,204,000 | 34.102041 | 117 | 0.386628 | false | 4.971098 | false | false | false |
jakevdp/altair | altair/utils/server.py | 1 | 4035 | """
A Simple server used to show altair graphics from a prompt or script.
This is adapted from the mpld3 package; see
https://github.com/mpld3/mpld3/blob/master/mpld3/_server.py
"""
import sys
import threading
import webbrowser
import socket
import itertools
import random
from ._py3k_compat import server, IO
JUPYTER_WARNING = """
Note: if you're in the Jupyter notebook, Chart.serve() is not the best
way to view plots. Consider using Chart.display().
You must interrupt the kernel to cancel this command.
"""
# Mock server used for testing
class MockRequest(object):
def makefile(self, *args, **kwargs):
return IO(b"GET /")
def sendall(self, response):
pass
class MockServer(object):
def __init__(self, ip_port, Handler):
Handler(MockRequest(), ip_port[0], self)
def serve_forever(self):
pass
def server_close(self):
pass
def generate_handler(html, files=None):
if files is None:
files = {}
class MyHandler(server.BaseHTTPRequestHandler):
def do_GET(self):
"""Respond to a GET request."""
if self.path == '/':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(html.encode())
elif self.path in files:
content_type, content = files[self.path]
self.send_response(200)
self.send_header("Content-type", content_type)
self.end_headers()
self.wfile.write(content.encode())
else:
self.send_error(404)
return MyHandler
def find_open_port(ip, port, n=50):
"""Find an open port near the specified port"""
ports = itertools.chain((port + i for i in range(n)),
(port + random.randint(-2 * n, 2 * n) for i in range(n)))
for port in ports:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = s.connect_ex((ip, port))
s.close()
if result != 0:
return port
raise ValueError("no open ports found")
def serve(html, ip='127.0.0.1', port=8888, n_retries=50, files=None,
jupyter_warning=True, open_browser=True, http_server=None):
"""Start a server serving the given HTML, and (optionally) open a browser
Parameters
----------
html : string
HTML to serve
ip : string (default = '127.0.0.1')
ip address at which the HTML will be served.
port : int (default = 8888)
the port at which to serve the HTML
n_retries : int (default = 50)
the number of nearby ports to search if the specified port is in use.
files : dictionary (optional)
dictionary of extra content to serve
jupyter_warning : bool (optional)
if True (default), then print a warning if this is used within Jupyter
open_browser : bool (optional)
if True (default), then open a web browser to the given HTML
http_server : class (optional)
optionally specify an HTTPServer class to use for showing the
figure. The default is Python's basic HTTPServer.
"""
port = find_open_port(ip, port, n_retries)
Handler = generate_handler(html, files)
if http_server is None:
srvr = server.HTTPServer((ip, port), Handler)
else:
srvr = http_server((ip, port), Handler)
if jupyter_warning:
try:
__IPYTHON__ # noqa
except NameError:
pass
else:
print(JUPYTER_WARNING)
# Start the server
print("Serving to http://{}:{}/ [Ctrl-C to exit]".format(ip, port))
sys.stdout.flush()
if open_browser:
# Use a thread to open a web browser pointing to the server
b = lambda: webbrowser.open('http://{}:{}'.format(ip, port))
threading.Thread(target=b).start()
try:
srvr.serve_forever()
except (KeyboardInterrupt, SystemExit):
print("\nstopping Server...")
srvr.server_close()
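# Usage sketch (blocks until Ctrl-C, optionally opening a browser tab):
#   serve("<h1>Hello</h1>", ip='127.0.0.1', port=8888)
# Tests can pass http_server=MockServer and open_browser=False to avoid
# binding a real socket.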
| bsd-3-clause | 4,777,088,075,087,904,000 | 29.11194 | 78 | 0.603965 | false | 3.887283 | false | false | false |
CWDoherty/Baseball | Scripts/hashtags.py | 1 | 1999 | '''
Copyright (c) 2015 Chris Doherty, Oliver Nabavian
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import mysql.connector, re
config = {
'user': 'root',
'password': 'isles40',
'host': '127.0.0.1',
'database': 'baseballdb'
}
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor(buffered=True)
tweets = ("SELECT message, user_id, tweet_id FROM Tweet")
cursor.execute(tweets)
tweet_list = []
count = 0
for c in cursor:
if '#' in c[0]:
tweet_list.append(c)
find_tags = re.compile("\S*#(?:\S+)")
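# e.g. find_tags.findall("Walkoff win! #MLB #RedSox") -> ['#MLB', '#RedSox']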
all_tag = []
for t in tweet_list:
tags = re.findall(find_tags, t[0])
if(len(tags) > 0):
all_tag.append([tags, t[1], t[2]])
insert = ("INSERT INTO Hashtag(tag, user_id, tweet_id) VALUES (%s, %s, %s)")
query = []
for a in all_tag:
for x in a[0]:
temp = [x, a[1], a[2]]
query.append(temp)
print query
for x in range(len(query)):
try:
cursor.execute(insert, query[x])
cnx.commit()
except mysql.connector.Error:
# Duplicate entries will not make it into the database
continue
cursor.close()
cnx.close()
| mit | -4,705,462,326,985,644,000 | 26.013514 | 77 | 0.722361 | false | 3.309603 | false | false | false |
karesansui/karesansui | karesansui/gadget/hostby1staticroute.py | 1 | 6419 | # -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import re
import web
import simplejson as json
import karesansui
from karesansui.lib.rest import Rest, auth
from karesansui.db.access.machine import findbyhost1
from karesansui.lib.checker import Checker, \
CHECK_EMPTY, CHECK_VALID, CHECK_LENGTH, \
CHECK_CHAR, CHECK_MIN, CHECK_MAX, CHECK_ONLYSPACE, \
CHECK_UNIQUE
from karesansui.lib.utils import is_param, is_empty, preprint_r, \
base64_encode, get_ifconfig_info
from karesansui.lib.networkaddress import NetworkAddress
from karesansui.lib.parser.staticroute import staticrouteParser as Parser
from karesansui.lib.conf import read_conf, write_conf
def validates_staticroute(obj):
checker = Checker()
check = True
_ = obj._
checker.errors = []
if not is_param(obj.input, 'target'):
check = False
checker.add_error(_('Specify target address for the route.'))
else:
check = checker.check_ipaddr(
_('Target'),
obj.input.target,
CHECK_EMPTY | CHECK_VALID,
) and check
if not is_param(obj.input, 'gateway'):
check = False
checker.add_error(_('Specify gateway address for the route.'))
else:
check = checker.check_ipaddr(
_('Gateway'),
obj.input.gateway,
CHECK_VALID,
) and check
obj.view.alert = checker.errors
return check
class HostBy1StaticRoute(Rest):
@auth
def _GET(self, *param, **params):
host_id = self.chk_hostby1(param)
if host_id is None: return web.notfound()
host = findbyhost1(self.orm, host_id)
self.view.host_id = host_id
# unremovable entries
excludes = {
"device": ["^peth","^virbr","^sit","^xenbr","^lo","^br"],
"ipaddr": ["^0\.0\.0\.0$", "^169\.254\.0\.0$"],
}
devices = []
phydev_regex = re.compile(r"^eth[0-9]+")
for dev,dev_info in get_ifconfig_info().iteritems():
if phydev_regex.match(dev):
try:
if dev_info['ipaddr'] is not None:
devices.append(dev)
net = NetworkAddress("%s/%s" % (dev_info['ipaddr'],dev_info['mask'],))
excludes['ipaddr'].append(net.network)
except:
pass
self.view.devices = devices
parser = Parser()
status = parser.do_status()
routes = {}
for _k,_v in status.iteritems():
for _k2,_v2 in _v.iteritems():
name = base64_encode("%s@%s" % (_k2,_k,))
routes[name] = {}
routes[name]['name'] = name
routes[name]['device'] = _k
routes[name]['gateway'] = _v2['gateway']
routes[name]['flags'] = _v2['flags']
routes[name]['ref'] = _v2['ref']
routes[name]['use'] = _v2['use']
net = NetworkAddress(_k2)
routes[name]['ipaddr'] = net.ipaddr
routes[name]['netlen'] = net.netlen
routes[name]['netmask'] = net.netmask
removable = True
for _ex_key,_ex_val in excludes.iteritems():
ex_regex = "|".join(_ex_val)
mm = re.search(ex_regex,routes[name][_ex_key])
if mm:
removable = False
routes[name]['removable'] = removable
self.view.routes = routes
if self.is_mode_input():
pass
return True
@auth
def _POST(self, *param, **params):
host_id = self.chk_hostby1(param)
if host_id is None: return web.notfound()
host = findbyhost1(self.orm, host_id)
if not validates_staticroute(self):
return web.badrequest(self.view.alert)
modules = ["staticroute"]
dop = read_conf(modules, self, host)
if dop is False:
return web.internalerror('Internal Server Error. (Timeout)')
target = self.input.target
net = NetworkAddress(target)
ipaddr = net.ipaddr
netmask = net.netmask
netlen = net.netlen
network = net.network
target = "%s/%s" % (ipaddr,netlen,)
gateway = self.input.gateway
device = self.input.device
dop.set("staticroute", [device,target], gateway)
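# Illustrative values (hypothetical): device='eth0',
# target='192.168.10.0/24', gateway='192.168.10.1'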
from karesansui.lib.parser.staticroute import PARSER_COMMAND_ROUTE
if net.netlen == 32:
    # Host route: include the gateway only when one was supplied
    # (gateway passes validation even when left empty).
    if gateway:
        command = "%s add -host %s gw %s dev %s" % (PARSER_COMMAND_ROUTE,ipaddr,gateway,device,)
    else:
        command = "%s add -host %s dev %s" % (PARSER_COMMAND_ROUTE,ipaddr,device,)
else:
command = "%s add -net %s netmask %s gw %s dev %s" % (PARSER_COMMAND_ROUTE,network,netmask,gateway,device,)
extra_args = {"post-command": command}
retval = write_conf(dop, self, host, extra_args=extra_args)
if retval is False:
return web.internalerror('Internal Server Error. (Adding Task)')
return web.accepted(url=web.ctx.path)
urls = (
'/host/(\d+)/staticroute[/]?(\.html|\.part|\.json)?$', HostBy1StaticRoute,
)
| mit | 270,860,576,896,063,070 | 33.326203 | 119 | 0.581087 | false | 3.871532 | false | false | false |
dlu-ch/dlb | test/dlb_contrib/test_git.py | 1 | 21925 | # SPDX-License-Identifier: LGPL-3.0-or-later
# dlb - a Pythonic build tool
# Copyright (C) 2020 Daniel Lutz <[email protected]>
import testenv # also sets up module search paths
import dlb.di
import dlb.fs
import dlb.ex
import dlb_contrib.generic
import dlb_contrib.git
import dlb_contrib.sh
import os.path
import tempfile
import subprocess
import re
import unittest
class PrepareGitRepo(dlb_contrib.sh.ShScriptlet):
SCRIPTLET = """
git init
git config user.email "[email protected]"
git config user.name "dlu-ch"
git add .dlbroot/o
echo .dlbroot/ > .gitignore
echo x > x
git add x .gitignore
git commit -m 'Initial commit'
echo x >> x
git commit -a -m 'Enlarge x'
git tag -a v1.2.3c4 -m 'Release'
echo x >> x
git commit -a -m 'Enlarge x even further'
mkdir d
echo y > d/y
git add d/y
echo z > d/z
git add d/z
echo a > 'a -> b'
git add 'a -> b'
git commit -m 'Add files'
git mv x 'y -> z'
git mv 'a -> b' c
git mv d e
git mv e/y why
echo u > e/u
"""
# each annotated tag starting with 'v' followed by a decimal digit must match this (after 'v'):
VERSION_REGEX = re.compile(
r'^'
r'(?P<major>0|[1-9][0-9]*)\.(?P<minor>0|[1-9][0-9]*)\.(?P<micro>0|[1-9][0-9]*)'
r'((?P<post>[abc])(?P<post_number>0|[1-9][0-9]*))?'
r'$')
class ModificationsFromStatusTest(unittest.TestCase):
def test_branch_header(self):
lines = [
'# branch.oid b5fb8c02a485f9f7a5d4aee95848bf9c9d2b0f7f',
'# branch.head "äüä"',
'# branch.upstream origin/master',
'# branch.ab +12 -3'
]
_, _, branch_refname, upstream_branch_refname, before_upstream, behind_upstream = \
dlb_contrib.git.modifications_from_status(lines)
self.assertEqual('refs/heads/"äüä"', branch_refname)
self.assertEqual('refs/remotes/origin/master', upstream_branch_refname)
self.assertEqual((12, 3), (before_upstream, behind_upstream))
lines = [
'# branch.oid b5fb8c02a485f9f7a5d4aee95848bf9c9d2b0f7f',
'# branch.head (detached)'
]
_, _, branch_refname, upstream_branch_refname, before_upstream, behind_upstream = \
dlb_contrib.git.modifications_from_status(lines)
self.assertEqual('refs/heads/(detached)', branch_refname) # is ambiguous
self.assertIsNone(upstream_branch_refname)
self.assertIsNone(before_upstream)
self.assertIsNone(behind_upstream)
def test_single_non_header_line(self):
line = (
'1 .M N... 100644 100644 100644 '
'd8755f8b2ede3dc58822895fa85e0e51c8f20dda d8755f8b2ede3dc58822895fa85e0e51c8f20dda jöö/herzig'
)
self.assertEqual({dlb.fs.Path('jöö/herzig'): (' M', None)},
dlb_contrib.git.modifications_from_status([line])[0])
line = (
'1 A. N... 000000 100644 100644 '
'0000000000000000000000000000000000000000 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 "a\\tb\\nc\\"\'d "'
)
self.assertEqual({dlb.fs.Path('a\tb\nc"\'d '): ('A ', None)},
dlb_contrib.git.modifications_from_status([line])[0])
line = (
'2 R. N... 100644 100644 100644 '
'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 R100 a\tb'
)
self.assertEqual({dlb.fs.Path('b'): ('R ', dlb.fs.Path('a'))},
dlb_contrib.git.modifications_from_status([line])[0])
line = (
'2 R. N... 100644 100644 100644 '
'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 R100 "a\\"b"\ta -> b'
)
self.assertEqual({dlb.fs.Path('a -> b'): ('R ', dlb.fs.Path('a"b'))},
dlb_contrib.git.modifications_from_status([line])[0])
line = (
'2 R. N... 100644 100644 100644 '
'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 R100 '
'a\t"a\\tb\\nc\\"\'d "'
)
self.assertEqual({dlb.fs.Path('a\tb\nc"\'d '): ('R ', dlb.fs.Path('a'))},
dlb_contrib.git.modifications_from_status([line])[0])
self.assertEqual({dlb.fs.Path('a')},
dlb_contrib.git.modifications_from_status(['? a'])[1])
self.assertEqual({dlb.fs.Path('a\tb\nc"\'d ')},
dlb_contrib.git.modifications_from_status(['? "a\\tb\\nc\\"\'d "'])[1])
def test_fails_on_invalid_line(self):
with self.assertRaises(ValueError):
dlb_contrib.git.modifications_from_status(['# branch.ab +0'])
with self.assertRaises(ValueError):
dlb_contrib.git.modifications_from_status(['1 A.'])
with self.assertRaises(ValueError):
dlb_contrib.git.modifications_from_status(['2 R.'])
class CheckRefNameTest(unittest.TestCase):
def test_empty_is_invalid(self):
with self.assertRaises(ValueError) as cm:
dlb_contrib.git.check_refname('')
self.assertEqual(str(cm.exception), 'refname component must not be empty')
def test_single_slashes_are_valid(self):
dlb_contrib.git.check_refname('a/b/c')
def test_consecutive_slashes_are_valid(self):
with self.assertRaises(ValueError) as cm:
dlb_contrib.git.check_refname('a//b')
self.assertEqual(str(cm.exception), 'refname component must not be empty')
def test_single_dot_in_the_middle_is_valid(self):
dlb_contrib.git.check_refname('a/b.c')
def test_at_at_certain_position_is_valid(self):
dlb_contrib.git.check_refname('a/{@}/b')
def test_single_at_is_invalid(self):
with self.assertRaises(ValueError) as cm:
dlb_contrib.git.check_refname('a/@/b')
self.assertEqual(str(cm.exception), "refname component must not be '@'")
def test_at_followed_by_brace_is_invalid(self):
with self.assertRaises(ValueError) as cm:
dlb_contrib.git.check_refname('a@{b')
self.assertEqual(str(cm.exception), "refname component must not contain '@{'")
def test_double_dot_in_the_middle_is_invalid(self):
with self.assertRaises(ValueError) as cm:
dlb_contrib.git.check_refname('a/b..c')
self.assertEqual(str(cm.exception), "refname component must not contain '..'")
def test_control_character_is_invalid(self):
with self.assertRaises(ValueError) as cm:
dlb_contrib.git.check_refname('a\0b')
self.assertEqual(str(cm.exception), "refname component must not contain ASCII control character")
with self.assertRaises(ValueError) as cm:
dlb_contrib.git.check_refname('a\nb')
self.assertEqual(str(cm.exception), "refname component must not contain ASCII control character")
with self.assertRaises(ValueError) as cm:
dlb_contrib.git.check_refname('a\x7Fb')
self.assertEqual(str(cm.exception), "refname component must not contain ASCII control character")
class DescribeWorkingDirectory(dlb_contrib.git.GitDescribeWorkingDirectory):
SHORTENED_COMMIT_HASH_LENGTH = 8 # number of characters of the SHA1 commit hash in the *wd_version*
# working directory version
# examples: '1.2.3', '1.2.3c4-dev5+deadbeef?'
wd_version = dlb.ex.output.Object(explicit=False)
# tuple of the version according to the version tag
version_components = dlb.ex.output.Object(explicit=False)
async def redo(self, result, context):
await super().redo(result, context)
shortened_commit_hash_length = min(40, max(1, int(self.SHORTENED_COMMIT_HASH_LENGTH)))
version = result.tag_name[1:]
m = VERSION_REGEX.fullmatch(version)
if not m:
raise ValueError(f'annotated tag is not a valid version number: {result.tag_name!r}')
wd_version = version
if result.commit_number_from_tag_to_latest_commit:
wd_version += f'-dev{result.commit_number_from_tag_to_latest_commit}' \
f'+{result.latest_commit_hash[:shortened_commit_hash_length]}'
if result.has_changes_in_tracked_files:
wd_version += '?'
result.wd_version = wd_version
result.version_components = (
int(m.group('major')), int(m.group('minor')), int(m.group('micro')),
m.group('post'), None if m.group('post_number') is None else int(m.group('post_number'))
)
return True
@unittest.skipIf(not testenv.has_executable_in_path('git'), 'requires git in $PATH')
@unittest.skipIf(not testenv.has_executable_in_path('sh'), 'requires sh in $PATH')
class GitDescribeWorkingDirectoryTest(testenv.TemporaryWorkingDirectoryTestCase):
def test_line_output(self):
with dlb.ex.Context():
class AddLightWeightTag(dlb_contrib.sh.ShScriptlet):
SCRIPTLET = 'git tag v2' # light-weight tag does not affect 'git describe'
PrepareGitRepo().start().complete()
AddLightWeightTag().start().complete()
result = DescribeWorkingDirectory().start()
dlb.di.inform(f"version: {result.version_components!r}, wd version: {result.wd_version!r}")
dlb.di.inform(f"changed: {result.modification_by_file.keys()!r}")
self.assertEqual({
dlb.fs.Path('a -> b'): ('R ', dlb.fs.Path('c')),
dlb.fs.Path('d/y'): ('R ', dlb.fs.Path('why')),
dlb.fs.Path('d/z'): ('R ', dlb.fs.Path('e/z')),
dlb.fs.Path('x'): ('R ', dlb.fs.Path('y -> z'))
}, result.modification_by_file)
self.assertEqual({dlb.fs.Path('e/u')}, result.untracked_files)
self.assertEqual((1, 2, 3, 'c', 4), result.version_components)
self.assertRegex(result.wd_version, r'1\.2\.3c4-dev2\+[0-9a-f]{8}\?$')
self.assertEqual('refs/heads/master', result.branch_refname)
with dlb.ex.Context():
class CommitGitRepo(dlb_contrib.sh.ShScriptlet):
SCRIPTLET = 'git commit -a -m 0'
CommitGitRepo().start()
result = DescribeWorkingDirectory().start()
self.assertEqual({}, result.modification_by_file)
self.assertEqual({dlb.fs.Path('e/u')}, result.untracked_files)
self.assertEqual((1, 2, 3, 'c', 4), result.version_components)
self.assertRegex(result.wd_version, r'1\.2\.3c4-dev3\+[0-9a-f]{8}$')
with dlb.ex.Context():
class CheckoutBranch(dlb_contrib.sh.ShScriptlet):
SCRIPTLET = 'git checkout -f -b "(detached)"'
CheckoutBranch().start()
result = DescribeWorkingDirectory().start()
self.assertEqual('refs/heads/(detached)', result.branch_refname)
self.assertRegex(result.wd_version, r'1\.2\.3c4-dev3\+[0-9a-f]{8}$')
with dlb.ex.Context():
class CheckoutDetached(dlb_contrib.sh.ShScriptlet):
SCRIPTLET = 'git checkout --detach'
CheckoutDetached().start()
result = DescribeWorkingDirectory().start()
self.assertIsNone(result.branch_refname)
self.assertRegex(result.wd_version, r'1\.2\.3c4-dev3\+[0-9a-f]{8}$')
def test_gitignore_can_hide_every_modification(self):
class PrepareRepoWithHiddenModifications(dlb_contrib.sh.ShScriptlet):
SCRIPTLET = """
git init
git config user.email "[email protected]"
git config user.name "dlu-ch"
echo x > x
git add x
git commit -m 'Initial commit'
git tag -a v0.0.0 -m 'Initial tag'
echo .gitignore > .gitignore
echo .dlbroot >> .gitignore
echo ignored >> .gitignore
touch ignored
"""
with dlb.ex.Context():
PrepareRepoWithHiddenModifications().start().complete()
result = DescribeWorkingDirectory().start()
self.assertEqual({}, result.modification_by_file)
class DefaultVersionTagTest(unittest.TestCase):
REGEX = re.compile(dlb_contrib.git.GitCheckTags.ANNOTATED_TAG_NAME_REGEX)
def test_fails_for_empty(self):
self.assertFalse(self.REGEX.fullmatch(''))
def test_fails_for_missing_v(self):
self.assertFalse(self.REGEX.fullmatch('1.2.3'))
def test_fails_for_leading_zero(self):
self.assertFalse(self.REGEX.fullmatch('v01.2.3'))
self.assertFalse(self.REGEX.fullmatch('v1.02.3'))
self.assertFalse(self.REGEX.fullmatch('v1.02.03'))
def test_matches_dotted_integers(self):
self.assertTrue(self.REGEX.fullmatch('v1'))
self.assertTrue(self.REGEX.fullmatch('v1.2'))
self.assertTrue(self.REGEX.fullmatch('v1.2.3'))
self.assertTrue(self.REGEX.fullmatch('v1.20.345.6789'))
self.assertTrue(self.REGEX.fullmatch('v0.0.0'))
def test_fails_without_trailing_decimal_digit(self):
self.assertFalse(self.REGEX.fullmatch('v1.2.3pre'))
def test_matches_dotted_integers_with_suffix(self):
self.assertTrue(self.REGEX.fullmatch('v1.2.3a4'))
self.assertTrue(self.REGEX.fullmatch('v1.2.3rc0'))
self.assertTrue(self.REGEX.fullmatch('v1.2.3patch747'))
@unittest.skipIf(not testenv.has_executable_in_path('git'), 'requires git in $PATH')
@unittest.skipIf(not testenv.has_executable_in_path('sh'), 'requires sh in $PATH')
class GitCheckTagsTest(testenv.TemporaryWorkingDirectoryTestCase):
def test_local_only(self):
class GitCheckTags(dlb_contrib.git.GitCheckTags):
REMOTE_NAME_TO_SYNC_CHECK = ''
class GitCheckTags2(GitCheckTags):
LIGHTWEIGHT_TAG_NAME_REGEX = 'latest_.*'
with dlb.ex.Context():
PrepareGitRepo().start().complete()
subprocess.check_output(['git', 'tag', '-a', 'v2.0.0', '-m', 'Release'])
subprocess.check_output(['git', 'tag', 'vw'])
result = GitCheckTags().start()
self.assertEqual({'v1.2.3c4', 'v2.0.0'}, set(result.commit_by_annotated_tag_name))
self.assertEqual({'vw'}, set(result.commit_by_lightweight_tag_name))
with dlb.ex.Context():
output = subprocess.check_output(['git', 'rev-parse', 'v1.2.3c4^{}', 'v2.0.0^{}', 'vw'])
commit_hashes = output.decode().splitlines()
self.assertEqual({
'v1.2.3c4': commit_hashes[0],
'v2.0.0': commit_hashes[1]
}, result.commit_by_annotated_tag_name)
self.assertEqual({
'vw': commit_hashes[2]
}, result.commit_by_lightweight_tag_name)
with dlb.ex.Context():
subprocess.check_output(['git', 'tag', 'v2'])
with self.assertRaises(ValueError) as cm:
GitCheckTags().start().complete()
msg = "name of lightweight tag does match 'ANNOTATED_TAG_NAME_REGEX': 'v2'"
self.assertEqual(msg, str(cm.exception))
with dlb.ex.Context():
subprocess.check_output(['git', 'tag', '-d', 'v2'])
subprocess.check_output(['git', 'tag', '-a', 'v_3.0', '-m', 'Release'])
with self.assertRaises(ValueError) as cm:
GitCheckTags().start().complete()
msg = "name of annotated tag does not match 'ANNOTATED_TAG_NAME_REGEX': 'v_3.0'"
self.assertEqual(msg, str(cm.exception))
with dlb.ex.Context():
subprocess.check_output(['git', 'tag', '-d', 'v_3.0'])
with self.assertRaises(ValueError) as cm:
GitCheckTags2().start().complete()
msg = "name of lightweight tag does not match 'LIGHTWEIGHT_TAG_NAME_REGEX': 'vw'"
self.assertEqual(msg, str(cm.exception))
def test_remote_too(self):
class GitCheckTags(dlb_contrib.git.GitCheckTags):
pass
class GitCheckTags2(GitCheckTags):
DO_SYNC_CHECK_LIGHTWEIGHT_TAGS = True
origin_repo_dir = os.path.abspath(tempfile.mkdtemp())
with testenv.DirectoryChanger(origin_repo_dir):
subprocess.check_output(['git', 'init'])
subprocess.check_output(['git', 'config', 'user.email', '[email protected]'])
subprocess.check_output(['git', 'config', 'user.name', 'user.name'])
subprocess.check_output(['touch', 'x'])
subprocess.check_output(['git', 'add', 'x'])
subprocess.check_output(['git', 'commit', '-m', 'Initial commit'])
subprocess.check_output(['git', 'tag', '-a', 'v1.2.3c4', '-m', 'Release'])
subprocess.check_output(['touch', 'y'])
subprocess.check_output(['git', 'add', 'y'])
subprocess.check_output(['git', 'commit', '-m', 'Add y'])
subprocess.check_output(['git', 'tag', '-a', 'v2.0.0', '-m', 'Release'])
subprocess.check_output(['git', 'tag', '-a', 'v2.0.1', '-m', 'Release'])
subprocess.check_output(['git', 'tag', 'vm'])
subprocess.check_output(['git', 'tag', 'v'])
subprocess.check_output(['git', 'tag', 'w'])
subprocess.check_output(['git', 'init'])
subprocess.check_output(['touch', 'x'])
subprocess.check_output(['git', 'add', 'x'])
subprocess.check_output(['git', 'commit', '-m', 'Initial commit'])
subprocess.check_output(['git', 'remote', 'add', 'origin', origin_repo_dir])
subprocess.check_output(['git', 'fetch'])
subprocess.check_output(['git', 'fetch', '--tags'])
with dlb.ex.Context():
GitCheckTags().start()
with dlb.ex.Context():
subprocess.check_output(['git', 'tag', '-d', 'vm'])
subprocess.check_output(['git', 'tag', '-d', 'v'])
GitCheckTags().start() # do not sync lightweight tags by default
with self.assertRaises(ValueError) as cm:
GitCheckTags2().start().complete()
msg = "remote tags missing locally: 'v', 'vm'"
self.assertEqual(msg, str(cm.exception))
subprocess.check_output(['git', 'tag', '-d', 'v1.2.3c4'])
subprocess.check_output(['git', 'tag', '-d', 'v2.0.1'])
with self.assertRaises(ValueError) as cm:
GitCheckTags().start().complete()
msg = "remote tags missing locally: 'v1.2.3c4', 'v2.0.1'"
self.assertEqual(msg, str(cm.exception))
subprocess.check_output(['git', 'tag', '-a', 'v1.2.3c4', '-m', 'Release']) # different commit
subprocess.check_output(['git', 'tag', '-a', 'v2.0.1', '-m', 'Release']) # different commit
with self.assertRaises(ValueError) as cm:
GitCheckTags().start().complete()
msg = "tags for different commits locally and remotely: 'v1.2.3c4', 'v2.0.1'"
self.assertEqual(msg, str(cm.exception))
subprocess.check_output(['git', 'tag', '-a', 'v3.0.0', '-m', 'Release'])
subprocess.check_output(['git', 'tag', '-a', 'v3.0.1', '-m', 'Release'])
with self.assertRaises(ValueError) as cm:
GitCheckTags().start().complete()
msg = "local tags missing on remotely: 'v3.0.0', 'v3.0.1'"
self.assertEqual(msg, str(cm.exception))
def test_example(self):
origin_repo_dir = os.path.abspath(tempfile.mkdtemp())
with testenv.DirectoryChanger(origin_repo_dir):
subprocess.check_output(['git', 'init'])
subprocess.check_output(['git', 'config', 'user.email', '[email protected]'])
subprocess.check_output(['git', 'config', 'user.name', 'user.name'])
subprocess.check_output(['touch', 'x'])
subprocess.check_output(['git', 'add', 'x'])
subprocess.check_output(['git', 'commit', '-m', 'Initial commit'])
subprocess.check_output(['git', 'tag', '-a', 'v1.2.3', '-m', 'Release'])
subprocess.check_output(['git', 'init'])
subprocess.check_output(['git', 'remote', 'add', 'origin', origin_repo_dir])
subprocess.check_output(['git', 'fetch'])
subprocess.check_output(['git', 'fetch', '--tags'])
with dlb.ex.Context():
class GitCheckTags(dlb_contrib.git.GitCheckTags):
ANNOTATED_TAG_NAME_REGEX = r'v(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*)){2}' # e.g. 'v1.23.0'
version_tag_names = set(GitCheckTags().start().commit_by_annotated_tag_name)
self.assertEqual({'v1.2.3'}, version_tag_names)
@unittest.skipIf(not testenv.has_executable_in_path('git'), 'requires git in $PATH')
class VersionTest(testenv.TemporaryWorkingDirectoryTestCase):
def test_version_is_string_with_dot(self):
# noinspection PyPep8Naming
Tools = [
dlb_contrib.git.GitDescribeWorkingDirectory,
dlb_contrib.git.GitCheckTags
]
class QueryVersion(dlb_contrib.generic.VersionQuery):
VERSION_PARAMETERS_BY_EXECUTABLE = {
Tool.EXECUTABLE: Tool.VERSION_PARAMETERS
for Tool in Tools
}
with dlb.ex.Context():
version_by_path = QueryVersion().start().version_by_path
self.assertEqual(len(QueryVersion.VERSION_PARAMETERS_BY_EXECUTABLE), len(version_by_path))
for Tool in Tools:
path = dlb.ex.Context.active.helper[Tool.EXECUTABLE]
version = version_by_path[path]
self.assertIsInstance(version, str)
self.assertGreaterEqual(version.count('.'), 2)
| gpl-3.0 | 9,056,215,447,418,423,000 | 42.39604 | 116 | 0.592015 | false | 3.456079 | true | false | false |
LCOGT/whatsup | whatsup/urls.py | 1 | 1040 | """
WhatsUP: astronomical object suggestions for Las Cumbres Observatory Global Telescope Network
Copyright (C) 2014-2015 LCOGT
urls.py
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
from django.urls import path
from . import views
urlpatterns = [
path('', views.api_root, name='apiroot'),
path('target/',views.TargetDetailView.as_view(), name="api_target"),
path('search/v2/', views.TargetListView.as_view(), name="api_v2_search"),
path('search/', views.TargetListView.as_view(), name="api_search"),
path('range/', views.TargetListRangeView.as_view(), name="api_range"),
]
| gpl-3.0 | -5,789,981,566,066,620,000 | 36.142857 | 93 | 0.749038 | false | 3.823529 | false | false | false |
holytortoise/abwreservierung | src/reservierung/views.py | 1 | 19052 | from django.shortcuts import render
from django.views.generic import TemplateView, ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView, FormView
from django.views.generic.dates import WeekArchiveView
from django.urls import reverse_lazy, reverse
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django import forms as d_forms
import datetime
from . import forms
from . import models
# Create your views here.
class ReservierungList(ListView):
queryset = models.Reservierung.objects.order_by('anfangsDatum','anfangsZeit')
context_object_name = 'reservierungen'
class ReservierungUpdate(LoginRequiredMixin, UpdateView):
login_url = 'account:login'
redirect_field_name = 'redirect_to'
model = models.Reservierung
fields = ['reserviert_für','reservierterRaum', 'reservierungsGrund', 'anfangsDatum',
'endDatum', 'anfangsZeit', 'endZeit']
class ReservierungDelete(LoginRequiredMixin, DeleteView):
login_url = 'account:login'
redirect_field_name = 'redirect_to'
model = models.Reservierung
success_url = reverse_lazy('reservierung:reservierung-list')
template_name = 'reservierung/reservierung_delete.html'
class ReservierungDetail(DetailView):
model = models.Reservierung
context_object_name = 'reservierung'
template_name = 'reservierung/reservierung_detail.html'
# View for displaying the reservations of the current week
def index(request):
"""
Renders the reservation table for the current week on the index page and
lets the user step backwards and forwards through the weeks.
"""
current_week = datetime.date.today().isocalendar()[1]
current_year = datetime.date.today().isocalendar()[0]
is_week = None
if request.method == 'POST':
jahr = int(request.POST['jahr'])
woche = int(request.POST['woche'])
# If the right-hand button (next week) was pressed, increment the week,
# rolling over into the next year after the last ISO week
if 'next_week' in request.POST:
if woche == datetime.date(jahr, 12, 28).isocalendar()[1]:
woche = 1
jahr = jahr + 1
else:
woche = woche + 1
# If the left-hand button (previous week) was pressed, decrement the
# week, rolling back into the previous year if needed
if 'last_week' in request.POST:
if woche == 1:
jahr = jahr -1
woche = datetime.date(jahr,12,28).isocalendar()[1]
else:
woche = woche - 1
else:
jahr = datetime.date.today().isocalendar()[0]
woche = datetime.date.today().isocalendar()[1]
# True exactly when the displayed week is the current week
if woche == current_week and jahr == current_year:
is_week = True
if woche != current_week or jahr != current_year:
is_week = False
    # Build the start/end dates (Monday-Sunday) of the selected week
datum = str(jahr)+'-W'+str(woche)
r = datetime.datetime.strptime(datum + '-0', "%Y-W%W-%w")
start = r - datetime.timedelta(days=r.weekday())
end = start + datetime.timedelta(days=6)
start = start.strftime('%d.%m')
end = end.strftime('%d.%m')
rooms = models.Raum.objects.all()
rooms_return = []
for room in rooms:
room_return = []
reservierungen = models.Reservierung.objects.filter(
reservierterRaum=room).order_by('anfangsDatum')
for reservierung in reservierungen:
if reservierung.anfangsDatum.isocalendar()[1] < woche and woche < reservierung.endDatum.isocalendar()[1]:
room_return.append(reservierung)
if ((reservierung.anfangsDatum.isocalendar()[1] == woche and reservierung.anfangsDatum.isocalendar()[0] == jahr)
or (reservierung.endDatum.isocalendar()[1] == woche and reservierung.endDatum.isocalendar()[0] == jahr)):
room_return.append(reservierung)
if len(room_return) != 0:
rooms_return.append(room_return)
if len(rooms_return) == 0:
rooms_return = None
    context_dict = {'rooms_return': rooms_return, 'reserv': reservierungen,
                    'woche': woche, 'jahr': jahr, 'current_week': current_week,
                    'current_year': current_year, 'is_week': is_week,
                    'start': start, 'end': end}
return render(request, 'index.html', context_dict)
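# Note (illustrative, not part of the original file): the rollover logic above
# relies on December 28 always falling in the last ISO week of its year, so
# datetime.date(jahr, 12, 28).isocalendar()[1] gives the number of ISO weeks
# (52 or 53) in that year:
#
#   import datetime
#   datetime.date(2015, 12, 28).isocalendar()[1]  # -> 53
#   datetime.date(2016, 12, 28).isocalendar()[1]  # -> 52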
# View for creating reservations
@login_required(login_url='account:login')
def reservierung_form(request):
"""
Diese Funktion ist für die neuen Reservierungen zuständig.
Sie Überprüft ob der Raum für den gewünschten Zeitraum zur verfügung steht.
Wenn ja wird eine neue Reservierung angelegt und der Nutzer wird zur Index
seite Umgeleitet. Wenn nein dann werden dem Nutzer alternative Räume
vorgeschlagen, welche zum gewünschten Zeitpunkt frei sind.
"""
nutzer = request.user
free_rooms = None
reserv = None
moeglich = False
if request.method == 'POST':
form = forms.ReservierungForm(data=request.POST)
if form.is_valid():
free_rooms = []
reservierungen = models.Reservierung.objects.filter(
reservierterRaum=form.cleaned_data.get("reservierterRaum"))
if reservierungen.exists():
for reservierung in reservierungen:
if reservierung.täglich:
                        # does form.anfangsDatum fall inside an existing
                        # reservation?
if reservierung.anfangsDatum < form.cleaned_data.get("anfangsDatum") and form.cleaned_data.get("anfangsDatum") < reservierung.endDatum:
                            # is the new reservation a daily one?
if form.cleaned_data.get("täglich"):
                                # does r.endZeit end before f.anfangsZeit, or
                                # r.anfangsZeit start after f.endZeit?
if reservierung.endZeit <= form.cleaned_data.get("anfangsZeit") or reservierung.anfangsZeit >= form.cleaned_data.get("endZeit"):
                                    # condition holds, so the reservation is possible
moeglich = True
else:
moeglich = False
reserv = reservierung
break
else:
if reservierung.endZeit <= form.cleaned_data.get("anfangsZeit"):
moeglich = True
elif reservierung.anfangsZeit >= form.cleaned_data.get("endZeit"):
moeglich = True
else:
                                    # the existing reservation is all-day,
                                    # so not possible
moeglich = False
reserv = reservierung
break
else:
                            # does f.anfangsDatum fall after r.endDatum?
if reservierung.endDatum < form.cleaned_data.get("anfangsDatum"):
moeglich = True
                            # do r.endDatum and f.anfangsDatum fall on the
                            # same day?
elif reservierung.endDatum == form.cleaned_data.get("anfangsDatum"):
                                # does r.endZeit end before f.anfangsZeit?
if reservierung.endZeit <= form.cleaned_data.get("anfangsZeit"):
                                    # reservation possible
moeglich = True
                                # does r.anfangsZeit start after f.endZeit?
elif reservierung.anfangsZeit >= form.cleaned_data.get("endZeit"):
                                    # reservation possible
moeglich = True
else:
                                    # reservation not possible
moeglich = False
reserv = reservierung
break
                            # are r.anfangsDatum and f.endDatum on the same day?
elif reservierung.anfangsDatum == form.cleaned_data.get("endDatum"):
if reservierung.endZeit <= form.cleaned_data.get("anfangsZeit"):
                                    # reservation possible
moeglich = True
elif reservierung.anfangsZeit >= form.cleaned_data.get("endZeit"):
                                    # reservation possible
moeglich = True
else:
moeglich = False
reserv = reservierung
break
else:
if reservierung.anfangsDatum < form.cleaned_data.get("anfangsDatum") and form.cleaned_data.get("anfangsDatum") < reservierung.endDatum:
                            # show an error message, list the available
                            # rooms, and display the conflicting
                            # reservation
moeglich = False
reserv = reservierung
break
else:
                            # the existing reservation ends before the new
                            # one begins
if reservierung.endDatum < form.cleaned_data.get("anfangsDatum"):
moeglich = True
                            # the existing reservation ends on the same day
                            # the new one begins
elif reservierung.endDatum == form.cleaned_data.get("anfangsDatum"):
                                # the existing end time is before or equal
                                # to the new start time
if reservierung.endZeit <= form.cleaned_data.get("anfangsZeit"):
moeglich = True
elif reservierung.anfangsZeit >= form.cleaned_data.get("endZeit"):
moeglich = True
else:
moeglich = False
reserv = reservierung
break
elif reservierung.anfangsDatum > form.cleaned_data.get("endDatum"):
moeglich = True
elif reservierung.anfangsDatum == form.cleaned_data.get("endDatum"):
if reservierung.anfangsZeit > form.cleaned_data.get("endZeit"):
moeglich = True
else:
moeglich = False
reserv = reservierung
break
else:
moeglich = True
if moeglich:
reserv = models.Reservierung()
reserv.reserviert_von = request.user
if form.cleaned_data.get("reserviertFür") == "":
reserv.reserviert_für = request.user.last_name
else:
reserv.reserviert_für = form.cleaned_data.get("reserviertFür")
reserv.reservierterRaum = models.Raum.objects.get(
id=form.cleaned_data.get("reservierterRaum"))
reserv.reservierungsGrund = form.cleaned_data.get(
"reservierungsGrund")
reserv.anfangsDatum = form.cleaned_data.get("anfangsDatum")
reserv.endDatum = form.cleaned_data.get("endDatum")
reserv.anfangsZeit = form.cleaned_data.get("anfangsZeit")
reserv.endZeit = form.cleaned_data.get("endZeit")
reserv.täglich = form.cleaned_data.get("täglich")
reserv.save()
return HttpResponseRedirect(reverse('reservierung:index'))
else:
                # collect free rooms by inspecting the
                # remaining rooms' reservations
rooms = models.Raum.objects.exclude(
id=form.cleaned_data.get("reservierterRaum"))
if rooms.exists():
for room in rooms:
room_reservs = models.Reservierung.objects.filter(
reservierterRaum=room)
                        # do any reservations exist for this room?
if room_reservs.exists():
                            # check all of the room's reservations
free_room = False
for room_reserv in room_reservs:
                                # does the request fall inside an existing
                                # reservation's period?
if form.cleaned_data.get("täglich"):
if room_reserv.anfangsDatum < form.cleaned_data.get("anfangsDatum") and form.cleaned_data.get("anfangsDatum") < room_reserv.endDatum:
if room_reserv.täglich:
if room_reserv.endZeit <= form.cleaned_data.get("anfangsZeit") or room_reserv.anfangsZeit > form.cleaned_data.get("endZeit"):
free_room = True
else:
free_room = False
break
else:
free_room = False
break
else:
if room_reserv.endDatum < form.cleaned_data.get("anfangsDatum"):
free_room = True
elif room_reserv.endDatum == form.cleaned_data.get("anfangsDatum"):
if room_reserv.endZeit <= form.cleaned_data.get("anfangsZeit"):
free_room = True
elif room_reserv.anfangsZeit >= form.cleaned_data.get("endZeit"):
free_room = True
else:
free_room = False
break
elif room_reserv.anfangsDatum == form.cleaned_data.get("endDatum"):
if room_reserv.endZeit <= form.cleaned_data.get("anfangsZeit"):
free_room = True
elif room_reserv.anfangsZeit >= form.cleaned_data.get("endZeit"):
free_room = True
else:
free_room = False
break
else:
if room_reserv.anfangsDatum < form.cleaned_data.get("anfangsDatum") and form.cleaned_data.get("anfangsDatum") < room_reserv.endDatum:
                                        # yes, so the room is not free
free_room = False
break
else:
                                        # no, so the room may be free; check
                                        # whether anfangsDatum is after or on
                                        # endDatum
if room_reserv.endDatum < form.cleaned_data.get("anfangsDatum"):
                                            # room free
free_room = True
elif room_reserv.endDatum == form.cleaned_data.get("anfangsDatum"):
                                            # same day
if room_reserv.endZeit <= form.cleaned_data.get("anfangsZeit"):
                                                # room free
free_room = True
else:
                                                # room is not free
free_room = False
break
elif room_reserv.anfangsDatum > form.cleaned_data.get("endDatum"):
                                            # room free
free_room = True
elif room_reserv.anfangsDatum == form.cleaned_data.get("endDatum"):
if room_reserv.anfangsZeit > form.cleaned_data.get("endZeit"):
                                                # room free
free_room = True
else:
                                                # room not free
free_room = False
break
if free_room:
free_rooms.append(room)
else:
free_rooms.append(room)
else:
free_rooms = models.Raum.objects.all()
else:
form = forms.ReservierungForm()
return render(request, 'reservierung/reservierung_form.html', {'form': form, 'reserv': reserv, 'free_rooms': free_rooms, })
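# Note (illustrative, not part of the original file): the availability checks
# above enumerate the date/time orderings case by case. Assuming the endpoints
# are combined into datetime values, the same test collapses to the standard
# half-open interval-overlap predicate:
#
#   def overlaps(a_start, a_end, b_start, b_end):
#       # Two intervals [start, end) intersect iff each one starts before
#       # the other ends.
#       return a_start < b_end and b_start < a_end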
# View listing all reservations of the logged-in user
@login_required(login_url='account:login')
def reservierung_user(request):
user = request.user
rooms = models.Raum.objects.all()
rooms_return = []
for room in rooms:
room_return = []
reservierungen = models.Reservierung.objects.filter(
reservierterRaum=room).order_by('anfangsDatum')
for reservierung in reservierungen:
if reservierung.reserviert_von == user:
room_return.append(reservierung)
rooms_return.append(room_return)
return render(request, 'reservierung/reservierung_user.html', {'user': user, 'rooms_return': rooms_return, })
| mit | 3,023,080,012,261,107,700 | 52.847025 | 169 | 0.476378 | false | 4.218375 | false | false | false |
sternb0t/django-pandas | django_pandas/io.py | 1 | 3578 | import pandas as pd
from .utils import update_with_verbose
import django
def to_fields(qs, fieldnames):
for fieldname in fieldnames:
model = qs.model
for fieldname_part in fieldname.split('__'):
try:
field = model._meta.get_field(fieldname_part)
except django.db.models.fields.FieldDoesNotExist:
rels = model._meta.get_all_related_objects_with_model()
for relobj, _ in rels:
if relobj.get_accessor_name() == fieldname_part:
field = relobj.field
model = field.model
break
else:
if hasattr(field, "one_to_many") and field.one_to_many:
model = field.related_model
elif field.get_internal_type() in ('ForeignKey', 'OneToOneField', 'ManyToManyField'):
model = field.rel.to
yield field
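# Illustrative example (not part of the original module): assuming a model
# ``Book`` with a ForeignKey ``author`` to ``Author``, the call
# to_fields(Book.objects.all(), ['author__name']) yields the ``name`` field
# object of the related ``Author`` model.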
def read_frame(qs, fieldnames=(), index_col=None, coerce_float=False,
verbose=True):
"""
Returns a dataframe from a QuerySet
Optionally specify the field names/columns to utilize and
a field as the index
Parameters
----------
qs: The Django QuerySet.
fieldnames: The model field names to use in creating the frame.
                You can span a relationship in the usual Django way
                by using double underscores to specify a related field
                in another model
index_col: specify the field to use for the index. If the index
field is not in the field list it will be appended
coerce_float : boolean, default False
        Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point, useful for SQL result sets
    verbose: boolean If this is ``True`` then populate the DataFrame with the
             human-readable versions of any foreign key fields; otherwise use
             the primary key values.
             The human-readable version of a foreign key field is
             defined in the ``__unicode__`` or ``__str__``
             methods of the related class definition
"""
if fieldnames:
if index_col is not None and index_col not in fieldnames:
# Add it to the field names if not already there
fieldnames = tuple(fieldnames) + (index_col,)
fields = to_fields(qs, fieldnames)
elif isinstance(qs, django.db.models.query.ValuesQuerySet):
if django.VERSION < (1, 8):
annotation_field_names = qs.aggregate_names
else:
annotation_field_names = qs.annotation_names
fieldnames = qs.field_names + annotation_field_names + qs.extra_names
fields = [qs.model._meta.get_field(f) for f in qs.field_names] + \
[None] * (len(annotation_field_names) + len(qs.extra_names))
else:
fields = qs.model._meta.fields
fieldnames = [f.name for f in fields]
if isinstance(qs, django.db.models.query.ValuesQuerySet):
recs = list(qs)
else:
recs = list(qs.values_list(*fieldnames))
df = pd.DataFrame.from_records(recs, columns=fieldnames,
coerce_float=coerce_float)
if verbose:
update_with_verbose(df, fieldnames, fields)
if index_col is not None:
df.set_index(index_col, inplace=True)
return df
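# Example usage (illustrative, not part of the original module; assumes an
# ``Employee`` model with ``name`` and ``salary`` fields):
#
#   qs = Employee.objects.all()
#   df = read_frame(qs, fieldnames=['name', 'salary'], index_col='name')
#   df['salary'].describe()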
| bsd-3-clause | 5,211,177,889,404,389,000 | 36.663158 | 101 | 0.602571 | false | 4.44472 | false | false | false |