repo_name | path | license | content
---|---|---|---
voytekresearch/neurodsp | neurodsp/tests/aperiodic/test_irasa.py | mit | """Tests for IRASA functions."""
import numpy as np
from neurodsp.tests.settings import FS, N_SECONDS_LONG, EXP1
from neurodsp.sim import sim_combined
from neurodsp.spectral import compute_spectrum, trim_spectrum
from neurodsp.aperiodic.irasa import *
###################################################################################################
###################################################################################################
def test_compute_irasa(tsig_comb):
# Estimate periodic and aperiodic components with IRASA
f_range = [1, 30]
freqs, psd_ap, psd_pe = compute_irasa(tsig_comb, FS, f_range, noverlap=int(2*FS))
assert len(freqs) == len(psd_ap) == len(psd_pe)
# Compute r-squared for the full model, comparing to a standard power spectrum
_, powers = trim_spectrum(*compute_spectrum(tsig_comb, FS, nperseg=int(4*FS)), f_range)
r_sq = np.corrcoef(np.array([powers, psd_ap+psd_pe]))[0][1]
assert r_sq > .95
def test_fit_irasa(tsig_comb):
# Estimate periodic and aperiodic components with IRASA & fit aperiodic
freqs, psd_ap, _ = compute_irasa(tsig_comb, FS, noverlap=int(2*FS))
b0, b1 = fit_irasa(freqs, psd_ap)
assert round(b1) == EXP1
assert np.abs(b0 - np.log10((psd_ap)[0])) < 1
def test_fit_func():
freqs = np.arange(30)
intercept = -2
slope = -2
fit = fit_func(freqs, intercept, slope)
assert (fit == slope * freqs + intercept).all()
tkrajina/cartesius | cartesius/colors.py | apache-2.0 | # -*- coding: utf-8 -*-
""" Utility functions folr colors """
def get_color(color):
""" Can convert from integer to (r, g, b) """
if not color:
return None
if isinstance(color, int):
temp = color
blue = temp % 256
temp = int(temp / 256)
green = temp % 256
temp = int(temp / 256)
red = temp % 256
return (red, green, blue)
if not len(color) == 3:
raise Exception('Invalid color {0}'.format(color))
return color
def brighten(color, n):
return (int((color[0] + n) % 256), int((color[1] + n) % 256), int((color[2] + n) % 256))
def darken(color, n):
return brighten(color, -n)
def get_color_between(color1, color2, i):
""" i is a number between 0 and 1, if 0 then color1, if 1 color2, ... """
if i <= 0:
return color1
if i >= 1:
return color2
return (int(color1[0] + (color2[0] - color1[0]) * i),
int(color1[1] + (color2[1] - color1[1]) * i),
int(color1[2] + (color2[2] - color1[2]) * i))
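# Illustrative usage (an added sketch, not part of the original module). The
# integer form assumes 0xRRGGBB packing, which matches the decoding done in
# get_color() above.
if __name__ == '__main__':
    assert get_color(0xff8000) == (255, 128, 0)        # int -> (r, g, b)
    assert get_color((10, 20, 30)) == (10, 20, 30)     # triples pass through
    assert brighten((10, 20, 30), 10) == (20, 30, 40)  # channel-wise, mod 256
    # i=0.5 yields the (integer-truncated) midpoint between the two colors:
    assert get_color_between((0, 0, 0), (255, 255, 255), 0.5) == (127, 127, 127)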
ehooo/django_mqtt | test_web/settings.py | gpl-2.0 | """
Django settings for web project.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#b68qv#(v-g26k3qt_-1ufg-prvsw2p)7@ctea*n!36-w23bv1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
DB_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'django_mqtt',
'django_mqtt.mosquitto.auth_plugin',
'django_mqtt.publisher',
]
FIXTURE_DIRS = [
os.path.join(BASE_DIR, 'test_web', 'fixtures')
]
MIDDLEWARE = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'test_web.urls'
MQTT_CERTS_ROOT = os.path.join(BASE_DIR, 'private')
MQTT_ACL_ALLOW = False
MQTT_ACL_ALLOW_ANONIMOUS = MQTT_ACL_ALLOW
MQTT_ALLOW_EMPTY_CLIENT_ID = False
MQTT_SESSION_TIMEOUT = 5
WSGI_APPLICATION = 'test_web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:' if DB_DEBUG else os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
LOGGING_LEVEL = 'DEBUG' if DEBUG else 'INFO'
if 'test' in sys.argv:
LOGGING_LEVEL = 'CRITICAL'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'console': {
'level': LOGGING_LEVEL,
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'loggers': {
'django': {
'handlers': ['console'],
'propagate': True,
'filters': ['require_debug_true']
}
}
}
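# Usage note (illustrative): Django loads this module the standard way, e.g.
# DJANGO_SETTINGS_MODULE=test_web.settings. With DB_DEBUG = True above, the
# project runs against an in-memory sqlite database, and running under
# 'test' forces LOGGING_LEVEL to 'CRITICAL' (see above).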
matllubos/django-reversion-log | setup.py | lgpl-3.0 | from setuptools import setup, find_packages
from reversion_log.version import get_version
setup(
name='django-reversion-log',
version=get_version(),
description="Log build on revisiions.",
keywords='django, reversion',
author='Lubos Matl',
author_email='[email protected]',
url='https://github.com/matllubos/django-reversion-log',
license='LGPL',
package_dir={'is_core': 'is_core'},
include_package_data=True,
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU LESSER GENERAL PUBLIC LICENSE (LGPL)',
'Natural Language :: Czech',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Site Management',
],
install_requires=[
'django>=1.6',
'django-reversion==1.8.7',
],
zip_safe=False
)
MissionCriticalCloud/cosmic | cosmic-core/systemvm/patches/centos7/opt/cosmic/startup/setup_cpvm.py | apache-2.0 | import logging
import os
from utils import Utils
def setup_iptable_rules(cmdline):
external_rules = ""
for cidr in cmdline.get('allowedcidrs', '').split(','):
if cidr != '':
external_rules += "-A INPUT -i " + cmdline['publicnic'] + " -s " + cidr.strip() + " -p tcp -m multiport --dports 80,443 -m tcp -j ACCEPT\n"
iptables_rules = """
*nat
:PREROUTING ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
COMMIT
*filter
:INPUT DROP [0:0]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [0:0]
-A INPUT -i lo -j ACCEPT
-A INPUT -i %s -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -i %s -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -i %s -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -p icmp --icmp-type 13 -j DROP
-A INPUT -p icmp -j ACCEPT
-A INPUT -i %s -p tcp -m state --state NEW -m tcp -s 169.254.0.1/32 --dport 3922 -j ACCEPT
-A INPUT -i %s -p tcp -m state --state NEW -m tcp --dport 8001 -j ACCEPT
-A INPUT -i %s -p tcp -m state --state NEW -m tcp --dport 8001 -j ACCEPT
%s
COMMIT
""" % (
cmdline['controlnic'],
cmdline['mgtnic'],
cmdline['publicnic'],
cmdline['controlnic'],
cmdline['controlnic'],
cmdline['mgtnic'],
external_rules
)
with open("/tmp/iptables-consoleproxy", "w") as f:
f.write(iptables_rules)
os.system("iptables-restore < /tmp/iptables-consoleproxy")
class ConsoleProxyVM:
def __init__(self, cmdline) -> None:
super().__init__()
self.cmdline = cmdline
self.config_dir = "/etc/cosmic/agent/"
def start(self):
logging.info("Setting up configuration for %s" % self.cmdline["type"])
self.setup_agent_config()
setup_iptable_rules(self.cmdline)
if self.cmdline['setrfc1918routes'] == 'true':
logging.info("Setting rfc1918 routes")
Utils(self.cmdline).set_rfc1918_routes()
logging.info("Setting local routes")
Utils(self.cmdline).set_local_routes()
os.system("systemctl start cosmic-agent")
def setup_agent_config(self):
if not os.path.isdir(self.config_dir):
os.makedirs(self.config_dir, 0o644, True)
consoleproxy_properties = """
consoleproxy.tcpListenPort=0
consoleproxy.httpListenPort=80
consoleproxy.httpCmdListenPort=8001
consoleproxy.jarDir=./applet/
consoleproxy.viewerLinger=180
consoleproxy.reconnectMaxRetry=5
"""
with open(self.config_dir + "consoleproxy.properties", "w") as f:
f.write(consoleproxy_properties)
Utils(self.cmdline).setup_agent_properties()
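# Minimal usage sketch (hypothetical cmdline values): the startup entry point
# is expected to parse the kernel command line into a dict and hand it over:
#
#   cmdline = {'type': 'consoleproxy', 'controlnic': 'eth0', 'mgtnic': 'eth1',
#              'publicnic': 'eth2', 'allowedcidrs': '',
#              'setrfc1918routes': 'false'}
#   ConsoleProxyVM(cmdline).start()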
Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/pyshared/orca/structural_navigation.py | lgpl | # Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Implements structural navigation. Right now this is only
being implemented by Gecko; however it can be used in any
script providing access to document content."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc."
__license__ = "LGPL"
import pyatspi
import debug
import input_event
import keybindings
import orca
import orca_state
import settings
import speech
from orca_i18n import _
from orca_i18n import ngettext
from orca_i18n import C_
#############################################################################
# #
# MatchCriteria #
# #
#############################################################################
class MatchCriteria:
"""Contains the criteria which will be used to generate a collection
matchRule. We don't want to create the rule until we need it and
are ready to use it. In addition, the creation of an AT-SPI match
rule requires you specify quite a few things (see the __init__),
most of which are irrelevant to the search at hand. This class
makes it possible for the StructuralNavigationObject creator to just
specify the few criteria that actually matter.
"""
def __init__(self,
collection,
states = [],
matchStates = None,
objAttrs = [],
matchObjAttrs = None,
roles = [],
matchRoles = None,
interfaces = "",
matchInterfaces = None,
invert = False,
applyPredicate = False):
"""Creates a new match criteria object.
Arguments:
- collection: the collection interface for the document in
which the accessible objects can be found.
- states: a list of pyatspi states of interest
- matchStates: whether an object must have all of the states
in the states list, any of the states in the list, or none
of the states in the list. Must be one of the collection
interface MatchTypes if provided.
- objAttrs: a list of object attributes (not text attributes)
- matchObjAttrs: whether an object must have all of the
attributes in the objAttrs list, any of the attributes in
the list, or none of the attributes in the list. Must be
one of the collection interface MatchTypes if provided.
- interfaces: (We aren't using this. According to the at-spi
idl, it is a string.)
- matchInterfaces: The collection MatchType for matching by
interface.
- invert: If true the match rule will find objects that don't
match. We always use False.
- applyPredicate: whether or not a predicate should be applied
as an additional check to see if an item is indeed a match.
This is necessary, for instance, when one of the things we
care about is a text attribute, something the collection
interface doesn't include in its criteria.
"""
self.collection = collection
self.matchStates = matchStates or collection.MATCH_ANY
self.objAttrs = objAttrs
self.matchObjAttrs = matchObjAttrs or collection.MATCH_ANY
self.roles = roles
self.matchRoles = matchRoles or collection.MATCH_ANY
self.interfaces = interfaces
self.matchInterfaces = matchInterfaces or collection.MATCH_ALL
self.invert = invert
self.applyPredicate = applyPredicate
self.states = pyatspi.StateSet()
for state in states:
self.states.add(state)
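# An illustrative sketch (assumes a document exposing the AT-SPI collection
# interface): criteria matching heading objects at level 2. Only the fields
# that matter are supplied; everything else keeps the defaults described in
# the docstring above.
#
#   criteria = MatchCriteria(collection,
#                            roles=[pyatspi.ROLE_HEADING],
#                            objAttrs=['level:2'])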
###########################################################################
# #
# StructuralNavigationObject #
# #
###########################################################################
class StructuralNavigationObject:
"""Represents a document object which has identifiable characteristics
which can be used for the purpose of navigation to and among instances
of that object. These characteristics may be something as simple as a
role and/or a state of interest. Or they may be something more complex
such as character counts, text attributes, and other object attributes.
"""
def __init__(self, structuralNavigation, objType, bindings, predicate,
criteria, presentation):
"""Creates a new structural navigation object.
Arguments:
- structuralNavigation: the StructuralNavigation class associated
with this object.
- objType: the type (e.g. BLOCKQUOTE) associated with this object.
- bindings: a dictionary of all of the possible bindings for this
object. In the case of all but the "atLevel" bindings, each
binding takes the form of [keysymstring, modifiers, description].
The goPreviousAtLevel and goNextAtLevel bindings are each a list
of bindings in that form.
- predicate: the predicate to use to determine if a given accessible
matches this structural navigation object. Used when a search via
collection is not possible or practical.
- criteria: a method which returns a MatchCriteria object which
can in turn be used to locate the next/previous matching accessible
via collection.
- presentation: the method which should be called after performing
the search for the structural navigation object.
"""
self.structuralNavigation = structuralNavigation
self.objType = objType
self.bindings = bindings
self.predicate = predicate
self.criteria = criteria
self.present = presentation
self.inputEventHandlers = {}
self.keyBindings = keybindings.KeyBindings()
self.functions = []
self._setUpHandlersAndBindings()
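    # For illustration, a bindings dictionary of the shape described in the
    # docstring above -- [keysymstring, modifiers, description] -- mirroring
    # the heading bindings defined elsewhere in Orca:
    #
    #   bindings = {
    #       "previous": ["h", settings.SHIFT_MODIFIER_MASK,
    #                    _("Goes to previous heading.")],
    #       "next": ["h", settings.NO_MODIFIER_MASK,
    #                _("Goes to next heading.")],
    #   }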
def _setUpHandlersAndBindings(self):
"""Adds the inputEventHandlers and keyBindings for this object."""
# Set up the basic handlers. These are our traditional goPrevious
# and goNext functions.
#
previousBinding = self.bindings.get("previous")
if previousBinding:
[keysymstring, modifiers, description] = previousBinding
handlerName = "%sGoPrevious" % self.objType
self.inputEventHandlers[handlerName] = \
input_event.InputEventHandler(self.goPrevious, description)
self.keyBindings.add(
keybindings.KeyBinding(
keysymstring,
settings.defaultModifierMask,
modifiers,
self.inputEventHandlers[handlerName]))
self.functions.append(self.goPrevious)
nextBinding = self.bindings.get("next")
if nextBinding:
[keysymstring, modifiers, description] = nextBinding
handlerName = "%sGoNext" % self.objType
self.inputEventHandlers[handlerName] = \
input_event.InputEventHandler(self.goNext, description)
self.keyBindings.add(
keybindings.KeyBinding(
keysymstring,
settings.defaultModifierMask,
modifiers,
self.inputEventHandlers[handlerName]))
self.functions.append(self.goNext)
# Set up the "at level" handlers (e.g. to navigate among headings
# at the specified level).
#
previousAtLevel = self.bindings.get("previousAtLevel") or []
for i, binding in enumerate(previousAtLevel):
level = i + 1
handler = self.goPreviousAtLevelFactory(level)
handlerName = "%sGoPreviousLevel%dHandler" % (self.objType, level)
keysymstring, modifiers, description = binding
self.inputEventHandlers[handlerName] = \
input_event.InputEventHandler(handler, description)
self.keyBindings.add(
keybindings.KeyBinding(
keysymstring,
settings.defaultModifierMask,
modifiers,
self.inputEventHandlers[handlerName]))
self.functions.append(handler)
nextAtLevel = self.bindings.get("nextAtLevel") or []
for i, binding in enumerate(nextAtLevel):
level = i + 1
handler = self.goNextAtLevelFactory(level)
handlerName = "%sGoNextLevel%dHandler" % (self.objType, level)
keysymstring, modifiers, description = binding
self.inputEventHandlers[handlerName] = \
input_event.InputEventHandler(handler, description)
self.keyBindings.add(
keybindings.KeyBinding(
keysymstring,
settings.defaultModifierMask,
modifiers,
self.inputEventHandlers[handlerName]))
self.functions.append(handler)
# Set up the "directional" handlers (e.g. for table cells. Live
# region support has a handler to go to the last live region,
# so we'll handle that here as well).
#
directions = {}
directions["Left"] = self.bindings.get("left")
directions["Right"] = self.bindings.get("right")
directions["Up"] = self.bindings.get("up")
directions["Down"] = self.bindings.get("down")
directions["First"] = self.bindings.get("first")
directions["Last"] = self.bindings.get("last")
for direction in directions:
binding = directions.get(direction)
if not binding:
continue
handler = self.goDirectionFactory(direction)
handlerName = "%sGo%s" % (self.objType, direction)
keysymstring, modifiers, description = binding
self.inputEventHandlers[handlerName] = \
input_event.InputEventHandler(handler, description)
self.keyBindings.add(
keybindings.KeyBinding(
keysymstring,
settings.defaultModifierMask,
modifiers,
self.inputEventHandlers[handlerName]))
self.functions.append(handler)
def addHandlerAndBinding(self, binding, handlerName, function):
"""Adds a custom inputEventHandler and keybinding to the object's
handlers and bindings. Right now this is unused, but here in
case a creator of a StructuralNavigationObject had some other
desired functionality in mind.
Arguments:
- binding: [keysymstring, modifiers, description]
- handlerName: a string uniquely identifying the handler
- function: the function associated with the binding
"""
[keysymstring, modifiers, description] = binding
handler = input_event.InputEventHandler(function, description)
keyBinding = keybindings.KeyBinding(
keysymstring,
settings.defaultModifierMask,
modifiers,
handler)
self.inputEventHandlers[handlerName] = handler
self.structuralNavigation.inputEventHandlers[handlerName] = handler
self.functions.append(function)
self.structuralNavigation.functions.append(function)
self.keyBindings.add(keyBinding)
self.structuralNavigation.keyBindings.add(keyBinding)
def goPrevious(self, script, inputEvent):
"""Go to the previous object."""
self.structuralNavigation.goObject(self, False)
def goNext(self, script, inputEvent):
"""Go to the next object."""
self.structuralNavigation.goObject(self, True)
def goPreviousAtLevelFactory(self, level):
"""Generates a goPrevious method for the specified level. Right
now, this is just for headings, but it may have applicability
for other objects such as list items (i.e. for level-based
        navigation in an outline or other multi-tiered list).
Arguments:
- level: the desired level of the object as an int.
"""
def goPreviousAtLevel(script, inputEvent):
self.structuralNavigation.goObject(self, False, arg=level)
return goPreviousAtLevel
def goNextAtLevelFactory(self, level):
"""Generates a goNext method for the specified level. Right
now, this is just for headings, but it may have applicability
for other objects such as list items (i.e. for level-based
        navigation in an outline or other multi-tiered list).
Arguments:
- level: the desired level of the object as an int.
"""
def goNextAtLevel(script, inputEvent):
self.structuralNavigation.goObject(self, True, arg=level)
return goNextAtLevel
def goDirectionFactory(self, direction):
"""Generates the methods for navigation in a particular direction
(i.e. left, right, up, down, first, last). Right now, this is
primarily for table cells, but it may have applicability for other
objects. For example, when navigating in an outline, one might
want the ability to navigate to the next item at a given level,
but then work his/her way up/down in the hierarchy.
Arguments:
- direction: the direction in which to navigate as a string.
"""
def goCell(script, inputEvent):
thisCell = self.structuralNavigation.getCellForObj(\
self.structuralNavigation.getCurrentObject())
currentCoordinates = \
self.structuralNavigation.getCellCoordinates(thisCell)
if direction == "Left":
desiredCoordinates = [currentCoordinates[0],
currentCoordinates[1] - 1]
elif direction == "Right":
desiredCoordinates = [currentCoordinates[0],
currentCoordinates[1] + 1]
elif direction == "Up":
desiredCoordinates = [currentCoordinates[0] - 1,
currentCoordinates[1]]
elif direction == "Down":
desiredCoordinates = [currentCoordinates[0] + 1,
currentCoordinates[1]]
elif direction == "First":
desiredCoordinates = [0, 0]
else:
desiredCoordinates = [-1, -1]
table = self.structuralNavigation.getTableForCell(thisCell)
if table:
iTable = table.queryTable()
lastRow = iTable.nRows - 1
lastCol = iTable.nColumns - 1
desiredCoordinates = [lastRow, lastCol]
self.structuralNavigation.goCell(self,
thisCell,
currentCoordinates,
desiredCoordinates)
def goLastLiveRegion(script, inputEvent):
"""Go to the last liveRegion."""
if settings.inferLiveRegions:
script.liveMngr.goLastLiveRegion()
else:
# Translators: this announces to the user that live region
# support has been turned off.
#
script.presentMessage(_("Live region support is off"))
if self.objType == StructuralNavigation.TABLE_CELL:
return goCell
elif self.objType == StructuralNavigation.LIVE_REGION \
and direction == "Last":
return goLastLiveRegion
#############################################################################
# #
# StructuralNavigation #
# #
#############################################################################
class StructuralNavigation:
"""This class implements the structural navigation functionality which
is available to scripts. Scripts interested in implementing structural
navigation need to override getEnabledStructuralNavigationTypes() and
return a list of StructuralNavigation object types which should be
enabled.
"""
# The available object types.
#
# Convenience methods have been put into place whereby one can
# create an object (FOO = "foo"), and then provide the following
# methods: _fooBindings(), _fooPredicate(), _fooCriteria(), and
# _fooPresentation(). With these in place, and with the object
# FOO included among the object types returned by the script's
# getEnabledStructuralNavigationTypes(), the StructuralNavigation
# object should be created and set up automagically. At least that
# is the idea. :-) This hopefully will also enable easy re-definition
# of existing StructuralNavigationObjects on a script-by-script basis.
# For instance, in the soffice script, overriding _blockquotePredicate
# should be all that is needed to implement navigation by blockquote
# in OOo Writer documents.
#
ANCHOR = "anchor"
BLOCKQUOTE = "blockquote"
BUTTON = "button"
CHECK_BOX = "checkBox"
CHUNK = "chunk"
COMBO_BOX = "comboBox"
ENTRY = "entry"
FORM_FIELD = "formField"
HEADING = "heading"
LANDMARK = "landmark"
LIST = "list" # Bulleted/numbered lists
LIST_ITEM = "listItem" # Bulleted/numbered list items
LIVE_REGION = "liveRegion"
PARAGRAPH = "paragraph"
RADIO_BUTTON = "radioButton"
SEPARATOR = "separator"
TABLE = "table"
TABLE_CELL = "tableCell"
UNVISITED_LINK = "unvisitedLink"
VISITED_LINK = "visitedLink"
# Whether or not to attempt to use collection. There's no point
# in bothering if we know that the collection interface has not
# been implemented in a given app (e.g. StarOffice/OOo) so this
# variable can be overridden.
#
collectionEnabled = settings.useCollection
# Roles which are recognized as being a form field. Note that this
# is for the purpose of match rules and predicates and refers to
# AT-SPI roles.
#
FORM_ROLES = [pyatspi.ROLE_CHECK_BOX,
pyatspi.ROLE_RADIO_BUTTON,
pyatspi.ROLE_COMBO_BOX,
pyatspi.ROLE_DOCUMENT_FRAME, # rich text editing
pyatspi.ROLE_LIST,
pyatspi.ROLE_ENTRY,
pyatspi.ROLE_PASSWORD_TEXT,
pyatspi.ROLE_PUSH_BUTTON,
pyatspi.ROLE_SPIN_BUTTON,
pyatspi.ROLE_TEXT]
# Roles which are recognized as being potential "large objects"
# or "chunks." Note that this refers to AT-SPI roles.
#
OBJECT_ROLES = [pyatspi.ROLE_HEADING,
pyatspi.ROLE_LIST,
pyatspi.ROLE_PARAGRAPH,
pyatspi.ROLE_TABLE,
pyatspi.ROLE_TABLE_CELL,
pyatspi.ROLE_TEXT,
pyatspi.ROLE_SECTION,
pyatspi.ROLE_DOCUMENT_FRAME]
def __init__(self, script, enabledTypes, enabled=False):
"""Creates an instance of the StructuralNavigation class.
Arguments:
- script: the script which which this instance is associated.
- enabledTypes: a list of StructuralNavigation object types
which the script is interested in supporting.
- enabled: Whether structural navigation should start out
          enabled. For instance, in Gecko by default we want it
enabled; in soffice, we would want to start out with it
disabled and have the user enable it via a keystroke when
desired.
"""
self._script = script
self.enabled = enabled
# Create all of the StructuralNavigationObject's in which the
# script is interested, using the convenience method
#
self.enabledObjects = {}
for objType in enabledTypes:
self.enabledObjects[objType] = \
self.structuralNavigationObjectCreator(objType)
self.functions = []
self.inputEventHandlers = {}
self.setupInputEventHandlers()
self.keyBindings = self.getKeyBindings()
# When navigating in a non-uniform table, one can move to a
# cell which spans multiple rows and/or columns. When moving
# beyond that cell, into a cell that does NOT span multiple
# rows/columns, we want to be sure we land in the right place.
# Therefore, we'll store the coordinates from "our perspective."
#
self.lastTableCell = [-1, -1]
def structuralNavigationObjectCreator(self, name):
"""This convenience method creates a StructuralNavigationObject
        with the specified name and associated characteristics. (See the
        "Objects" section of code near the end of this class.) Creators
of StructuralNavigationObject's can still do things the old
fashioned way should they so choose, by creating the instance
and then adding it via addObject().
Arguments:
- name: the name/objType associated with this object.
"""
# We're going to assume bindings. After all, a structural
        # navigation object is by definition an object which one can
# navigate to using the associated keybindings. For similar
# reasons we'll also assume a predicate and a presentation
# method. (See the Objects section towards the end of this
# class for examples of each.)
#
bindings = eval("self._%sBindings()" % name)
predicate = eval("self._%sPredicate" % name)
presentation = eval("self._%sPresentation" % name)
# We won't make this assumption for match criteria because
# the collection interface might not be implemented (e.g.
# StarOffice/OpenOffice) and/or its use might not be possible
# or practical for a given StructuralNavigationObject (e.g.
# matching by text attributes, spatial navigation within tables).
#
try:
criteria = eval("self._%sCriteria" % name)
except:
criteria = None
return StructuralNavigationObject(self, name, bindings, predicate,
criteria, presentation)
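    # Convention sketch: for an object type FOO = "foo" handed to the creator
    # above, the class is expected to provide methods named as follows
    # (illustrative signatures, matching those looked up via eval() above):
    #
    #   def _fooBindings(self): ...                       # bindings dict
    #   def _fooPredicate(self, obj, arg=None): ...       # True if obj matches
    #   def _fooCriteria(self, collection, arg=None): ... # optional
    #   def _fooPresentation(self, obj, arg=None): ...    # speak/braille obj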
def addObject(self, objType, structuralNavigationObject):
"""Adds structuralNavigationObject to the dictionary of enabled
objects.
Arguments:
- objType: the name/object type of the StructuralNavigationObject.
- structuralNavigationObject: the StructuralNavigationObject to
add.
"""
self.enabledObjects[objType] = structuralNavigationObject
def setupInputEventHandlers(self):
"""Defines InputEventHandler fields for a script."""
if not len(self.enabledObjects):
return
self.inputEventHandlers["toggleStructuralNavigationHandler"] = \
input_event.InputEventHandler(
self.toggleStructuralNavigation,
# Translators: the structural navigation keys are designed
# to move the caret around the document content by object
# type. Thus H moves you to the next heading, Shift H to
# the previous heading, T to the next table, and so on.
# This feature needs to be toggle-able so that it does not
# interfere with normal writing functions.
#
_("Toggles structural navigation keys."))
for structuralNavigationObject in self.enabledObjects.values():
self.inputEventHandlers.update(\
structuralNavigationObject.inputEventHandlers)
self.functions.extend(structuralNavigationObject.functions)
def getKeyBindings(self):
"""Defines the structural navigation key bindings for a script.
Returns: an instance of keybindings.KeyBindings.
"""
keyBindings = keybindings.KeyBindings()
if not len(self.enabledObjects):
return keyBindings
keyBindings.add(
keybindings.KeyBinding(
"z",
settings.defaultModifierMask,
settings.ORCA_MODIFIER_MASK,
self.inputEventHandlers["toggleStructuralNavigationHandler"]))
for structuralNavigationObject in self.enabledObjects.values():
bindings = structuralNavigationObject.keyBindings.keyBindings
for keybinding in bindings:
keyBindings.add(keybinding)
return keyBindings
#########################################################################
# #
# Input Event Handler Methods #
# #
#########################################################################
def toggleStructuralNavigation(self, script, inputEvent):
"""Toggles structural navigation keys."""
self.enabled = not self.enabled
if self.enabled:
# Translators: the structural navigation keys are designed
# to move the caret around document content by object type.
# Thus H moves you to the next heading, Shift H to the
# previous heading, T to the next table, and so on. Some
# users prefer to turn this off to use Firefox's search
# when typing feature. This message is sent to both the
# braille display and the speech synthesizer when the user
# toggles the structural navigation feature of Orca.
# It should be a brief informative message.
#
string = _("Structural navigation keys on.")
else:
# Translators: the structural navigation keys are designed
# to move the caret around document content by object type.
# Thus H moves you to the next heading, Shift H to the
# previous heading, T to the next table, and so on. Some
# users prefer to turn this off to use Firefox's search
# when typing feature. This message is sent to both the
# braille display and the speech synthesizer when the user
# toggles the structural navigation feature of Orca.
# It should be a brief informative message.
#
string = _("Structural navigation keys off.")
debug.println(debug.LEVEL_CONFIGURATION, string)
self._script.presentMessage(string)
#########################################################################
# #
# Methods for Moving to Objects #
# #
#########################################################################
def goCell(self, structuralNavigationObject, thisCell,
currentCoordinates, desiredCoordinates):
"""The method used for navigation among cells in a table.
Arguments:
- structuralNavigationObject: the StructuralNavigationObject which
represents the table cell.
- thisCell: the pyatspi accessible TABLE_CELL we're currently in
- currentCoordinates: the [row, column] of thisCell. Note, we
cannot just get the coordinates because in table cells which
span multiple rows and/or columns, the value returned by
table.getRowAtIndex() is the first row the cell spans. Likewise,
the value returned by table.getColumnAtIndex() is the left-most
column. Therefore, we keep track of the row and column from
our perspective to ensure we stay in the correct row and column.
- desiredCoordinates: the [row, column] where we think we'd like to
be.
"""
table = self.getTableForCell(thisCell)
try:
iTable = table.queryTable()
except:
# Translators: this is for navigating document content by
# moving from table cell to table cell. If the user gives a
# table navigation command but is not in a table, Orca speaks
# this message.
#
self._script.presentMessage(_("Not in a table."))
return None
currentRow, currentCol = currentCoordinates
desiredRow, desiredCol = desiredCoordinates
rowDiff = desiredRow - currentRow
colDiff = desiredCol - currentCol
oldRowHeaders = self._getRowHeaders(thisCell)
oldColHeaders = self._getColumnHeaders(thisCell)
cell = thisCell
while cell:
cell = iTable.getAccessibleAt(desiredRow, desiredCol)
if not cell:
if desiredCol < 0:
# Translators: this is for navigating document
# content by moving from table cell to table cell.
# This is the message spoken when the user attempts
# to move to the left of the current cell and is
# already in the first column.
#
self._script.presentMessage(_("Beginning of row."))
desiredCol = 0
elif desiredCol > iTable.nColumns - 1:
# Translators: this is for navigating document
# content by moving from table cell to table cell.
# This is the message spoken when the user attempts
# to move to the right of the current cell and is
# already in the last column.
#
self._script.presentMessage(_("End of row."))
desiredCol = iTable.nColumns - 1
if desiredRow < 0:
# Translators: this is for navigating document
# content by moving from table cell to table cell.
# This is the message spoken when the user attempts
# to move to the cell above the current cell and is
# already in the first row.
#
self._script.presentMessage(_("Top of column."))
desiredRow = 0
elif desiredRow > iTable.nRows - 1:
# Translators: this is for navigating document
# content by moving from table cell to table cell.
# This is the message spoken when the user attempts
# to move to the cell below the current cell and is
# already in the last row.
#
self._script.presentMessage(_("Bottom of column."))
desiredRow = iTable.nRows - 1
elif self._script.utilities.isSameObject(thisCell, cell) \
or settings.skipBlankCells and self._isBlankCell(cell):
if colDiff < 0:
desiredCol -= 1
elif colDiff > 0:
desiredCol += 1
if rowDiff < 0:
desiredRow -= 1
elif rowDiff > 0:
desiredRow += 1
else:
break
self.lastTableCell = [desiredRow, desiredCol]
if cell:
arg = [rowDiff, colDiff, oldRowHeaders, oldColHeaders]
structuralNavigationObject.present(cell, arg)
def goObject(self, structuralNavigationObject, isNext, obj=None, arg=None):
"""The method used for navigation among StructuralNavigationObjects
which are not table cells.
Arguments:
- structuralNavigationObject: the StructuralNavigationObject which
represents the object of interest.
- isNext: If True, we're interested in the next accessible object
which matches structuralNavigationObject. If False, we're
interested in the previous accessible object which matches.
- obj: the current object (typically the locusOfFocus).
- arg: optional arguments which may need to be passed along to
the predicate, presentation method, etc. For instance, in the
case of navigating amongst headings at a given level, the level
is needed and passed in as arg.
"""
obj = obj or self.getCurrentObject()
# Yelp is seemingly fond of killing children for sport. Better
# check for that.
#
try:
state = obj.getState()
except:
return [None, False]
else:
if state.contains(pyatspi.STATE_DEFUNCT):
#print "goObject: defunct object", obj
debug.printException(debug.LEVEL_SEVERE)
return [None, False]
success = False
wrap = settings.wrappedStructuralNavigation
# Try to find it using Collection first. But don't do this with form
# fields for now. It's a bit faster moving to the next form field,
# but not on pages with huge forms (e.g. bugzilla's advanced search
# page). And due to bug #538680, we definitely don't want to use
# collection to go to the previous chunk or form field.
#
formObjects = [self.BUTTON, self.CHECK_BOX, self.COMBO_BOX,
self.ENTRY, self.FORM_FIELD, self.RADIO_BUTTON]
criteria = None
objType = structuralNavigationObject.objType
if self.collectionEnabled \
and not objType in formObjects \
and (isNext or objType != self.CHUNK):
try:
document = self._getDocument()
collection = document.queryCollection()
if structuralNavigationObject.criteria:
criteria = structuralNavigationObject.criteria(collection,
arg)
except:
debug.printException(debug.LEVEL_SEVERE)
else:
# If the document frame itself contains content and that is
# our current object, querying the collection interface will
# result in our starting at the top when looking for the next
# object rather than the current caret offset. See bug 567984.
#
if isNext \
and self._script.utilities.isSameObject(obj, document):
criteria = None
if criteria:
try:
rule = collection.createMatchRule(criteria.states.raw(),
criteria.matchStates,
criteria.objAttrs,
criteria.matchObjAttrs,
criteria.roles,
criteria.matchRoles,
criteria.interfaces,
criteria.matchInterfaces,
criteria.invert)
if criteria.applyPredicate:
predicate = structuralNavigationObject.predicate
else:
predicate = None
if not isNext:
[obj, wrapped] = self._findPrevByMatchRule(collection,
rule,
wrap,
obj,
predicate)
else:
[obj, wrapped] = self._findNextByMatchRule(collection,
rule,
wrap,
obj,
predicate)
success = True
collection.freeMatchRule(rule)
# print "collection", structuralNavigationObject.objType
except NotImplementedError:
debug.printException(debug.LEVEL_SEVERE)
except:
debug.printException(debug.LEVEL_SEVERE)
collection.freeMatchRule(rule)
# Do it iteratively when Collection failed or is disabled
#
if not success:
pred = structuralNavigationObject.predicate
if not isNext:
[obj, wrapped] = self._findPrevByPredicate(pred, wrap,
obj, arg)
else:
[obj, wrapped] = self._findNextByPredicate(pred, wrap,
obj, arg)
# print "predicate", structuralNavigationObject.objType
if wrapped:
if not isNext:
# Translators: when the user is attempting to locate a
# particular object and the top of a page or list is
# reached without that object being found, we "wrap" to
# the bottom and continue looking upwards. We need to
# inform the user when this is taking place.
#
self._script.presentMessage(_("Wrapping to bottom."))
else:
# Translators: when the user is attempting to locate a
# particular object and the bottom of a page or list is
# reached without that object being found, we "wrap" to the
# top and continue looking downwards. We need to inform the
# user when this is taking place.
#
self._script.presentMessage(_("Wrapping to top."))
structuralNavigationObject.present(obj, arg)
#########################################################################
# #
# Utility Methods for Finding Objects #
# #
#########################################################################
def getCurrentObject(self):
"""Returns the current object. Normally, the locusOfFocus. But
in the case of Gecko, that doesn't always work.
"""
return orca_state.locusOfFocus
def _findPrevByMatchRule(self, collection, matchRule, wrap, currentObj,
predicate=None):
"""Finds the previous object using the given match rule as a
pattern to match or not match.
Arguments:
-collection: the accessible collection interface
-matchRule: the collections match rule to use
-wrap: if True and the bottom of the document is reached, move
to the top and keep looking.
-currentObj: the object from which the search should begin
-predicate: an optional predicate to further test if the item
found via collection is indeed a match.
Returns: [obj, wrapped] where wrapped is a boolean reflecting
whether wrapping took place.
"""
currentObj = currentObj or self.getCurrentObject()
document = self._getDocument()
# If the current object is the document itself, find an actual
# object to use as the starting point. Otherwise we're in
# danger of skipping over the objects in between our present
# location and top of the document.
#
if self._script.utilities.isSameObject(currentObj, document):
currentObj = self._findNextObject(currentObj, document)
ancestors = []
obj = currentObj.parent
if obj.getRole() in [pyatspi.ROLE_LIST, pyatspi.ROLE_TABLE]:
ancestors.append(obj)
else:
while obj:
ancestors.append(obj)
obj = obj.parent
match, wrapped = None, False
results = collection.getMatchesTo(currentObj,
matchRule,
collection.SORT_ORDER_CANONICAL,
collection.TREE_INORDER,
True,
1,
True)
while not match:
if len(results) == 0:
if wrapped or not wrap:
break
elif wrap:
lastObj = self._findLastObject(document)
# Collection does not do an inclusive search, meaning
# that the start object is not part of the search. So
# we need to test the lastobj separately using the given
# matchRule. We don't have this problem for 'Next' because
# the startobj is the doc frame.
#
secondLastObj = self._findPreviousObject(lastObj, document)
results = collection.getMatchesFrom(\
secondLastObj,
matchRule,
collection.SORT_ORDER_CANONICAL,
collection.TREE_INORDER,
1,
True)
wrapped = True
if len(results) > 0 \
and (not predicate or predicate(results[0])):
match = results[0]
else:
results = collection.getMatchesTo(\
lastObj,
matchRule,
collection.SORT_ORDER_CANONICAL,
collection.TREE_INORDER,
True,
1,
True)
elif len(results) > 0:
if results[0] in ancestors \
or predicate and not predicate(results[0]):
results = collection.getMatchesTo(\
results[0],
matchRule,
collection.SORT_ORDER_CANONICAL,
collection.TREE_INORDER,
True,
1,
True)
else:
match = results[0]
return [match, wrapped]
def _findNextByMatchRule(self, collection, matchRule, wrap, currentObj,
predicate=None):
"""Finds the next object using the given match rule as a pattern
to match or not match.
Arguments:
-collection: the accessible collection interface
-matchRule: the collections match rule to use
-wrap: if True and the bottom of the document is reached, move
to the top and keep looking.
-currentObj: the object from which the search should begin
-predicate: an optional predicate to further test if the item
found via collection is indeed a match.
Returns: [obj, wrapped] where wrapped is a boolean reflecting
whether wrapping took place.
"""
currentObj = currentObj or self.getCurrentObject()
ancestors = []
[currentObj, offset] = self._script.getCaretContext()
obj = currentObj.parent
while obj:
ancestors.append(obj)
obj = obj.parent
match, wrapped = None, False
while not match:
results = collection.getMatchesFrom(\
currentObj,
matchRule,
collection.SORT_ORDER_CANONICAL,
collection.TREE_INORDER,
1,
True)
if len(results) > 0 and not results[0] in ancestors:
currentObj = results[0]
if not predicate or predicate(currentObj):
match = currentObj
elif wrap and not wrapped:
wrapped = True
ancestors = [currentObj]
currentObj = self._getDocument()
else:
break
return [match, wrapped]
def _findPrevByPredicate(self, pred, wrap, currentObj=None, arg=None):
"""Finds the caret offset at the beginning of the previous object
using the given predicate as a pattern to match.
Arguments:
-pred: a python callable that takes an accessible argument and
returns true/false based on some match criteria
-wrap: if True and the top of the document is reached, move
to the bottom and keep looking.
-currentObj: the object from which the search should begin
-arg: an additional value to be passed to the predicate
Returns: [obj, wrapped] where wrapped is a boolean reflecting
whether wrapping took place.
"""
currentObj = currentObj or self.getCurrentObject()
document = self._getDocument()
# If the current object is the document itself, find an actual
# object to use as the starting point. Otherwise we're in
# danger of skipping over the objects in between our present
# location and top of the document.
#
if self._script.utilities.isSameObject(currentObj, document):
currentObj = self._findNextObject(currentObj, document)
ancestors = []
nestableRoles = [pyatspi.ROLE_LIST, pyatspi.ROLE_TABLE]
obj = currentObj.parent
while obj:
ancestors.append(obj)
obj = obj.parent
obj = self._findPreviousObject(currentObj, document)
wrapped = obj is None
match = None
if wrapped:
obj = self._findLastObject(document)
while obj and not match:
isNested = (obj != currentObj.parent \
and currentObj.parent.getRole() == obj.getRole() \
and obj.getRole() in nestableRoles)
if (not obj in ancestors or isNested) and pred(obj):
if wrapped \
and self._script.utilities.isSameObject(currentObj, obj):
break
else:
match = obj
else:
obj = self._findPreviousObject(obj, document)
if not obj and wrap and not wrapped:
obj = self._findLastObject(document)
wrapped = True
return [match, wrapped]
def _findNextByPredicate(self, pred, wrap, currentObj=None, arg=None):
"""Finds the caret offset at the beginning of the next object
using the given predicate as a pattern to match or not match.
Arguments:
-pred: a python callable that takes an accessible argument and
returns true/false based on some match criteria
-wrap: if True and the bottom of the document is reached, move
to the top and keep looking.
-currentObj: the object from which the search should begin
-arg: an additional value to be passed to the predicate
Returns: [obj, wrapped] where wrapped is a boolean reflecting
whether wrapping took place.
"""
currentObj = currentObj or self.getCurrentObject()
ancestors = []
obj = currentObj.parent
while obj:
ancestors.append(obj)
obj = obj.parent
document = self._getDocument()
obj = self._findNextObject(currentObj, document)
wrapped = obj is None
match = None
if wrapped:
[obj, offset] = self._getCaretPosition(document)
while obj and not match:
if (not obj in ancestors) and pred(obj, arg):
if wrapped \
and self._script.utilities.isSameObject(currentObj, obj):
break
else:
match = obj
else:
obj = self._findNextObject(obj, document)
if not obj and wrap and not wrapped:
[obj, offset] = self._getCaretPosition(document)
wrapped = True
return [match, wrapped]
def _findPreviousObject(self, obj, stopAncestor):
"""Finds the object prior to this one, where the tree we're
dealing with is a DOM and 'prior' means the previous object
in a linear presentation sense.
Arguments:
-obj: the object where to start.
-stopAncestor: the ancestor at which the search should stop
"""
        # NOTE: This method is based on some initial experimentation
# with OOo structural navigation. It might need refining
# or fixing and is being overridden by the Gecko method
# regardless, so this one can be modified as appropriate.
#
prevObj = None
index = obj.getIndexInParent() - 1
if index >= 0:
prevObj = obj.parent[index]
if prevObj.childCount:
prevObj = prevObj[prevObj.childCount - 1]
elif not self._script.utilities.isSameObject(obj.parent, stopAncestor):
prevObj = obj.parent
return prevObj
def _findNextObject(self, obj, stopAncestor):
"""Finds the object after to this one, where the tree we're
dealing with is a DOM and 'next' means the next object
in a linear presentation sense.
Arguments:
-obj: the object where to start.
-stopAncestor: the ancestor at which the search should stop
"""
        # NOTE: This method is based on some initial experimentation
# with OOo structural navigation. It might need refining
# or fixing and is being overridden by the Gecko method
# regardless, so this one can be modified as appropriate.
#
nextObj = None
if obj and obj.childCount:
nextObj = obj[0]
while obj and obj.parent != obj and not nextObj:
index = obj.getIndexInParent() + 1
if 0 < index < obj.parent.childCount:
nextObj = obj.parent[index]
elif not self._script.utilities.isSameObject(
obj.parent, stopAncestor):
obj = obj.parent
else:
break
return nextObj
def _findLastObject(self, ancestor):
"""Returns the last object in ancestor.
Arguments:
- ancestor: the accessible object whose last (child) object
is sought.
"""
        # NOTE: This method is based on some initial experimentation
# with OOo structural navigation. It might need refining
# or fixing and is being overridden by the Gecko method
# regardless, so this one can be modified as appropriate.
#
if not ancestor or not ancestor.childCount:
return ancestor
lastChild = ancestor[ancestor.childCount - 1]
while lastChild:
lastObj = self._findNextObject(lastChild, ancestor)
if lastObj:
lastChild = lastObj
else:
break
return lastChild
def _getDocument(self):
"""Returns the document or other object in which the object of
interest is contained.
"""
docRoles = [pyatspi.ROLE_DOCUMENT_FRAME]
stopRoles = [pyatspi.ROLE_FRAME, pyatspi.ROLE_SCROLL_PANE]
document = self._script.utilities.ancestorWithRole(
orca_state.locusOfFocus, docRoles, stopRoles)
return document
def _isInDocument(self, obj):
"""Returns True if the accessible object obj is inside of
the document.
Arguments:
-obj: the accessible object of interest.
"""
document = self._getDocument()
while obj and obj.parent:
if self._script.utilities.isSameObject(obj.parent, document):
return True
else:
obj = obj.parent
return False
def _isUselessObject(self, obj):
"""Returns True if the accessible object obj is an object
that doesn't have any meaning associated with it. Individual
scripts should override this method as needed. Gecko does.
Arguments:
- obj: the accessible object of interest.
"""
return False
#########################################################################
# #
# Methods for Presenting Objects #
# #
#########################################################################
def _getTableCaption(self, obj):
"""Returns a string which contains the table caption, or
None if a caption could not be found.
Arguments:
- obj: the accessible table whose caption we want.
"""
caption = obj.queryTable().caption
try:
caption.queryText()
except:
return None
else:
return self._script.utilities.displayedText(caption)
def _getTableDescription(self, obj):
"""Returns a string which describes the table."""
nonUniformString = ""
nonUniform = self._isNonUniformTable(obj)
if nonUniform:
# Translators: a uniform table is one in which each table
# cell occupies one row and one column (i.e. a perfect grid)
# In contrast, a non-uniform table is one in which at least
# one table cell occupies more than one row and/or column.
#
nonUniformString = _("Non-uniform") + " "
table = obj.queryTable()
nRows = table.nRows
nColumns = table.nColumns
# Translators: this represents the number of rows in a table.
#
rowString = ngettext("table with %d row",
"table with %d rows",
nRows) % nRows
# Translators: this represents the number of columns in a table.
#
colString = ngettext("%d column",
"%d columns",
nColumns) % nColumns
return (nonUniformString + rowString + " " + colString)
def _isNonUniformTable(self, obj):
"""Returns True if the obj is a non-uniform table (i.e. a table
where at least one cell spans multiple rows and/or columns).
Arguments:
- obj: the table to examine
"""
try:
table = obj.queryTable()
except:
pass
else:
for i in xrange(obj.childCount):
[isCell, row, col, rowExtents, colExtents, isSelected] = \
table.getRowColumnExtentsAtIndex(i)
if (rowExtents > 1) or (colExtents > 1):
return True
return False
def getCellForObj(self, obj):
"""Looks for a table cell in the ancestry of obj, if obj is not a
table cell.
Arguments:
- obj: the accessible object of interest.
"""
cellRoles = [pyatspi.ROLE_TABLE_CELL, pyatspi.ROLE_COLUMN_HEADER]
if obj and not obj.getRole() in cellRoles:
document = self._getDocument()
obj = self._script.utilities.ancestorWithRole(
obj, cellRoles, [document.getRole()])
return obj
def getTableForCell(self, obj):
"""Looks for a table in the ancestry of obj, if obj is not a table.
Arguments:
- obj: the accessible object of interest.
"""
if obj and obj.getRole() != pyatspi.ROLE_TABLE:
document = self._getDocument()
obj = self._script.utilities.ancestorWithRole(
obj, [pyatspi.ROLE_TABLE], [document.getRole()])
return obj
def _isBlankCell(self, obj):
"""Returns True if the table cell is empty or consists of whitespace.
Arguments:
        - obj: the accessible table cell to examine
"""
if obj and obj.getRole() == pyatspi.ROLE_COLUMN_HEADER and obj.name:
return False
text = self._script.utilities.displayedText(obj)
if text and len(text.strip()) and text != obj.name:
return False
else:
for child in obj:
text = self._script.utilities.displayedText(child)
if text and len(text.strip()) \
or child.getRole() == pyatspi.ROLE_LINK:
return False
return True
def _getCellText(self, obj):
"""Looks at the table cell and tries to get its text.
Arguments:
        - obj: the accessible table cell to examine
"""
text = ""
if obj and not obj.childCount:
text = self._script.utilities.displayedText(obj)
else:
for child in obj:
childText = self._script.utilities.displayedText(child)
text = self._script.utilities.appendString(text, childText)
return text
def _presentCellHeaders(self, cell, oldCellInfo):
"""Speaks the headers of the accessible table cell, cell.
Arguments:
- cell: the accessible table cell whose headers we wish to
present.
- oldCellInfo: [rowDiff, colDiff, oldRowHeaders, oldColHeaders]
"""
if not cell or not oldCellInfo:
return
rowDiff, colDiff, oldRowHeaders, oldColHeaders = oldCellInfo
if not (oldRowHeaders or oldColHeaders):
return
# We only want to speak the header information that has
# changed, and we don't want to speak headers if we're in
# a header row/col.
#
if rowDiff and not self._isInHeaderRow(cell):
rowHeaders = self._getRowHeaders(cell)
for header in rowHeaders:
if not header in oldRowHeaders:
text = self._getCellText(header)
speech.speak(text)
if colDiff and not self._isInHeaderColumn(cell):
colHeaders = self._getColumnHeaders(cell)
for header in colHeaders:
if not header in oldColHeaders:
text = self._getCellText(header)
speech.speak(text)
def _getCellSpanInfo(self, obj):
"""Returns a string reflecting the number of rows and/or columns
spanned by a table cell when multiple rows and/or columns are
spanned.
Arguments:
- obj: the accessible table cell whose cell span we want.
"""
if not obj or (obj.getRole() != pyatspi.ROLE_TABLE_CELL):
return
parentTable = self.getTableForCell(obj)
try:
table = parentTable.queryTable()
except:
return
[row, col] = self.getCellCoordinates(obj)
rowspan = table.getRowExtentAt(row, col)
colspan = table.getColumnExtentAt(row, col)
spanString = ""
if (colspan > 1) and (rowspan > 1):
# Translators: The cell here refers to a cell within a table
# within a document. We need to announce when the cell occupies
# or "spans" more than a single row and/or column.
#
spanString = ngettext("Cell spans %d row",
"Cell spans %d rows",
rowspan) % rowspan
# Translators: this represents the number of columns in a table.
#
spanString += ngettext(" %d column",
" %d columns",
colspan) % colspan
elif (colspan > 1):
# Translators: The cell here refers to a cell within a table
# within a document. We need to announce when the cell occupies
# or "spans" more than a single row and/or column.
#
spanString = ngettext("Cell spans %d column",
"Cell spans %d columns",
colspan) % colspan
elif (rowspan > 1):
# Translators: The cell here refers to a cell within a table
# within a document. We need to announce when the cell occupies
# or "spans" more than a single row and/or column.
#
spanString = ngettext("Cell spans %d row",
"Cell spans %d rows",
rowspan) % rowspan
return spanString
def getCellCoordinates(self, obj):
"""Returns the [row, col] of a ROLE_TABLE_CELL or [-1, -1]
if the coordinates cannot be found.
Arguments:
- obj: the accessible table cell whose coordinates we want.
"""
obj = self.getCellForObj(obj)
parent = self.getTableForCell(obj)
try:
table = parent.queryTable()
except:
pass
else:
# If we're in a cell that spans multiple rows and/or columns,
# thisRow and thisCol will refer to the upper left cell in
# the spanned range(s). We're storing the lastTableCell that
# we're aware of in order to facilitate more linear movement.
# Therefore, if the lastTableCell and this table cell are the
# same cell, we'll go with the stored coordinates.
#
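            # For example, if a cell spans rows 2-4 and the user arrowed
            # into it while on row 3, the stored coordinates keep us on
            # row 3, so that moving down lands on row 4 instead of
            # snapping back to the top of the spanned range.
            #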
lastRow, lastCol = self.lastTableCell
lastKnownCell = table.getAccessibleAt(lastRow, lastCol)
if self._script.utilities.isSameObject(lastKnownCell, obj):
return [lastRow, lastCol]
else:
index = self._script.utilities.cellIndex(obj)
thisRow = table.getRowAtIndex(index)
thisCol = table.getColumnAtIndex(index)
return [thisRow, thisCol]
return [-1, -1]
def _getRowHeaders(self, obj):
"""Returns a list of table cells that serve as a row header for
the specified TABLE_CELL.
Arguments:
- obj: the accessible table cell whose header(s) we want.
"""
rowHeaders = []
if not obj:
return rowHeaders
parentTable = self.getTableForCell(obj)
try:
table = parentTable.queryTable()
except:
pass
else:
[row, col] = self.getCellCoordinates(obj)
# Theoretically, we should be able to quickly get the text
# of a {row, column}Header via get{Row,Column}Description().
# Gecko doesn't expose the information that way, however.
# get{Row,Column}Header seems to work sometimes.
#
header = table.getRowHeader(row)
if header:
rowHeaders.append(header)
# Headers that are strictly marked up with <th> do not seem
# to be exposed through get{Row, Column}Header.
#
else:
# If our cell spans multiple rows, we want to get all of
# the headers that apply.
#
rowspan = table.getRowExtentAt(row, col)
for r in range(row, row+rowspan):
# We could have multiple headers for a given row, one
# header per column. Presumably all of the headers are
# prior to our present location.
#
for c in range(0, col):
cell = table.getAccessibleAt(r, c)
                        if self._isHeader(cell) and cell not in rowHeaders:
rowHeaders.append(cell)
return rowHeaders
def _getColumnHeaders(self, obj):
"""Returns a list of table cells that serve as a column header for
the specified TABLE_CELL.
Arguments:
- obj: the accessible table cell whose header(s) we want.
"""
columnHeaders = []
if not obj:
return columnHeaders
parentTable = self.getTableForCell(obj)
try:
table = parentTable.queryTable()
except:
pass
else:
[row, col] = self.getCellCoordinates(obj)
# Theoretically, we should be able to quickly get the text
# of a {row, column}Header via get{Row,Column}Description().
# Gecko doesn't expose the information that way, however.
# get{Row,Column}Header seems to work sometimes.
#
header = table.getColumnHeader(col)
if header:
columnHeaders.append(header)
# Headers that are strictly marked up with <th> do not seem
# to be exposed through get{Row, Column}Header.
#
else:
# If our cell spans multiple columns, we want to get all of
# the headers that apply.
#
colspan = table.getColumnExtentAt(row, col)
for c in range(col, col+colspan):
# We could have multiple headers for a given column, one
# header per row. Presumably all of the headers are
# prior to our present location.
#
for r in range(0, row):
cell = table.getAccessibleAt(r, c)
                        if self._isHeader(cell) and cell not in columnHeaders:
columnHeaders.append(cell)
return columnHeaders
def _isInHeaderRow(self, obj):
"""Returns True if all of the cells in the same row as this cell are
headers.
Arguments:
- obj: the accessible table cell whose row is to be examined.
"""
if obj and obj.getRole() == pyatspi.ROLE_TABLE_CELL:
parentTable = self.getTableForCell(obj)
try:
table = parentTable.queryTable()
except:
return True
index = self._script.utilities.cellIndex(obj)
row = table.getRowAtIndex(index)
for col in xrange(table.nColumns):
cell = table.getAccessibleAt(row, col)
if not self._isHeader(cell):
return False
return True
def _isInHeaderColumn(self, obj):
"""Returns True if all of the cells in the same column as this cell
are headers.
Arguments:
- obj: the accessible table cell whose column is to be examined.
"""
if obj and obj.getRole() == pyatspi.ROLE_TABLE_CELL:
parentTable = self.getTableForCell(obj)
try:
table = parentTable.queryTable()
except:
return True
index = self._script.utilities.cellIndex(obj)
col = table.getColumnAtIndex(index)
for row in xrange(table.nRows):
cell = table.getAccessibleAt(row, col)
if not self._isHeader(cell):
return False
return True
def _isHeader(self, obj):
"""Returns True if the table cell is a header.
Arguments:
- obj: the accessible table cell to examine.
"""
if not obj:
return False
elif obj.getRole() in [pyatspi.ROLE_TABLE_COLUMN_HEADER,
pyatspi.ROLE_TABLE_ROW_HEADER,
pyatspi.ROLE_COLUMN_HEADER]:
return True
else:
attributes = obj.getAttributes()
if attributes:
for attribute in attributes:
if attribute == "tag:TH":
return True
return False
def _getHeadingLevel(self, obj):
"""Determines the heading level of the given object. A value
of 0 means there is no heading level.
Arguments:
- obj: the accessible whose heading level we want.
"""
level = 0
if obj is None:
return level
if obj.getRole() == pyatspi.ROLE_HEADING:
attributes = obj.getAttributes()
if attributes is None:
return level
for attribute in attributes:
if attribute.startswith("level:"):
level = int(attribute.split(":")[1])
break
return level
def _getCaretPosition(self, obj):
"""Returns the [obj, characterOffset] where the caret should be
positioned. For most scripts, the object should not change and
the offset should be 0. That's not always the case with Gecko.
Arguments:
- obj: the accessible object in which the caret should be
positioned.
"""
return [obj, 0]
def _setCaretPosition(self, obj, characterOffset):
"""Sets the caret at the specified offset within obj.
Arguments:
- obj: the accessible object in which the caret should be
positioned.
- characterOffset: the offset at which to position the caret.
"""
try:
text = obj.queryText()
text.setCaretOffset(characterOffset)
except NotImplementedError:
try:
obj.queryComponent().grabFocus()
except:
debug.printException(debug.LEVEL_SEVERE)
except:
debug.printException(debug.LEVEL_SEVERE)
orca.setLocusOfFocus(None, obj, notifyScript=False)
def _presentLine(self, obj, offset):
"""Presents the first line of the object to the user.
Arguments:
- obj: the accessible object to be presented.
- offset: the character offset within obj.
"""
self._script.updateBraille(obj)
self._script.sayLine(obj)
def _presentObject(self, obj, offset):
"""Presents the entire object to the user.
Arguments:
- obj: the accessible object to be presented.
- offset: the character offset within obj.
"""
self._script.updateBraille(obj)
# [[[TODO: WDW - move the voice selection to formatting.py
# at some point.]]]
#
voices = self._script.voices
if obj.getRole() == pyatspi.ROLE_LINK:
voice = voices[settings.HYPERLINK_VOICE]
else:
voice = voices[settings.DEFAULT_VOICE]
utterances = self._script.speechGenerator.generateSpeech(obj)
speech.speak(utterances, voice)
#########################################################################
# #
# Objects #
# #
#########################################################################
# All structural navigation objects have the following essential
# characteristics:
#
# 1. Keybindings for goPrevious, goNext, and other such methods
# 2. A means of identification (at least a predicate and possibly
# also criteria for generating a collection match rule)
# 3. A definition of how the object should be presented (both
# when another instance of that object is found as well as
# when it is not)
#
# Convenience methods have been put into place whereby one can
# create an object (FOO = "foo"), and then provide the following
# methods: _fooBindings(), _fooPredicate(), _fooCriteria(), and
# _fooPresentation(). With these in place, and with the object
# FOO included among the StructuralNavigation.enabledTypes for
# the script, the structural navigation object should be created
# and set up automagically. At least that is the idea. :-) This
# hopefully will also enable easy re-definition of existing
# objects on a script-by-script basis. For instance, in the
# StarOffice script, overriding the _blockquotePredicate should
# be all that is needed to implement navigation by blockquote
# in OOo Writer documents.
#
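    # As a purely illustrative sketch of that convention (the "sidebar"
    # object below does not exist in this module; its name, "z" keysym,
    # and predicate are invented for this example), one would write:
    #
    #   def _sidebarBindings(self):
    #       bindings = {}
    #       prevDesc = _("Goes to previous sidebar.")
    #       bindings["previous"] = \
    #           ["z", settings.SHIFT_MODIFIER_MASK, prevDesc]
    #       nextDesc = _("Goes to next sidebar.")
    #       bindings["next"] = ["z", settings.NO_MODIFIER_MASK, nextDesc]
    #       return bindings
    #
    #   def _sidebarPredicate(self, obj, arg=None):
    #       return obj is not None \
    #           and 'xml-roles:sidebar' in obj.getAttributes()
    #
    # together with _sidebarCriteria and _sidebarPresentation along the
    # same lines, and with SIDEBAR = "sidebar" included among the
    # script's StructuralNavigation.enabledTypes.
    #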
########################
# #
# Anchors #
# #
########################
def _anchorBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst anchors.
"""
# NOTE: This doesn't handle the case where the anchor is not an
# old-school <a name/id="foo"></a> anchor. For instance on the
# GNOME wiki, an "anchor" is actually an id applied to some other
        # tag (e.g. <h2 id="foo">My Heading</h2>). We'll have to be a
# bit more clever for those. With the old-school anchors, this
# seems to work nicely and provides the user with a way to jump
# among defined areas without having to find a Table of Contents
# group of links (assuming such a thing is even present on the
# page).
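        # In other words, <a name="section2"></a> is the sort of anchor
        # this navigation will find; <h2 id="section2">Section 2</h2>
        # is not.
        #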
bindings = {}
# Translators: this is for navigating among anchors in a document.
# An anchor is a named spot that one can jump to.
#
prevDesc = _("Goes to previous anchor.")
bindings["previous"] = ["a", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among anchors in a document.
# An anchor is a named spot that one can jump to.
#
nextDesc = _("Goes to next anchor.")
bindings["next"] = ["a", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _anchorCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating anchors
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_LINK]
state = [pyatspi.STATE_FOCUSABLE]
stateMatch = collection.MATCH_NONE
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role)
def _anchorPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is an anchor.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_LINK:
state = obj.getState()
isMatch = not state.contains(pyatspi.STATE_FOCUSABLE)
return isMatch
def _anchorPresentation(self, obj, arg=None):
"""Presents the anchor or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
self._presentObject(obj, characterOffset)
else:
# Translators: this is for navigating document content by
# moving from anchor to anchor. (An anchor is a named spot
# that one can jump to.) This is a detailed message which
# will be presented to the user if no more anchors can be found.
#
full = _("No more anchors.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Blockquotes #
# #
########################
def _blockquoteBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating among blockquotes.
"""
bindings = {}
# Translators: this is for navigating among blockquotes in a
# document.
#
prevDesc = _("Goes to previous blockquote.")
bindings["previous"] = ["q", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among blockquotes in a
# document.
#
nextDesc = _("Goes to next blockquote.")
bindings["next"] = ["q", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _blockquoteCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating blockquotes
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
attrs = ['tag:BLOCKQUOTE']
return MatchCriteria(collection, objAttrs=attrs)
def _blockquotePredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a blockquote.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if not obj:
return False
attributes = obj.getAttributes()
if attributes:
for attribute in attributes:
if attribute == "tag:BLOCKQUOTE":
return True
return False
def _blockquotePresentation(self, obj, arg=None):
"""Presents the blockquote or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
# TODO: We currently present the line, so that's kept here.
# But we should probably present the object, which would
# be consistent with the change made recently for headings.
#
self._presentLine(obj, characterOffset)
else:
# Translators: this is for navigating document content by
# moving from blockquote to blockquote. This is a detailed
# message which will be presented to the user if no more
# blockquotes can be found.
#
full = _("No more blockquotes.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Buttons #
# #
########################
def _buttonBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst buttons.
"""
bindings = {}
# Translators: this is for navigating among buttons in a form
# within a document.
#
prevDesc = _("Goes to previous button.")
bindings["previous"] = ["b", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among buttons in a form
# within a document.
#
nextDesc = _("Goes to next button.")
bindings["next"] = ["b", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _buttonCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating buttons
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_PUSH_BUTTON]
state = [pyatspi.STATE_FOCUSABLE, pyatspi.STATE_SENSITIVE]
stateMatch = collection.MATCH_ALL
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role)
def _buttonPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a button.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_PUSH_BUTTON:
state = obj.getState()
isMatch = state.contains(pyatspi.STATE_FOCUSABLE) \
and state.contains(pyatspi.STATE_SENSITIVE)
return isMatch
def _buttonPresentation(self, obj, arg=None):
"""Presents the button or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
obj.queryComponent().grabFocus()
else:
# Translators: this is for navigating document content by
# moving from push button to push button in a form. This is
# a detailed message which will be presented to the user if
# no more push buttons can be found.
#
full = _("No more buttons.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Check boxes #
# #
########################
def _checkBoxBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst check boxes.
"""
bindings = {}
# Translators: this is for navigating among check boxes in a form
# within a document.
#
prevDesc = _("Goes to previous check box.")
bindings["previous"] = ["x", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among check boxes in a form
# within a document.
#
nextDesc = _("Goes to next check box.")
bindings["next"] = ["x", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _checkBoxCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating check boxes
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_CHECK_BOX]
state = [pyatspi.STATE_FOCUSABLE, pyatspi.STATE_SENSITIVE]
stateMatch = collection.MATCH_ALL
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role)
def _checkBoxPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a check box.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_CHECK_BOX:
state = obj.getState()
isMatch = state.contains(pyatspi.STATE_FOCUSABLE) \
and state.contains(pyatspi.STATE_SENSITIVE)
return isMatch
def _checkBoxPresentation(self, obj, arg=None):
"""Presents the check box or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
obj.queryComponent().grabFocus()
else:
# Translators: this is for navigating document content by
# moving from checkbox to checkbox in a form. This is a
# detailed message which will be presented to the user if
# no more checkboxes can be found.
#
full = _("No more check boxes.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Chunks/Large Objects #
# #
########################
def _chunkBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst chunks/large objects.
"""
bindings = {}
# Translators: this is for navigating a document in a
# structural manner, where a 'large object' is a logical
# chunk of text, such as a paragraph, a list, a table, etc.
#
prevDesc = _("Goes to previous large object.")
bindings["previous"] = ["o", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating a document in a
# structural manner, where a 'large object' is a logical
# chunk of text, such as a paragraph, a list, a table, etc.
#
nextDesc = _("Goes to next large object.")
bindings["next"] = ["o", settings.NO_MODIFIER_MASK, nextDesc]
# I don't think it makes sense to add support for a list
# of chunks. But one could always change that here.
#
return bindings
def _chunkCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating chunks/
large objects by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = self.OBJECT_ROLES
roleMatch = collection.MATCH_ANY
return MatchCriteria(collection,
roles=role,
matchRoles=roleMatch,
applyPredicate=True)
def _chunkPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a chunk.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() in self.OBJECT_ROLES:
try:
text = obj.queryText()
characterCount = text.characterCount
except:
characterCount = 0
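            # largeObjectTextLength is the user-configurable character
            # threshold (75 by default) separating "large" objects from
            # smaller ones.
            #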
if characterCount > settings.largeObjectTextLength \
and not self._isUselessObject(obj):
isMatch = True
return isMatch
def _chunkPresentation(self, obj, arg=None):
"""Presents the chunk or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[newObj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(newObj, characterOffset)
self._presentObject(obj, 0)
else:
# Translators: this is for navigating document content by
# moving from 'large object' to 'large object'. A 'large
# object' is a logical chunk of text, such as a paragraph,
# a list, a table, etc. This is a detailed message which
# will be presented to the user if no more large objects
# can be found.
#
full = _("No more large objects.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Combo Boxes #
# #
########################
def _comboBoxBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst combo boxes.
"""
bindings = {}
# Translators: this is for navigating among combo boxes in a form
# within a document.
#
prevDesc = _("Goes to previous combo box.")
bindings["previous"] = ["c", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among combo boxes in a form
# within a document.
#
nextDesc = _("Goes to next combo box.")
bindings["next"] = ["c", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _comboBoxCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating combo boxes
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_COMBO_BOX]
state = [pyatspi.STATE_FOCUSABLE, pyatspi.STATE_SENSITIVE]
stateMatch = collection.MATCH_ALL
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role)
def _comboBoxPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a combo box.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_COMBO_BOX:
state = obj.getState()
isMatch = state.contains(pyatspi.STATE_FOCUSABLE) \
and state.contains(pyatspi.STATE_SENSITIVE)
return isMatch
def _comboBoxPresentation(self, obj, arg=None):
"""Presents the combo box or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
obj.queryComponent().grabFocus()
else:
# Translators: this is for navigating document content by
# moving from combo box to combo box in a form. This is a
# detailed message which will be presented to the user if
            # no more combo boxes can be found.
#
full = _("No more combo boxes.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Entries #
# #
########################
def _entryBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst entries.
"""
bindings = {}
# Translators: this is for navigating among text entries in a form
# within a document.
#
prevDesc = _("Goes to previous entry.")
bindings["previous"] = ["e", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among text entries
# in a form.
#
nextDesc = _("Goes to next entry.")
bindings["next"] = ["e", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _entryCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating entries
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_DOCUMENT_FRAME,
pyatspi.ROLE_ENTRY,
pyatspi.ROLE_PASSWORD_TEXT,
pyatspi.ROLE_TEXT]
roleMatch = collection.MATCH_ANY
state = [pyatspi.STATE_FOCUSABLE,
pyatspi.STATE_SENSITIVE,
pyatspi.STATE_EDITABLE]
stateMatch = collection.MATCH_ALL
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role,
matchRoles=roleMatch,
applyPredicate=True)
def _entryPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is an entry.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() in [pyatspi.ROLE_DOCUMENT_FRAME,
pyatspi.ROLE_ENTRY,
pyatspi.ROLE_PASSWORD_TEXT,
pyatspi.ROLE_TEXT]:
state = obj.getState()
isMatch = state.contains(pyatspi.STATE_FOCUSABLE) \
and state.contains(pyatspi.STATE_SENSITIVE) \
and state.contains(pyatspi.STATE_EDITABLE)
return isMatch
def _entryPresentation(self, obj, arg=None):
"""Presents the entry or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
obj.queryComponent().grabFocus()
else:
# Translators: this is for navigating document content by
# moving from text entry to text entry in a form. This is
# a detailed message which will be presented to the user if
# no more text entries can be found.
#
full = _("No more entries.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Form Fields #
# #
########################
def _formFieldBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst form fields.
"""
bindings = {}
# Translators: this is for navigating among fields in a form within
# a document.
#
prevDesc = _("Goes to previous form field.")
bindings["previous"] = ["Tab",
settings.ORCA_SHIFT_MODIFIER_MASK,
prevDesc]
# Translators: this is for navigating among fields in a form within
# a document.
#
nextDesc = _("Goes to next form field.")
bindings["next"] = ["Tab", settings.ORCA_MODIFIER_MASK, nextDesc]
return bindings
def _formFieldCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating form fields
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = self.FORM_ROLES
roleMatch = collection.MATCH_ANY
state = [pyatspi.STATE_FOCUSABLE, pyatspi.STATE_SENSITIVE]
stateMatch = collection.MATCH_ALL
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role,
matchRoles=roleMatch,
applyPredicate=True)
def _formFieldPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a form field.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() in self.FORM_ROLES:
state = obj.getState()
isMatch = state.contains(pyatspi.STATE_FOCUSABLE) \
and state.contains(pyatspi.STATE_SENSITIVE)
return isMatch
def _formFieldPresentation(self, obj, arg=None):
"""Presents the form field or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
if obj.getRole() in [pyatspi.ROLE_LIST, pyatspi.ROLE_COMBO_BOX]:
obj.queryComponent().grabFocus()
else:
# TODO: I think we should just grab focus on the object
# regardless of the object type. But that's not what we
# do now, and it causes an extra newline character to show
# up in the regression test output for entries, so for the
# purpose of passing the regression tests, I'm not making
# that change yet.
#
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
self._presentObject(obj, characterOffset)
else:
# Translators: this is for navigating document content by
            # moving from form field to form field. This is a detailed
            # message which will be presented to the user if no more form
            # fields can be found.
#
full = _("No more form fields.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Headings #
# #
########################
def _headingBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst headings.
"""
bindings = {}
# Translators: this is for navigating in a document by heading.
# (e.g. <h1>)
#
prevDesc = _("Goes to previous heading.")
bindings["previous"] = ["h", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating in a document by heading.
# (e.g., <h1>)
#
nextDesc = _("Goes to next heading.")
bindings["next"] = ["h", settings.NO_MODIFIER_MASK, nextDesc]
prevAtLevelBindings = []
nextAtLevelBindings = []
minLevel, maxLevel = self._headingLevels()
for i in range(minLevel, maxLevel + 1):
# Translators: this is for navigating in a document by heading.
# (e.g. <h1> is a heading at level 1).
#
prevDesc = _("Goes to previous heading at level %d.") % i
prevAtLevelBindings.append([str(i),
settings.SHIFT_MODIFIER_MASK,
prevDesc])
# Translators: this is for navigating in a document by heading.
# (e.g. <h1> is a heading at level 1).
#
nextDesc = _("Goes to next heading at level %d.") % i
nextAtLevelBindings.append([str(i),
settings.NO_MODIFIER_MASK,
nextDesc])
bindings["previousAtLevel"] = prevAtLevelBindings
bindings["nextAtLevel"] = nextAtLevelBindings
return bindings
def _headingLevels(self):
"""Returns the [minimum heading level, maximum heading level]
which should be navigable via structural navigation.
"""
return [1, 6]
def _headingCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating headings
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_HEADING]
attrs = []
if arg:
attrs.append('level:%d' % arg)
return MatchCriteria(collection,
roles=role,
objAttrs=attrs)
def _headingPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a heading.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_HEADING:
if arg:
isMatch = (arg == self._getHeadingLevel(obj))
else:
isMatch = True
return isMatch
def _headingPresentation(self, obj, arg=None):
"""Presents the heading or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
self._presentObject(obj, characterOffset)
elif not arg:
# Translators: this is for navigating HTML content by moving from
# heading to heading (e.g. <h1>, <h2>, etc). This string is the
# detailed message which Orca will present if there are no more
# headings found.
#
full = _("No more headings.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
else:
# Translators: this is for navigating HTML content by moving from
# heading to heading at a particular level (i.e. only <h1> or only
# <h2>, etc.) This string is the detailed message which Orca will
# present if there are no more headings found at the desired level.
#
full = _("No more headings at level %d.") % arg
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Landmarks #
# #
########################
def _landmarkBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst landmarks.
"""
bindings = {}
# Translators: this is for navigating to the previous ARIA
# role landmark. ARIA role landmarks are the W3C defined
# HTML tag attribute 'role' used to identify important part
# of webpage like banners, main context, search etc.
#
prevDesc = _("Goes to previous landmark.")
bindings["previous"] = ["m", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating to the next ARIA
# role landmark. ARIA role landmarks are the W3C defined
# HTML tag attribute 'role' used to identify important part
# of webpage like banners, main context, search etc.
#
nextDesc = _("Goes to next landmark.")
bindings["next"] = ["m", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _landmarkCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating landmarks
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
# NOTE: there is a limitation in the AT-SPI Collections interface
# when it comes to an attribute whose value can be a list. For
        # example, the xml-roles attribute can be a space-separated list
# of roles. We'd like to make a match if the xml-roles attribute
# has one (or any) of the roles we care about. Instead, we're
# restricted to an exact match. So, the below will only work in
# the cases where the xml-roles attribute value consists solely of a
# single role. In practice, this seems to be the case that we run
# into for the landmark roles.
#
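        # For example, 'xml-roles:banner' will match an element whose
        # xml-roles value is exactly "banner", but not one whose value
        # is "banner search".
        #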
attrs = []
for landmark in settings.ariaLandmarks:
attrs.append('xml-roles:' + landmark)
return MatchCriteria(collection, objAttrs=attrs)
def _landmarkPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a landmark.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj is None:
return False
attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
try:
            # xml-roles can hold a space-separated list of role names,
            # so split the attribute value before intersecting; without
            # the split we would be intersecting a set of individual
            # characters with the set of landmark names.
            #
            if set(attrs['xml-roles'].split()).intersection(\
                set(settings.ariaLandmarks)):
return True
else:
return False
except KeyError:
return False
def _landmarkPresentation(self, obj, arg=None):
"""Presents the landmark or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
self._presentObject(obj, characterOffset)
else:
# Translators: this is for navigating to the previous ARIA
# role landmark. ARIA role landmarks are the W3C defined
# HTML tag attribute 'role' used to identify important part
# of webpage like banners, main context, search etc. This
# is an indication that one was not found.
#
full = _("No landmark found.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Lists #
# #
########################
def _listBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst (un)ordered lists.
"""
bindings = {}
# Translators: this is for navigating among bulleted/numbered
# lists in a document.
#
prevDesc = _("Goes to previous list.")
bindings["previous"] = ["l", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among bulleted/numbered
# lists in a document.
#
nextDesc = _("Goes to next list.")
bindings["next"] = ["l", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _listCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating (un)ordered
lists by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_LIST]
state = [pyatspi.STATE_FOCUSABLE]
stateMatch = collection.MATCH_NONE
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role)
def _listPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is an (un)ordered list.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_LIST:
isMatch = not obj.getState().contains(pyatspi.STATE_FOCUSABLE)
return isMatch
def _listPresentation(self, obj, arg=None):
"""Presents the (un)ordered list or indicates that one was not
found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
# TODO: Ultimately it should be the job of the speech (and braille)
# generator to present things like this.
#
if obj:
nItems = 0
for child in obj:
if child.getRole() == pyatspi.ROLE_LIST_ITEM:
nItems += 1
# Translators: this represents a list in HTML.
#
itemString = ngettext("List with %d item",
"List with %d items",
nItems) % nItems
self._script.presentMessage(itemString)
nestingLevel = 0
parent = obj.parent
while parent.getRole() == pyatspi.ROLE_LIST:
nestingLevel += 1
parent = parent.parent
if nestingLevel:
# Translators: this represents a list item in a document.
# The nesting level is how 'deep' the item is (e.g., a
# level of 2 represents a list item inside a list that's
# inside another list).
#
self._script.presentMessage(_("Nesting level %d") % \
nestingLevel)
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
self._presentLine(obj, characterOffset)
else:
# Translators: this is for navigating document content by moving
# from bulleted/numbered list to bulleted/numbered list. This
# string is the detailed message which Orca will present if there
# are no more lists found.
#
full = _("No more lists.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# List Items #
# #
########################
def _listItemBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst items in an (un)ordered list.
"""
bindings = {}
# Translators: this is for navigating among bulleted/numbered list
# items in a document.
#
prevDesc = _("Goes to previous list item.")
bindings["previous"] = ["i", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among bulleted/numbered list
# items in a document.
#
nextDesc = _("Goes to next list item.")
bindings["next"] = ["i", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _listItemCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating items in an
(un)ordered list by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_LIST_ITEM]
state = [pyatspi.STATE_FOCUSABLE]
stateMatch = collection.MATCH_NONE
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role)
def _listItemPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is an item in an (un)ordered list.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_LIST_ITEM:
isMatch = not obj.getState().contains(pyatspi.STATE_FOCUSABLE)
return isMatch
def _listItemPresentation(self, obj, arg=None):
"""Presents the (un)ordered list item or indicates that one was not
found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
# TODO: We currently present the line, so that's kept here.
# But we should probably present the object, which would
# be consistent with the change made recently for headings.
#
self._presentLine(obj, characterOffset)
else:
# Translators: this is for navigating document content by
# moving from bulleted/numbered list item to bulleted/
# numbered list item. This string is the detailed message
# which Orca will present if there are no more list items found.
#
full = _("No more list items.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Live Regions #
# #
########################
def _liveRegionBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst live regions.
"""
bindings = {}
# Translators: this is for navigating between live regions
#
prevDesc = _("Goes to previous live region.")
bindings["previous"] = ["d", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating between live regions
#
nextDesc = _("Goes to next live region.")
bindings["next"] = ["d", settings.NO_MODIFIER_MASK, nextDesc]
# Translators: this is for navigating to the last live region
# which made an announcement.
#
desc = _("Goes to the last live region which made an announcement.")
bindings["last"] = ["y", settings.NO_MODIFIER_MASK, desc]
return bindings
def _liveRegionPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a live region.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
regobjs = self._script.liveMngr.getLiveNoneObjects()
if self._script.liveMngr.matchLiveRegion(obj) or obj in regobjs:
isMatch = True
return isMatch
def _liveRegionPresentation(self, obj, arg=None):
"""Presents the live region or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
# TODO: We don't want to move to a list item.
# Is this the best place to handle this?
#
if obj.getRole() == pyatspi.ROLE_LIST:
characterOffset = 0
else:
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
self._presentObject(obj, characterOffset)
# For debugging
#
self._script.outlineAccessible(obj)
else:
# Translators: this is for navigating HTML in a structural
# manner, where a 'live region' is a location in a web page
            # that is updated without having to refresh the entire page.
#
full = _("No more live regions.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Paragraphs #
# #
########################
def _paragraphBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst paragraphs.
"""
bindings = {}
# Translators: this is for navigating among paragraphs in a document.
#
prevDesc = _("Goes to previous paragraph.")
bindings["previous"] = ["p", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among paragraphs in a document.
#
nextDesc = _("Goes to next paragraph.")
bindings["next"] = ["p", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _paragraphCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating paragraphs
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_PARAGRAPH]
return MatchCriteria(collection, roles=role, applyPredicate=True)
def _paragraphPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a paragraph.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_PARAGRAPH:
try:
text = obj.queryText()
# We're choosing 3 characters as the minimum because some
# paragraphs contain a single image or link and a text
# of length 2: An embedded object character and a space.
# We want to skip these.
#
isMatch = text.characterCount > 2
except:
pass
return isMatch
def _paragraphPresentation(self, obj, arg=None):
"""Presents the paragraph or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[newObj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(newObj, characterOffset)
self._presentObject(obj, 0)
else:
# Translators: this is for navigating document content by moving
# from paragraph to paragraph. This string is the detailed message
# which Orca will present if there are no more paragraphs found.
#
full = _("No more paragraphs.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Radio Buttons #
# #
########################
def _radioButtonBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst radio buttons.
"""
bindings = {}
# Translators: this is for navigating among radio buttons in a
# form within a document.
#
prevDesc = _("Goes to previous radio button.")
bindings["previous"] = ["r", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among radio buttons in a
# form within a document.
#
nextDesc = _("Goes to next radio button.")
bindings["next"] = ["r", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _radioButtonCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating radio buttons
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_RADIO_BUTTON]
state = [pyatspi.STATE_FOCUSABLE, pyatspi.STATE_SENSITIVE]
stateMatch = collection.MATCH_ALL
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role)
def _radioButtonPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a radio button.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_RADIO_BUTTON:
state = obj.getState()
isMatch = state.contains(pyatspi.STATE_FOCUSABLE) \
and state.contains(pyatspi.STATE_SENSITIVE)
return isMatch
def _radioButtonPresentation(self, obj, arg=None):
"""Presents the radio button or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
obj.queryComponent().grabFocus()
else:
# Translators: this is for navigating in document content by moving
# from radio button to radio button in a form. This string is the
# detailed message which Orca will present if there are no more
# radio buttons found.
#
full = _("No more radio buttons.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Separators #
# #
########################
def _separatorBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst separators.
"""
bindings = {}
# Translators: this is for navigating among separators, such as the
# <hr> tag, in a document.
#
prevDesc = _("Goes to previous separator.")
bindings["previous"] = ["s", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among separators, such as the
# <hr> tag, in a document.
#
nextDesc = _("Goes to next separator.")
bindings["next"] = ["s", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _separatorCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating separators
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_SEPARATOR]
return MatchCriteria(collection, roles=role, applyPredicate=False)
def _separatorPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a separator.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
return obj and obj.getRole() == pyatspi.ROLE_SEPARATOR
def _separatorPresentation(self, obj, arg=None):
"""Presents the separator or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[newObj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(newObj, characterOffset)
self._presentObject(obj, 0)
else:
# Translators: this is for navigating document content by moving
# amongst separators (e.g. <hr> tags). This string is the detailed
# message which Orca will present if there are no more separators
# found.
#
full = _("No more separators.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Tables #
# #
########################
def _tableBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst tables.
"""
bindings = {}
# Translators: this is for navigating among tables in a document.
#
prevDesc = _("Goes to previous table.")
bindings["previous"] = ["t", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among tables in a document.
#
nextDesc = _("Goes to next table.")
bindings["next"] = ["t", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _tableCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating tables
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_TABLE]
return MatchCriteria(collection, roles=role, applyPredicate=True)
def _tablePredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a table.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj and obj.childCount and obj.getRole() == pyatspi.ROLE_TABLE:
try:
return obj.queryTable().nRows > 0
except:
pass
return False
def _tablePresentation(self, obj, arg=None):
"""Presents the table or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
caption = self._getTableCaption(obj)
if caption:
self._script.presentMessage(caption)
self._script.presentMessage(self._getTableDescription(obj))
cell = obj.queryTable().getAccessibleAt(0, 0)
self.lastTableCell = [0, 0]
[cell, characterOffset] = self._getCaretPosition(cell)
self._setCaretPosition(cell, characterOffset)
self._presentObject(cell, characterOffset)
else:
# Translators: this is for navigating document content by moving
# from table to table. This string is the detailed message which
# Orca will present if there are no more tables found.
#
full = _("No more tables.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Table Cells #
# #
########################
def _tableCellBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating spatially amongst table cells.
"""
bindings = {}
# Translators: this is for navigating among table cells in a document.
#
desc = _("Goes left one cell.")
bindings["left"] = ["Left", settings.SHIFT_ALT_MODIFIER_MASK, desc]
# Translators: this is for navigating among table cells in a document.
#
desc = _("Goes right one cell.")
bindings["right"] = ["Right", settings.SHIFT_ALT_MODIFIER_MASK, desc]
# Translators: this is for navigating among table cells in a document.
#
desc = _("Goes up one cell.")
bindings["up"] = ["Up", settings.SHIFT_ALT_MODIFIER_MASK, desc]
# Translators: this is for navigating among table cells in a document.
#
desc = _("Goes down one cell.")
bindings["down"] = ["Down", settings.SHIFT_ALT_MODIFIER_MASK, desc]
# Translators: this is for navigating among table cells in a document.
#
desc = _("Goes to the first cell in a table.")
bindings["first"] = ["Home", settings.SHIFT_ALT_MODIFIER_MASK, desc]
# Translators: this is for navigating among table cells in a document.
#
desc = _("Goes to the last cell in a table.")
bindings["last"] = ["End", settings.SHIFT_ALT_MODIFIER_MASK, desc]
return bindings
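# Note (illustrative, not in the original Orca source): every *Bindings method
# in this class returns the same structure, so a caller can unpack e.g.:
#   keysym, modifiers, description = self._tableCellBindings()["left"]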
def _tableCellCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating table cells
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_TABLE_CELL, pyatspi.ROLE_COLUMN_HEADER]
return MatchCriteria(collection, roles=role)
def _tableCellPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a table cell.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
return (obj and obj.getRole() in [pyatspi.ROLE_COLUMN_HEADER,
pyatspi.ROLE_TABLE_CELL])
def _tableCellPresentation(self, cell, arg):
"""Presents the table cell or indicates that one was not found.
Arguments:
- cell: the accessible table cell under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if not cell:
return
if settings.speakCellHeaders:
self._presentCellHeaders(cell, arg)
[obj, characterOffset] = self._getCaretPosition(cell)
self._setCaretPosition(obj, characterOffset)
self._script.updateBraille(obj)
blank = self._isBlankCell(cell)
if not blank:
self._presentObject(cell, 0)
else:
# Translators: "blank" is a short word to mean the
# user has navigated to an empty line.
#
speech.speak(_("blank"))
if settings.speakCellCoordinates:
[row, col] = self.getCellCoordinates(cell)
# Translators: this represents the (row, col) position of
# a cell in a table.
#
self._script.presentMessage(_("Row %(row)d, column %(column)d.") \
% {"row" : row + 1, "column" : col + 1})
spanString = self._getCellSpanInfo(cell)
if spanString and settings.speakCellSpan:
self._script.presentMessage(spanString)
########################
# #
# Unvisited Links #
# #
########################
def _unvisitedLinkBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst unvisited links.
"""
bindings = {}
# Translators: this is for navigating among unvisited links in a
# document.
#
prevDesc = _("Goes to previous unvisited link.")
bindings["previous"] = ["u", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among unvisited links in a
# document.
#
nextDesc = _("Goes to next unvisited link.")
bindings["next"] = ["u", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _unvisitedLinkCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating unvisited links
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_LINK]
state = [pyatspi.STATE_VISITED]
stateMatch = collection.MATCH_NONE
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role)
def _unvisitedLinkPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is an unvisited link.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_LINK:
isMatch = not obj.getState().contains(pyatspi.STATE_VISITED)
return isMatch
def _unvisitedLinkPresentation(self, obj, arg=None):
"""Presents the unvisited link or indicates that one was not
found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
# We were counting on the Gecko script's setCaretPosition
# to do the focus grab. It turns out that we do not always
# want setCaretPosition to grab focus on a link (e.g. when
# arrowing in the text of a paragraph which is a child of
# a link. Therefore, we need to grab focus here.
#
obj.queryComponent().grabFocus()
else:
# Translators: this is for navigating document content by moving
# from unvisited link to unvisited link. This string is the
# detailed message which Orca will present if there are no more
# unvisited links found.
#
full = _("No more unvisited links.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Visited Links #
# #
########################
def _visitedLinkBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst visited links.
"""
bindings = {}
# Translators: this is for navigating among visited links in a
# document.
#
prevDesc = _("Goes to previous visited link.")
bindings["previous"] = ["v", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among visited links in a
# document.
#
nextDesc = _("Goes to next visited link.")
bindings["next"] = ["v", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _visitedLinkCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating visited links
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_LINK]
state = [pyatspi.STATE_VISITED]
stateMatch = collection.MATCH_ANY
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role)
def _visitedLinkPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a visited link.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_LINK:
isMatch = obj.getState().contains(pyatspi.STATE_VISITED)
return isMatch
def _visitedLinkPresentation(self, obj, arg=None):
"""Presents the visited link or indicates that one was not
found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
obj.queryComponent().grabFocus()
else:
# Translators: this is for navigating document content by moving
# from visited link to visited link. This string is the detailed
# message which Orca will present if there are no more visited
# links found.
#
full = _("No more visited links.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
| gpl-3.0 | 3,199,177,288,072,333,300 | 39.519347 | 80 | 0.565077 | false | 4.794823 | false | false | false |
kazuoteramoto/alot | alot/ui.py | 1 | 18751 |
# Copyright (C) 2011-2012 Patrick Totzke <[email protected]>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
import urwid
import logging
from twisted.internet import reactor, defer
from settings import settings
from buffers import BufferlistBuffer
import commands
from commands import commandfactory
from alot.commands import CommandParseError
from alot.helper import string_decode
import widgets
class InputWrap(urwid.WidgetWrap):
"""
This is the topmost widget used in the widget tree.
Its purpose is to capture and interpret keypresses
by instantiating and applying the relevant :class:`Command` objects
or relaying them to the wrapped `rootwidget`.
"""
def __init__(self, ui, rootwidget):
urwid.WidgetWrap.__init__(self, rootwidget)
self.ui = ui
self.rootwidget = rootwidget
self.select_cancel_only = False
def set_root(self, w):
self._w = w
def get_root(self):
return self._w
def allowed_command(self, cmd):
"""sanity check if the given command should be applied.
This is used in :meth:`keypress`"""
if not self.select_cancel_only:
return True
elif isinstance(cmd, commands.globals.SendKeypressCommand):
if cmd.key in ['select', 'cancel']:
return True
else:
return False
def keypress(self, size, key):
"""overwrites `urwid.WidgetWrap.keypress`"""
mode = self.ui.mode
if self.select_cancel_only:
mode = 'global'
cmdline = settings.get_keybinding(mode, key)
if cmdline:
try:
cmd = commandfactory(cmdline, mode)
if self.allowed_command(cmd):
self.ui.apply_command(cmd)
return None
except CommandParseError, e:
self.ui.notify(e.message, priority='error')
return self._w.keypress(size, key)
class UI(object):
"""
This class integrates all components of alot and offers
methods for user interaction like :meth:`prompt`, :meth:`notify` etc.
It handles the urwid widget tree and mainloop (we use twisted) and is
responsible for opening, closing and focussing buffers.
"""
buffers = []
"""list of active buffers"""
current_buffer = None
"""points to currently active :class:`~alot.buffers.Buffer`"""
dbman = None
"""Database manager (:class:`~alot.db.DBManager`)"""
def __init__(self, dbman, initialcmd):
"""
:param dbman: :class:`~alot.db.DBManager`
:param initialcmd: commandline applied after setting up interface
:type initialcmd: str
:param colourmode: determines which theme to chose
:type colourmode: int in [1,16,256]
"""
self.dbman = dbman
colourmode = int(settings.get('colourmode'))
logging.info('setup gui in %d colours' % colourmode)
global_att = settings.get_theming_attribute('global', 'body')
self.mainframe = urwid.Frame(urwid.SolidFill())
self.mainframe_themed = urwid.AttrMap(self.mainframe, global_att)
self.inputwrap = InputWrap(self, self.mainframe_themed)
self.mainloop = urwid.MainLoop(self.inputwrap,
handle_mouse=False,
event_loop=urwid.TwistedEventLoop(),
unhandled_input=self.unhandeled_input)
self.mainloop.screen.set_terminal_properties(colors=colourmode)
self.show_statusbar = settings.get('show_statusbar')
self.notificationbar = None
self.mode = 'global'
self.commandprompthistory = []
logging.debug('fire first command')
self.apply_command(initialcmd)
self.mainloop.run()
def unhandeled_input(self, key):
"""called if a keypress is not handled."""
logging.debug('unhandled input: %s' % key)
def keypress(self, key):
"""relay all keypresses to our `InputWrap`"""
self.inputwrap.keypress((150, 20), key)
def show_as_root_until_keypress(self, w, key, relay_rest=True,
afterwards=None):
def oe():
self.inputwrap.set_root(self.mainframe)
self.inputwrap.select_cancel_only = False
if callable(afterwards):
logging.debug('called')
afterwards()
logging.debug('relay: %s' % relay_rest)
helpwrap = widgets.CatchKeyWidgetWrap(w, key, on_catch=oe,
relay_rest=relay_rest)
self.inputwrap.set_root(helpwrap)
self.inputwrap.select_cancel_only = not relay_rest
def prompt(self, prefix, text=u'', completer=None, tab=0, history=[]):
"""prompt for text input
:param prefix: text to print before the input field
:type prefix: str
:param text: initial content of the input field
:type text: str
:param completer: completion object to use
:type completer: :meth:`alot.completion.Completer`
:param tab: number of tabs to press initially
(to select completion results)
:type tab: int
:param history: history to be used for up/down keys
:type history: list of str
:returns: a :class:`twisted.defer.Deferred`
"""
d = defer.Deferred() # create return deferred
oldroot = self.inputwrap.get_root()
def select_or_cancel(text):
# restore main screen and invoke callback
# (delayed return) with given text
self.inputwrap.set_root(oldroot)
self.inputwrap.select_cancel_only = False
d.callback(text)
prefix = prefix + settings.get('prompt_suffix')
#set up widgets
leftpart = urwid.Text(prefix, align='left')
editpart = widgets.CompleteEdit(completer, on_exit=select_or_cancel,
edit_text=text, history=history)
for i in range(tab): # hit some tabs
editpart.keypress((0,), 'tab')
# build promptwidget
both = urwid.Columns(
[
('fixed', len(prefix), leftpart),
('weight', 1, editpart),
])
att = settings.get_theming_attribute('global', 'prompt')
both = urwid.AttrMap(both, att)
# put promptwidget as overlay on main widget
overlay = urwid.Overlay(both, oldroot,
('fixed left', 0),
('fixed right', 0),
('fixed bottom', 1),
None)
self.inputwrap.set_root(overlay)
self.inputwrap.select_cancel_only = True
return d # return deferred
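# Usage sketch (illustrative, not part of the original file; `ui` and
# `handle_text` are assumed names inside a command's apply()):
#   d = ui.prompt('tag', history=ui.commandprompthistory)
#   d.addCallback(handle_text)  # fires with the entered text once confirmed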
def exit(self):
"""
shuts down user interface without cleaning up.
Use a :class:`commands.globals.ExitCommand` for a clean shutdown.
"""
exit_msg = None
try:
reactor.stop()
except Exception as e:
exit_msg = 'Could not stop reactor: {}.'.format(e)
logging.error(exit_msg + '\nShutting down anyway..')
def buffer_open(self, buf):
"""register and focus new :class:`~alot.buffers.Buffer`."""
if self.current_buffer is not None:
offset = settings.get('bufferclose_focus_offset') * -1
currentindex = self.buffers.index(self.current_buffer)
self.buffers.insert(currentindex + offset, buf)
else:
self.buffers.append(buf)
self.buffer_focus(buf)
def buffer_close(self, buf):
"""
closes given :class:`~alot.buffers.Buffer`.
This removes it from the buffer list and calls its cleanup() method.
"""
buffers = self.buffers
if buf not in buffers:
string = 'tried to close unknown buffer: %s. \n\ni have:%s'
logging.error(string % (buf, self.buffers))
elif self.current_buffer == buf:
logging.info('closing current buffer %s' % buf)
index = buffers.index(buf)
buffers.remove(buf)
offset = settings.get('bufferclose_focus_offset')
nextbuffer = buffers[(index + offset) % len(buffers)]
self.buffer_focus(nextbuffer)
buf.cleanup()
else:
string = 'closing buffer %d:%s'
logging.info(string % (buffers.index(buf), buf))
buffers.remove(buf)
buf.cleanup()
def buffer_focus(self, buf):
"""focus given :class:`~alot.buffers.Buffer`."""
if buf not in self.buffers:
logging.error('tried to focus unknown buffer')
else:
if self.current_buffer != buf:
self.current_buffer = buf
self.inputwrap.set_root(self.mainframe_themed)
self.mode = buf.modename
if isinstance(self.current_buffer, BufferlistBuffer):
self.current_buffer.rebuild()
self.update()
def get_deep_focus(self, startfrom=None):
"""return the bottom most focussed widget of the widget tree"""
if not startfrom:
startfrom = self.current_buffer
if 'get_focus' in dir(startfrom):
focus = startfrom.get_focus()
if isinstance(focus, tuple):
focus = focus[0]
if isinstance(focus, urwid.Widget):
return self.get_deep_focus(startfrom=focus)
return startfrom
def get_buffers_of_type(self, t):
"""
returns currently open buffers for a given subclass of
:class:`alot.buffer.Buffer`
"""
return filter(lambda x: isinstance(x, t), self.buffers)
def clear_notify(self, messages):
"""
clears notification popups. Call this to get rid of messages that don't
time out.
:param messages: The popups to remove. This should be exactly
what :meth:`notify` returned when creating the popup
"""
newpile = self.notificationbar.widget_list
for l in messages:
if l in newpile:
newpile.remove(l)
if newpile:
self.notificationbar = urwid.Pile(newpile)
else:
self.notificationbar = None
self.update()
def choice(self, message, choices={'y': 'yes', 'n': 'no'},
select=None, cancel=None, msg_position='above'):
"""
prompt user to make a choice
:param message: string to display before list of choices
:type message: unicode
:param choices: dict of possible choices
:type choices: dict: keymap->choice (both str)
:param select: choice to return if enter/return is hit. Ignored if set
to `None`.
:type select: str
:param cancel: choice to return if escape is hit. Ignored if set to
`None`.
:type cancel: str
:param msg_position: determines if `message` is above or left of the
prompt. Must be `above` or `left`.
:type msg_position: str
:returns: a :class:`twisted.defer.Deferred`
"""
assert select in choices.values() + [None]
assert cancel in choices.values() + [None]
assert msg_position in ['left', 'above']
d = defer.Deferred() # create return deferred
oldroot = self.inputwrap.get_root()
def select_or_cancel(text):
self.inputwrap.set_root(oldroot)
self.inputwrap.select_cancel_only = False
d.callback(text)
#set up widgets
msgpart = urwid.Text(message)
choicespart = widgets.ChoiceWidget(choices, callback=select_or_cancel,
select=select, cancel=cancel)
# build widget
if msg_position == 'left':
both = urwid.Columns(
[
('fixed', len(message), msgpart),
('weight', 1, choicespart),
], dividechars=1)
else: # above
both = urwid.Pile([msgpart, choicespart])
att = settings.get_theming_attribute('global', 'prompt')
both = urwid.AttrMap(both, att, att)
# put promptwidget as overlay on main widget
overlay = urwid.Overlay(both, oldroot,
('fixed left', 0),
('fixed right', 0),
('fixed bottom', 1),
None)
self.inputwrap.set_root(overlay)
self.inputwrap.select_cancel_only = True
return d # return deferred
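# Usage sketch (illustrative; the callback body is an assumption):
#   d = ui.choice('really send?', select='yes', cancel='no')
#   d.addCallback(lambda c: do_send() if c == 'yes' else None)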
def notify(self, message, priority='normal', timeout=0, block=False):
"""
opens notification popup
:param message: message to print
:type message: str
:param priority: priority string, used to format the popup: currently,
'normal' and 'error' are defined. If you use 'X' here,
the attribute 'global_notify_X' is used to format the
popup.
:type priority: str
:param timeout: seconds until message disappears. Defaults to the value
of 'notify_timeout' in the general config section.
A negative value means never time out.
:type timeout: int
:param block: this notification blocks until a keypress is made
:type block: bool
:returns: an urwid widget (this notification) that can be handed to
:meth:`clear_notify` for removal
"""
def build_line(msg, prio):
cols = urwid.Columns([urwid.Text(msg)])
att = settings.get_theming_attribute('global', 'notify_' + prio)
return urwid.AttrMap(cols, att)
msgs = [build_line(message, priority)]
if not self.notificationbar:
self.notificationbar = urwid.Pile(msgs)
else:
newpile = self.notificationbar.widget_list + msgs
self.notificationbar = urwid.Pile(newpile)
self.update()
def clear(*args):
self.clear_notify(msgs)
if block:
# put "cancel to continue" widget as overlay on main widget
txt = urwid.Text('(cancel continues)')
overlay = urwid.Overlay(txt, self.mainframe,
('fixed left', 0),
('fixed right', 0),
('fixed bottom', 0),
None)
self.show_as_root_until_keypress(overlay, 'cancel',
relay_rest=False,
afterwards=clear)
else:
if timeout >= 0:
if timeout == 0:
timeout = settings.get('notify_timeout')
self.mainloop.set_alarm_in(timeout, clear)
return msgs[0]
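# Usage sketch (illustrative): a negative timeout keeps the popup alive
# until it is removed explicitly via clear_notify:
#   msg = ui.notify('still working...', timeout=-1)
#   ui.clear_notify([msg])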
def update(self):
"""redraw interface"""
#who needs a header?
#head = urwid.Text('notmuch gui')
#h=urwid.AttrMap(head, 'header')
#self.mainframe.set_header(h)
# body
if self.current_buffer:
self.mainframe.set_body(self.current_buffer)
# footer
lines = []
if self.notificationbar: # .get_text()[0] != ' ':
lines.append(self.notificationbar)
if self.show_statusbar:
lines.append(self.build_statusbar())
if lines:
self.mainframe.set_footer(urwid.Pile(lines))
else:
self.mainframe.set_footer(None)
# force a screen redraw
if self.mainloop.screen.started:
self.mainloop.draw_screen()
def build_statusbar(self):
"""construct and return statusbar widget"""
info = {}
cb = self.current_buffer
btype = None
if cb is not None:
info = cb.get_info()
btype = cb.modename
info['buffer_no'] = self.buffers.index(cb)
info['buffer_type'] = btype
info['total_messages'] = self.dbman.count_messages('*')
info['pending_writes'] = len(self.dbman.writequeue)
lefttxt = righttxt = u''
if cb is not None:
lefttxt, righttxt = settings.get(btype + '_statusbar', (u'', u''))
lefttxt = string_decode(lefttxt, 'UTF-8')
lefttxt = lefttxt.format(**info)
righttxt = string_decode(righttxt, 'UTF-8')
righttxt = righttxt.format(**info)
footerleft = urwid.Text(lefttxt, align='left')
pending_writes = len(self.dbman.writequeue)
if pending_writes > 0:
righttxt = ('|' * pending_writes) + ' ' + righttxt
footerright = urwid.Text(righttxt, align='right')
columns = urwid.Columns([
footerleft,
('fixed', len(righttxt), footerright)])
footer_att = settings.get_theming_attribute('global', 'footer')
return urwid.AttrMap(columns, footer_att)
def apply_command(self, cmd):
"""
applies a command
This calls the pre and post hooks attached to the command,
as well as :meth:`cmd.apply`.
:param cmd: an applicable command
:type cmd: :class:`~alot.commands.Command`
"""
if cmd:
# call pre- hook
if cmd.prehook:
logging.info('calling pre-hook')
try:
cmd.prehook(ui=self, dbm=self.dbman)
except:
logging.exception('prehook failed')
return False
# define (callback) function that invokes post-hook
def call_posthook(retval_from_apply):
if cmd.posthook:
logging.info('calling post-hook')
try:
cmd.posthook(ui=self, dbm=self.dbman)
except:
logging.exception('posthook failed')
# define error handler for Failures/Exceptions
# raised in cmd.apply()
def errorHandler(failure):
logging.error(failure.getTraceback())
msg = "Error: %s,\n(check the log for details)"
self.notify(msg % failure.getErrorMessage(), priority='error')
# call cmd.apply
logging.info('apply command: %s' % cmd)
d = defer.maybeDeferred(cmd.apply, self)
d.addErrback(errorHandler)
d.addCallback(call_posthook)
| gpl-3.0 | -3,591,981,657,515,845,000 | 36.880808 | 79 | 0.55997 | false | 4.285943 | false | false | false |
ardoi/datajuicer | lsjuicer/ui/widgets/panels/eventpanel.py | 1 | 3918 |
from PyQt5 import QtWidgets as QW
from PyQt5 import QtCore as QC
from lsjuicer.inout.db.sqla import SyntheticData
from lsjuicer.ui.widgets.fileinfowidget import MyFormLikeLayout
from lsjuicer.ui.widgets.clicktrees import EventClickTree, Events
from actionpanel import ActionPanel
from lsjuicer.ui.widgets.mergewidget import MergeDialog
from lsjuicer.ui.widgets.deletewidget import DeleteDialog
class EventPanel(ActionPanel):
__doc__ = """Event display panel"""
__shortname__ = "Events"
active_events_changed = QC.pyqtSignal()
def setup_ui(self):
layout = QW.QVBoxLayout()
combo_layout = MyFormLikeLayout()
layout.addLayout(combo_layout)
self.setLayout(layout)
self.events = None
region_select = QW.QComboBox()
for i,reg in enumerate(self.analysis.fitregions):
region_select.addItem("{}".format(i))
region_select.currentIndexChanged.connect(self.region_changed)
combo_layout.add_row("Region:", region_select)
result_select = QW.QComboBox()
combo_layout.add_row("Result:", result_select)
self.result_select = result_select
result_select.currentIndexChanged.connect(self.result_changed)
clicktree = EventClickTree(self)
self.clicktree = clicktree
layout.addWidget(clicktree)
region_select.setCurrentIndex(0)
self.region_changed(0)
set_data_pb = QW.QPushButton("Set data")
set_data_pb.clicked.connect(self.set_data)
merge_pb = QW.QPushButton("Merge events")
merge_pb.clicked.connect(self.merge_events)
delete_pb = QW.QPushButton("Delete events")
delete_pb.clicked.connect(self.delete_events)
layout.addWidget(set_data_pb)
layout.addWidget(merge_pb)
layout.addWidget(delete_pb)
def _selected_events(self):
selected_events = []
for event_type in self.events.event_dict:
for i, event in enumerate(self.events.event_dict[event_type]):
status = self.events.status_dict[event_type][i]
if status:
selected_events.append(event.id)
return selected_events
def set_data(self):
events_to_show = self._selected_events()
sdata = SyntheticData(self.result)
new = sdata.get_events(events_to_show)
self.imagedata.replace_channel(new, 2)
self.active_events_changed.emit()
def merge_events(self):
events_to_merge = self._selected_events()
if len(events_to_merge) < 2:
QW.QMessageBox.warning(self,'Not enough events',
"At least two events have to be selected for merging")
return
dialog = MergeDialog(events_to_merge,self)
res = dialog.exec_()
if res:
self.result_changed(self.result_select.currentIndex())
def delete_events(self):
events_to_delete = self._selected_events()
if len(events_to_delete) < 1:
QW.QMessageBox.warning(self,'Not enough events',
"At least one event has to be selected for deletion")
return
dialog = DeleteDialog(events_to_delete,self)
res = dialog.exec_()
if res:
self.result_changed(self.result_select.currentIndex())
def region_changed(self, reg_no):
print "\nREgion changed"
self.region = self.analysis.fitregions[reg_no]
self.result_select.clear()
print reg_no, self.region
for i,res in enumerate(self.region.results):
self.result_select.addItem(str(i))
def result_changed(self, res_no):
print "\nResult changed"
self.result = self.region.results[res_no]
print res_no, self.result
self.events = Events()
for ev in self.result.events:
self.events.add_event(ev)
self.clicktree.set_events(self.events)
| gpl-3.0 | 376,748,179,821,630,000 | 36.673077 | 74 | 0.639612 | false | 3.837414 | false | false | false |
nicholas-maltbie/Medina | AIPractice/tttTest.py | 1 | 1416 |
from ttt import *
from tttGameSpec import TicTacToeGameSpec
def play_game(agent1, agent2, name1, name2):
"""Plays a game of tic tac toe with two agents and returns the winner."""
game_spec = TicTacToeGameSpec()
return game_spec.play_game(agent1, agent2)
"""board = make_board()
names = [name1, name2]
players = [agent1, agent2]
pieces = [-1,1]
current = random.randint(0,1)
while check_winner(board) == None:
print(get_board_as_numbers(board, pieces[current], pieces[(current + 1) % 2]))
move = players[current](board, pieces[current])
apply_move(board, move)
current = (current + 1) % 2
win = check_winner(board)
if win == 'o':
return name2
elif win == 'x':
return name1
else:
return 'tie'"""
if __name__ == "__main__":
distrib = {'player1':0, 'player2':0, 'tie':0}
plays = 1
for i in range(plays):
distrib[play_game(make_random_agent(), make_human_agent(), \
'player1', 'player2')] += 1;
print('player1 won ' + str(distrib['player1']) + ' times ' + \
str(int(distrib['player1'] / plays * 100)) + "%")
print('player2 won ' + str(distrib['player2']) + ' times ' + \
str(int(distrib['player2'] / plays * 100)) + "%")
print('tied ' + str(distrib['tie']) + ' times ' + \
str(int(distrib['tie'] / plays * 100)) + "%")
| mit | 6,103,644,358,303,188,000 | 36.263158 | 86 | 0.558616 | false | 3.210884 | false | false | false |
DTL-FAIRData/ODEX4all-UseCases | EKP/tmp/NIZO2.py | 1 | 5816 |
# Load the required packages
import EKP
import csv
import os
import datetime
# Knowledge platform URL
url = ''
# User credentials: Please fill in!
username = ''
password = ''
# Set the output directory
os.chdir("NIZO input & Output/")
# Get the user token, required for access
t = EKP.getToken(username, password, url).json()['token']
# Get the semantic types contained in the database, and their codes
Types = EKP.getSemanticTypeDict(url, t)
# Read in the input file
input_file = open("List commensal species Qin et al 19_10_2015.csv", "r")
reader = csv.reader(input_file, delimiter=";")
commensals = []
for line in reader:
commensals.append(line[0])
input_file.close()
input_group = "Bacterium"
input_ids = {}
for c in commensals:
ID = EKP.getID(url, Types, t, c, input_group)
if len(ID) > 0:
input_ids.update({ID[0]['name']: ID[0]['id']})
endpoints = {"Gut dysmotility" : "C1839757",
"bowel/gut problem" : "C1656426",
"Inflammatory Bowel Diseases" : "C0021390",
"Intestinal mucosal permeability" : "C0232645",
"Permeability" : "C0232645",
"body barrier" : "C0682585"
}
intermediate_types = { "Food" : "Objects",
"Organ or Tissue Function" : "Physiology",
#"Gene or Genome" : "Genes & Molecular Sequences",
"Finding" : "Disorders",
"Disease or Syndrome" : "Disorders",
"Chemical Viewed Functionally" : "Chemicals & Drugs",
"Biologically Active Substance" : "Chemicals & Drugs",
"Tissue" : "Anatomy",
"Body Location or Region" : "Anatomy",
"Body Part, Organ, or Organ Component" : "Anatomy",
"Body Space or Junction" : "Anatomy",
"Body System" : "Anatomy",
"Cell" : "Anatomy"
}
# Alle concepten die met Gut te maken hebben gebruiken als filter
gut = EKP.getID(url, Types, t, "C0699819")
intestines = EKP.getID(url, Types, t, "C0021853")
endpoint_ids = []
for point in endpoints.values():
endpoint_ids.append(EKP.getID(url, Types, t, point)[0]['id'])
endpoint_ids = list(set(endpoint_ids))
for input in input_ids.values():
print(EKP.getRelationships([input], endpoint_ids, url, t))
indirect_all = []
gut_all = []
intestines_all = []
for key, value in intermediate_types.items():
gut_connected = EKP.getDirectlyConnectedConcepts(Types, t, url, [gut[0]['id']], value, key)
if 'content' in gut_connected.keys() and len(gut_connected['content']) > 0:
for g in gut_connected['content']:
gut_all.append(g['tier1Concept']['gi'])
intestines_connected = EKP.getDirectlyConnectedConcepts(Types, t, url, [intestines[0]['id']], value, key)
if 'content' in intestines_connected.keys() and len(intestines_connected['content']) > 0:
for g in intestines_connected['content']:
intestines_all.append(g['tier1Concept']['gi'])
response = EKP.getIndirectRelationships(list(input_ids.values()), endpoint_ids, Types, url, t, value, key)
print(response)
if 'content' in response.keys():
indirect_all.append(response['content'])
indirect_out = open("indirect_output_" + datetime.datetime.today().strftime("%Y_%m_%d") + ".csv", "w")
iw = csv.writer(indirect_out, delimiter = ";")
iw.writerow(["Starting concept", "Predicate1", "Sources1", "Connecting concept", "Semantic category", "Semantic types", "Found in gut?", "Found in intestines?", "Predicate2", "Sources2", "End concept", "Path weight"])
indirect_all2 = []
for ii in indirect_all:
indirect_all2 = indirect_all2 + ii
for i in indirect_all2:
start = i['tier0Concept']['name']
intermediate = i['tier1Concept']['name']
intermediate_cat = i['tier1Concept']['category']
intermediate_concept = EKP.getConcept(i['tier1Concept']['gi'], url, t)
output_STs = []
for g in intermediate_concept['semanticTypes']:
for key, value in Types[0].items():
if g == value:
output_STs.append(key)
# Logic here to filter on gut & intestines
if i['tier1Concept']['gi'] in gut_all:
gut_bool = "gut"
if i['tier1Concept']['gi'] not in gut_all:
gut_bool = "no"
if i['tier1Concept']['gi'] in intestines_all:
intestines_bool = "intestines"
if i['tier1Concept']['gi'] not in intestines_all:
intestines_bool = "no"
end = i['tier2Concept']['name']
pw = i['pathWeight']
nrows = max([len(i['tier01TripleInformation']), len(i['tier12TripleInformation'])])
pubs1 = []
pubs2 = []
for w in range(0,nrows):
if w <= len(i['tier01TripleInformation']) - 1:
predicate1 = i['tier01TripleInformation'][w]['predicateName']
pub_info = EKP.getPublications(i['tier01TripleInformation'][w]['tripleUuid'], url, t)
for p1 in pub_info['publications']:
if p1['publicationInfo'] is not None and 'url' in p1['publicationInfo'].keys():
pubs1.append(p1['publicationInfo']['url'])
if w <= len(i['tier12TripleInformation']) - 1:
predicate2 = i['tier12TripleInformation'][w]['predicateName']
pub_info2 = EKP.getPublications(i['tier12TripleInformation'][w]['tripleUuid'], url, t)
for p2 in pub_info2['publications']:
if p2['publicationInfo'] is not None and 'url' in p2['publicationInfo'].keys():
pubs2.append(p2['publicationInfo']['url'])
iw.writerow([start, predicate1, pubs1, intermediate, intermediate_cat, output_STs, gut_bool, intestines_bool, predicate2, pubs2, end, pw])
indirect_out.close()
| mit | -8,227,296,890,107,231,000 | 39.388889 | 217 | 0.607634 | false | 3.336776 | true | false | false |
Khurramjaved96/Recursive-CNNs | data_augmentor/augmentData.py | 1 | 2668 |
import os
import cv2
import numpy as np
import utils
def argsProcessor():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--dataPath", help="DataPath")
parser.add_argument("-o", "--outputFiles", help="outputFiles", default="bar")
return parser.parse_args()
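# Example invocation (illustrative; paths are placeholders):
#   python augmentData.py -i /path/to/labelled_data -o augmented_out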
args = argsProcessor()
output_dir = args.outputFiles
if (not os.path.isdir(output_dir)):
os.mkdir(output_dir)
dir = args.dataPath
import csv
with open(output_dir+"/gt.csv", 'a') as csvfile:
spamwriter_1 = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
for image in os.listdir(dir):
if image.endswith("jpg") or image.endswith("JPG"):
if os.path.isfile(dir+"/"+image+".csv"):
with open(dir+"/"+image+ ".csv", 'r') as csvfile:
spamwriter = csv.reader(csvfile, delimiter=' ',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
img = cv2.imread(dir +"/"+ image)
print (image)
gt= []
for row in spamwriter:
gt.append(row)
# img = cv2.circle(img, (int(float(row[0])), int(float(row[1]))), 2,(255,0,0),90)
gt =np.array(gt).astype(np.float32)
gt = gt / (img.shape[1], img.shape[0])
gt = gt * (1080, 1080)
img = cv2.resize(img, (1080, 1080))
print (gt)
for angle in range(0,271,90):
img_rotate, gt_rotate = utils.rotate(img, gt, angle)
for random_crop in range(0,16):
img_crop, gt_crop = utils.random_crop(img_rotate, gt_rotate)
mah_size = img_crop.shape
img_crop = cv2.resize(img_crop, (64, 64))
gt_crop = np.array(gt_crop)
# gt_crop = gt_crop*(1.0 / mah_size[1],1.0 / mah_size[0])
# for a in range(0,4):
# no=0
# for a in range(0,4):
# no+=1
# cv2.circle(img_crop, tuple(((gt_crop[a]*64).astype(int))), 2,(255-no*60,no*60,0),9)
# # # cv2.imwrite("asda.jpg", img)
cv2.imwrite(output_dir + "/" +str(angle)+str(random_crop)+ image, img_crop)
spamwriter_1.writerow((str(angle)+str(random_crop)+ image, tuple(list(gt_crop))))
| apache-2.0 | 4,897,393,317,941,922,000 | 38.820896 | 117 | 0.463268 | false | 3.789773 | false | false | false |
vsemionov/npamp | npamp/output.py | 1 | 11750 |
# Copyright (C) 2012 Victor Semionov
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import traceback
import numpy as np
import params
import plot
div_line = "=" * 32
status_writing = "generating output"
output_dir = None
models_rel_path = "pumping"
ref_pulse_rel_path = "ref_pulse"
optimization_rel_path = "optimization"
opt_pump_rel_path = os.path.join(optimization_rel_path, "pumping")
opt_geom_rel_path = os.path.join(optimization_rel_path, "geometry")
alt_plot_rel_path = "alt"
x_label = "x [mm]"
y_label = "y [mm]"
z_label = "z [mm]"
rho_label = "r [mm]"
t_amp_label = "t [ns]"
i_label = "pulse num."
norm_t_label = "t/T"
density_rel_label = "rel. photon density"
density_norm_rel_label = "norm. photon density"
upper_rel_label = "rel. upper state population"
lower_rel_label = "rel. lower state population"
inversion_rel_label = "rel. population inversion"
inversion_abs_label = "population inversion [cm^-3]"
t_pump_label = "t [us]"
pump_duration_label = "pump duration [us]"
pump_power_label = "pump power [W]"
eff_power_density_label = "absorbed power density [W/cm^3]"
rate_label = "depopulation rate [cm^-3 s^-1]"
rate_rel_label = "depop. rate / inversion [s^-1]"
gain_label = "small-signal gain"
error_label = "rel. error"
inversion_rdiff_label = "inversion rel. difference [%]"
gain_rdiff_label = "gain rel. difference [%]"
energy_rel_label = "energy gain"
energy_abs_pump_label = "optical pump energy [J]"
energy_abs_stored_label = "stored energy [J]"
energy_abs_pulse_label = "output energy [mJ]"
rel_gain_decrease_label = "rel. gain decrease [%]"
fluence_rel_label = "rel. fluence"
fluence_norm_rel_label = "norm. fluence"
fluence_abs_label_energy = "max. output fluence [J/cm^2]"
medium_radius_label = "medium diameter [mm]"
beam_radius_label = "beam diameter [mm]"
extraction_eff_label = "extraction efficiency [%]"
total_eff_label = "optical to optical efficiency [%]"
lower_lifetime_legend = r"$\tau_1 \, = \, %s$"
lower_lifetime_unit = "ns"
def warn(message):
print >>sys.stderr, "%s: %s" % ("warning:", message)
def print_error(message, hint=None):
print >>sys.stderr, "%s: %s" % ("error", message)
if hint:
print >>sys.stderr, hint
def print_exception():
t, v, _ = sys.exc_info()
fmt = traceback.format_exception_only(t, v)
exc_msg = fmt[-1][:-1]
print >>sys.stderr, exc_msg
def show_status((i, j), (si, sj), done):
def print_status():
if j is not None:
print "%d, %d" % (i, j)
else:
print i
if si != 0:
if done:
print_status()
else:
if i % si == 0:
if j is None:
print_status()
else:
if sj == 0:
if j == 0:
print_status()
else:
if j % sj == 0:
print_status()
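# Illustrative calls (not part of the original module):
#   show_status((i, None), (10, 0), False)  # prints i when i % 10 == 0
#   show_status((i, j), (1, 100), False)    # prints "i, j" when j % 100 == 0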
def init_dir(name):
dirname = os.path.join(output_dir, name)
if not os.path.isdir(dirname):
os.makedirs(dirname)
return dirname
def plot_inversion(dirname, inv):
filename = lambda name: os.path.join(dirname, name)
T = inv.T
inversion = inv.inversion
tlim = (T[0], T[-1])
plot.plot_data(filename("inversion_evo"), "Population Inversion Evolution", (T, None, tlim, t_pump_label), (inversion, None, None, inversion_abs_label))
def plot_output(dirname, input_beam, input_pulse, fwhm, amp, fluences, exact_density_out=None, exact_population_final=None):
filename = lambda name: os.path.join(dirname, name)
density = amp.density
population = amp.population
upper = population[0]
lower = population[1]
inversion = upper - lower
Z = amp.Z
T = amp.T
if params.output_rel_time:
T = T / fwhm
TZ, ZT = np.meshgrid(T, Z)
zlim = (Z[0], Z[-1])
tlim = (T[0], T[-1])
ref_density = input_pulse.ref_density
ref_inversion = amp.active_medium.initial_inversion.ref_inversion
out_t_label = norm_t_label if params.output_rel_time else t_amp_label
stride_z = max(len(amp.Z) // params.out_count_z, 1)
stride_t = max(len(amp.T) // params.out_count_t, 1)
plot.plot_data(filename("density_in"), "Input Photon Density", (T, None, tlim, out_t_label), (density[0]/ref_density, None, None, density_rel_label))
plot.plot_data(filename("density_out"), "Output Photon Density", (T, None, tlim, out_t_label), (density[-1]/ref_density, None, None, density_rel_label))
plot.plot_data(filename("densities"), "Input and Output Photon Density", ((T, ) * 2, None, tlim, out_t_label), ((density[0]/ref_density, density[-1]/ref_density), None, None, density_rel_label), ("input pulse", "output pulse"))
plot.plot_data(filename("densities_norm"), "Normalized Input and Output Photon Density", ((T, ) * 2, None, tlim, out_t_label), ((density[0]/ref_density, density[-1]/np.amax(density[-1])), None, None, density_norm_rel_label), ("input pulse", "output pulse"))
plot.plot_data(filename("upper_init"), "Initial Upper State Population", (Z, None, zlim, z_label), (upper.T[0]/ref_inversion, None, None, upper_rel_label))
plot.plot_data(filename("upper_final"), "Final Upper State Population", (Z, None, zlim, z_label), (upper.T[-1]/ref_inversion, None, None, upper_rel_label))
plot.plot_data(filename("lower_init"), "Initial Lower State Population", (Z, None, zlim, z_label), (lower.T[0]/ref_inversion, None, None, lower_rel_label))
plot.plot_data(filename("lower_final"), "Final Lower State Population", (Z, None, zlim, z_label), (lower.T[-1]/ref_inversion, None, None, lower_rel_label))
plot.plot_data(filename("inversion_init"), "Initial Population Inversion", (Z, None, zlim, z_label), (inversion.T[0]/ref_inversion, None, None, inversion_rel_label))
plot.plot_data(filename("inversion_final"), "Final Population Inversion", (Z, None, zlim, z_label), (inversion.T[-1]/ref_inversion, None, None, inversion_rel_label))
plot.plot_projection(filename("density_evo"), "Photon Density Evolution", (ZT, None, z_label), (TZ, None, out_t_label), (density/ref_density, None, density_rel_label), (30, -30), (stride_z, stride_t))
plot.plot_projection(filename("upper_evo"), "Upper State Population Evolution", (ZT, None, z_label), (TZ, None, out_t_label), (upper/ref_inversion, None, upper_rel_label), (30, 30), (stride_z, stride_t))
plot.plot_projection(filename("lower_evo"), "Lower State Population Evolution", (ZT, None, z_label), (TZ, None, out_t_label), (lower/ref_inversion, None, lower_rel_label), (30, 30), (stride_z, stride_t))
plot.plot_projection(filename("inversion_evo"), "Population Inversion Evolution", (ZT, None, z_label), (TZ, None, out_t_label), (inversion/ref_inversion, None, inversion_rel_label), (30, 30), (stride_z, stride_t))
if exact_density_out is not None:
plot.plot_error(filename("density_err"), "Photon Density Relative Error", (T, None, tlim, out_t_label), ((exact_density_out, density[-1]), None, None, error_label))
if exact_population_final is not None:
plot.plot_error(filename("inversion_err"), "Population Inversion Relative Error", (Z, None, zlim, z_label), ((exact_population_final[0] - exact_population_final[1], inversion.T[-1]), None, None, error_label))
if amp.active_medium.doping_agent.lower_lifetime != 0.0:
plot.plot_error(filename("upper_err"), "Upper State Population Relative Error", (Z, None, zlim, z_label), ((exact_population_final[0], upper.T[-1]), None, None, error_label))
plot.plot_error(filename("lower_err"), "Lower State Population Relative Error", (Z, None, zlim, z_label), ((exact_population_final[1], lower.T[-1]), None, None, error_label))
norm_fluences = fluences / input_beam.ref_fluence
plot.plot_data(filename("fluence"), "Fluence Evolution", (Z, None, zlim, z_label), (norm_fluences, None, None, fluence_rel_label))
def plot_train(dirname, input_beam, active_medium, output_photon_counts):
filename = lambda name: os.path.join(dirname, name)
pulse_count = len(output_photon_counts)
pulse_nums = np.arange(1, pulse_count + 1)
nlim = (pulse_nums[0] - 1, pulse_nums[-1] + 1)
extra_args = dict(style="o", vlines=True, grid="y") if pulse_count <= 32 else {}
input_photon_count = input_beam.fluence_integral(active_medium.radius)
plot.plot_data(filename("pulse_energy_gain"), "Pulse Energy Gain", (pulse_nums, None, nlim, i_label), (output_photon_counts/input_photon_count, None, None, energy_rel_label), **extra_args)
def plot_beam(dirname, input_beam, Rho, Phi, ref_output_fluence):
filename = lambda name: os.path.join(dirname, name)
if len(Rho) > 1:
vfluence = np.vectorize(input_beam.fluence)
ref_input_fluence = vfluence(*np.meshgrid(Rho, Phi)).T
norm_input_fluence = ref_input_fluence / input_beam.ref_fluence
norm_output_fluence = ref_output_fluence / input_beam.ref_fluence
max_output_fluence = np.amax(norm_output_fluence)
n_ref = -1
for n, phi in enumerate(Phi):
if n_ref < 0 or abs(phi - input_beam.phi_ref) < abs(Phi[n_ref] - input_beam.phi_ref):
n_ref = n
rholim = (Rho[0], Rho[-1])
plot.plot_data(filename("fluences"), "Input and Output Fluence", ((Rho,)*2, None, rholim, rho_label), ((norm_input_fluence[:, n_ref], norm_output_fluence[:, n_ref]), None, None, fluence_rel_label), ("input beam", "output beam"))
plot.plot_data(filename("fluences_norm"), "Normalized Input and Output Fluence", ((Rho,)*2, None, rholim, rho_label), ((norm_input_fluence[:, n_ref], norm_output_fluence[:, n_ref] / max_output_fluence), None, None, fluence_norm_rel_label), ("input beam", "output beam"))
if len(Phi) > 1:
FR, RF = np.meshgrid(Phi, Rho)
XY, YX = RF * np.cos(FR), RF * np.sin(FR)
stride_rho = max(len(Rho) // params.out_count_rho, 1)
stride_phi = max(len(Phi) // params.out_count_phi, 1)
plot.plot_projection(filename("fluence_in"), "Input Fluence", (XY, None, x_label), (YX, None, y_label), (norm_input_fluence, None, fluence_rel_label), (30, -60), (stride_rho, stride_phi))
plot.plot_projection(filename("fluence_out"), "Output Fluence", (XY, None, x_label), (YX, None, y_label), (norm_output_fluence, None, fluence_rel_label), (30, -60), (stride_rho, stride_phi))
| bsd-2-clause | -5,244,082,922,388,500,000 | 49.646552 | 278 | 0.655234 | false | 3.151824 | false | false | false |
living180/vex | vex/remove.py | 1 | 1329 |
import os
import shutil
from vex import exceptions
def obviously_not_a_virtualenv(path):
include = os.path.join(path, 'include')
bin = os.path.join(path, 'bin')
scripts = os.path.join(path, 'Scripts')
if not os.path.exists(bin) and not os.path.exists(scripts):
return True
if os.path.exists(include) and not any(
filename.startswith('py') for filename in os.listdir(include)
):
return True
return False
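# Sketch of the heuristic (illustrative): a directory lacking both bin/ and
# Scripts/, or whose include/ contains no "py*" entries, is not a virtualenv:
#   obviously_not_a_virtualenv('/tmp/empty-dir')  # -> True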
def handle_remove(ve_path):
if not os.path.exists(ve_path):
return
if hasattr(os, "geteuid"):
if os.geteuid() == 0 or os.environ.get('USER', '') == 'root':
raise exceptions.VirtualenvNotRemoved(
"not removing any directory as root user")
if ve_path in ("/", "\\"):
raise exceptions.VirtualenvNotRemoved(
"not removing possible root directory {0!r}".format(ve_path))
if ve_path == os.path.expanduser("~"):
raise exceptions.VirtualenvNotRemoved(
"not removing possible home directory {0!r}".format(ve_path))
# last-minute checks
if obviously_not_a_virtualenv(ve_path):
raise exceptions.VirtualenvNotRemoved(
"path {0!r} did not look like a virtualenv".format(ve_path))
print("Removing {0!r}".format(ve_path))
shutil.rmtree(ve_path)
| mit | -4,692,091,804,238,745,000 | 34.918919 | 73 | 0.632054 | false | 3.691667 | false | false | false |
cginternals/glkernel | scripts/generate.py | 1 | 19818 |
import posixpath # instead of os.path, to always use forward slashes
import os
import re
# TODOs:
# (more TODOs in code)
standardTypes = {
"bool",
"char",
"short",
"int",
"long",
"long long",
"unsigned char",
"unsigned short",
"unsigned int",
"unsigned long",
"unsigned long long",
"float",
"double",
"long double",
"size_t",
"glm::uint16"
}
# ------------
# large-scale parsing
def findPairedBrace(code):
nl = 1
for i,c in enumerate(code):
if c == '}': nl -= 1
if c == '{': nl += 1
if nl == 0:
return i
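# Example (illustrative): the argument is the code *after* an opening brace;
# the return value is the index of the brace closing that scope:
#   >>> findPairedBrace("a { b } c } tail")
#   10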
def getNamespaces(code):
namespaces = dict()
global namespaceBeginPattern
namespaceBeginPattern = re.compile(r"^namespace(?:\s+(?P<name>\w+))?\s*\{", re.M | re.S)
lastEnd = 0
for match in namespaceBeginPattern.finditer(code):
# skip inner namespaces
if match.start() < lastEnd:
continue
nsStart = match.end() # behind opening brace
nsEnd = findPairedBrace(code[nsStart:]) + nsStart # index of closing brace
subNamespaces = getNamespaces(code[nsStart:nsEnd])
namespaces[(nsStart,nsEnd)] = (match.group("name") or "<unnamed>", subNamespaces)
# remember end for skipping inner namespaces
lastEnd = nsEnd
return namespaces
def namespaceAtPosition(namespaces, pos):
for span in namespaces:
if pos in range(*span):
innerNS = namespaceAtPosition(namespaces[span][1], pos - span[0])
return namespaces[span][0] + ("::" + innerNS if innerNS else "")
return ""
# ------------
# small-scale parsing
def removeCVRef(typeString):
return re.sub(r'^(?:const |volatile )*(.*?)(?:\s*&)?$', r'\1', typeString)
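# Example (illustrative):
#   >>> removeCVRef("const V<T, P>&")
#   'V<T, P>'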
def splitParams(paramString):
splitParams = [p.strip() for p in paramString.split(',') if p.strip()]
i = 0
while i < len(splitParams)-1:
if splitParams[i].count('<') != splitParams[i].count('>'):
splitParams[i:i+2] = [splitParams[i] + ", " + splitParams[i+1]]
else:
i += 1
paramDefaults = [(split[0].strip(), split[1].strip() if len(split) > 1 else '') for split in [p.rsplit('=', 1) for p in splitParams]]
paramsSplit = [(l.strip(), r.strip(), d) for l,r,d in [p.rsplit(' ', 1) + [d] for p,d in paramDefaults]]
return paramsSplit
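# Example (illustrative): commas inside template brackets are re-merged and
# each parameter becomes a (type, name, default) triple:
#   >>> splitParams("const T& shift = T(0), glm::tvec2<T, P> size")
#   [('const T&', 'shift', 'T(0)'), ('glm::tvec2<T, P>', 'size', '')]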
def removeParamDefaults(params):
return [(p[0], p[1]) for p in params]
def getParamNames(params):
return [p[1] for p in params]
def getParamTypes(params):
return [p[0] for p in params]
def getParamDefaults(params):
return [(p[1], p[2]) for p in params if p[2]]
def possibleTypes(argType, templateList):
if re.match("^\w+$", argType): # argType is just single word, e.g. 'T'
if "std::enable_if<std::is_floating_point<"+argType+">::value>::type" in templateList:
return {"float"}
else:
return {"float", "vec2", "vec3", "vec4"}
genVecMatch = re.match("(\w+)\s*<\s*\w+\s*,\s*\w+\s*>", argType) # general glm vector, e.g. 'V<T, P>'
if genVecMatch:
if re.search("template\s*<\s*(?:typename|class)\s*,\s*glm::precision\s*>\s*(?:typename|class)\s*" + genVecMatch.group(1), templateList):
return {"vec2", "vec3", "vec4"}
specVecMatch = re.match("glm::tvec(\d)<.*?>", argType) # specific glm vector, e.g. 'glm::tcev4<T, P>'
if specVecMatch:
return {"vec"+specVecMatch.group(1)}
return {argType}
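# Examples (illustrative; set ordering may vary):
#   possibleTypes("T", "typename T")
#       # -> {"float", "vec2", "vec3", "vec4"}
#   possibleTypes("T", "typename std::enable_if<std::is_floating_point<T>::value>::type")
#       # -> {"float"}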
def paramTypeFromKernelTypes(kernelTypeString, paramTypeString, templateList, enums):
if possibleTypes(paramTypeString, templateList) == {'float'}:
return "float"
strippedTypeString = removeCVRef(paramTypeString)
if kernelTypeString == strippedTypeString: # e.g. 'V<T, P>' and 'const V<T, P>&'
return "same"
if strippedTypeString in kernelTypeString: # e.g. 'const T&' and 'V<T, P>'
return "float"
if strippedTypeString in [e["name"] for e in enums]:
return strippedTypeString
if strippedTypeString in standardTypes:
return strippedTypeString
print("Unknown Type encountered: " + paramTypeString)
def getEnumValues(valueDefString):
definitions = [d.strip() for d in valueDefString.split(',')]
values = []
i = 0
for d in definitions:
if '=' in d:
_, _, expr = d.partition('=')
i = eval(expr, dict(values))
values.append((d,i))
i += 1
return values
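# Example (illustrative): explicit assignments reset the counter, and the raw
# definition string (including any "= ...") is kept as the name:
#   >>> getEnumValues("First, Second = 4, Third")
#   [('First', 0), ('Second = 4', 4), ('Third', 5)]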
# ------------
# generation
def enumForJS(value, enums):
if "::" not in value:
return value
enumDict = {enum["name"]: {valueName:value for valueName, value in enum["values"]} for enum in enums}
enumName, _, valueName = value.partition("::")
if enumName not in enumDict:
# TODO: Warning?
return value
if valueName not in enumDict[enumName]:
# TODO: Warning?
return value
return enumName + "." + valueName
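# Example (illustrative, with a hypothetical enum):
#   enums = [{"name": "Shape", "values": [("Circle", 0), ("Square", 1)]}]
#   enumForJS("Shape::Circle", enums)  # -> "Shape.Circle"
#   enumForJS("T(0)", enums)           # -> "T(0)" (no "::", returned unchanged)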
def jsFuncName(func):
name = func["name"]
if "alternativeNumber" in func:
name += str(func["alternativeNumber"])
return "_".join(func["namespace"].split('::')[1:] + [name])
def jsFunction(func, enums):
assert func["namespace"].startswith("glkernel::"), "function \""+func["name"]+"\" from outside glkernel namespace: " + func["namespace"]
namespaceStack = func["namespace"].split("::")
namespaceStack.pop(0) # ignore outmost namespace glkernel
defaultChecks = '\n'.join([" {name} = (typeof {name} !== 'undefined') ? {name} : {default};".format(name=name, default=enumForJS(default, enums)) for name, default in getParamDefaults(func["params"])])
if defaultChecks:
defaultChecks = "\n // Defaults\n" + defaultChecks + "\n"
paramString = ', '.join(getParamNames(func["params"]))
paramStringKomma = "" if not paramString else ', ' + paramString
firstLine = " {name}: function({params}) {{".format(name = func["name"], params = paramString)
finalCall = " _glkernel.{generatedName}(that.kernel{paramsWithKomma});".format(generatedName = jsFuncName(func), paramsWithKomma = paramStringKomma)
jsCode = """{firstLine}{defaultChecks}
{finalCall}
return that;
}}""".format(firstLine = firstLine, defaultChecks = defaultChecks, finalCall = finalCall)
return jsCode
def buildJSNamespaces(funcs, enums):
namespaces = dict()
for func in funcs:
if func["namespace"] not in namespaces:
namespaces[func["namespace"]] = []
namespaces[func["namespace"]].append(jsFunction(func, enums))
nsCodes = []
for ns, codes in sorted(namespaces.items()):
name = ns[len("glkernel::"):]
functionsCode = ",\n".join(codes)
nsCode = " this.{name} = {{\n{funcCodes}\n }};".format(name = name, funcCodes = functionsCode)
nsCodes.append(nsCode)
return "\n".join(nsCodes)
def buildJSEnums(enums):
enumCodes = []
for enum in sorted(enums, key=lambda e: e["name"]):
valueLines = []
for name, value in enum["values"]:
valueLines.append(" " + name + ": " + str(value))
valuesCode = ',\n'.join(valueLines)
enumCode = "{name} = {{\n{members}\n}};".format(name = enum["name"], members = valuesCode)
enumCodes.append(enumCode)
return "\n\n".join(enumCodes)
def buildCPPFunctionAdds(funcs):
return '\n'.join([' addFunction("{name}", this, &JSInterface::{name});'.format(name = jsFuncName(func)) for func in funcs])
def buildCPPFunctionForwardDecl(func, enums):
enumNames = [enum["name"] for enum in enums]
funcName = jsFuncName(func)
# Deduce parameter types
kernelTypes = possibleTypes(func["kernelType"], func["template"])
paramTypes = [paramTypeFromKernelTypes(func["kernelType"], param[0], func["template"], enums) for param in func["params"]]
cases = [(kernelType, [kernelType if param == "same" else param for param in paramTypes]) for kernelType in kernelTypes]
if "alternatives" in func:
for alt in func["alternatives"]:
altKernelTypes = possibleTypes(alt["kernelType"], alt["template"])
altParamTypes = [paramTypeFromKernelTypes(alt["kernelType"], param[0], alt["template"], enums) for param in alt["params"]]
cases += [(kernelType, [kernelType if param == "same" else param for param in altParamTypes]) for kernelType in altKernelTypes]
cases.sort()
typesPerParam = [{case[1][i] for case in cases} for i in range(len(cases[0][1]))]
variantNeeded = [len(types) > 1 for types in typesPerParam]
enumParam = [list(types)[0] in enumNames for types in typesPerParam]
paramTypes = ["cppexpose::Object*"] + ["const cppexpose::Variant&" if needVariant else "int" if isEnum else list(types)[0] for types, needVariant, isEnum in zip(typesPerParam, variantNeeded, enumParam)]
paramNames = ["obj"] + [param[1] for param in func["params"]]
paramList = ", ".join(type + " " + name for type,name in zip(paramTypes, paramNames))
return " void " + funcName + "(" + paramList + ");"
def buildCPPFunctionForwardDecls(funcs, enums):
return '\n'.join([buildCPPFunctionForwardDecl(func, enums) for func in funcs])
def buildCPPIncludes(fileNames):
includeFiles = []
for f in fileNames:
if not "include/" in f:
print("Error: " + f + " is outside include directory!")
continue
while not f.startswith("include/"):
f = f[1:]
f = f[len("include/"):]
includeFiles.append(f)
return '\n'.join(['#include <' + name + '>' for name in includeFiles])
def buildCPPImplementation(func, enums):
enumNames = [enum["name"] for enum in enums]
funcName = jsFuncName(func)
# Deduce parameter types
kernelTypes = possibleTypes(func["kernelType"], func["template"])
paramTypes = [paramTypeFromKernelTypes(func["kernelType"], param[0], func["template"], enums) for param in func["params"]]
cases = [(kernelType, [kernelType if param == "same" else param for param in paramTypes]) for kernelType in kernelTypes]
if "alternatives" in func:
for alt in func["alternatives"]:
altKernelTypes = possibleTypes(alt["kernelType"], alt["template"])
altParamTypes = [paramTypeFromKernelTypes(alt["kernelType"], param[0], alt["template"], enums) for param in alt["params"]]
cases += [(kernelType, [kernelType if param == "same" else param for param in altParamTypes]) for kernelType in altKernelTypes]
cases.sort()
typesPerParam = [{case[1][i] for case in cases} for i in range(len(cases[0][1]))]
variantNeeded = [len(types) > 1 for types in typesPerParam]
enumParam = [list(types)[0] in enumNames for types in typesPerParam]
paramTypes = ["cppexpose::Object*"] + ["const cppexpose::Variant&" if needVariant else "int" if isEnum else list(types)[0] for types, needVariant, isEnum in zip(typesPerParam, variantNeeded, enumParam)]
paramNames = ["obj"] + [param[1] for param in func["params"]]
paramList = ", ".join(type + " " + name for type,name in zip(paramTypes, paramNames))
# Parameters with only one possible type may be handled before branching into kernel types
earlyConv = []
for param, enumType in [(name, list(types)[0]) for name, types, isEnum in zip(paramNames[1:], typesPerParam, enumParam) if isEnum]:
enum = [e for e in enums if e["name"] == enumType][0]
earlyConv.append(" const auto {name}_enum = static_cast<{namespace}::{type}>({name});".format(name=param, type=enum["name"], namespace = enum["namespace"]))
earlyConversions = '\n'.join(earlyConv)
if earlyConversions:
earlyConversions += '\n\n'
# Split cases by kernel type
casesByKernelType = dict()
for kernel, params in cases:
if kernel not in casesByKernelType:
casesByKernelType[kernel] = []
casesByKernelType[kernel].append(params)
# Build code for different kernel types
kernelCases = []
for kernelType, cases in sorted(casesByKernelType.items()):
kernelDim = 1 if kernelType == "float" else int(kernelType[-1])
firstLine = " if (auto kernelObj = dynamic_cast<Kernel" + str(kernelDim) + "Object*>(obj))"
neededVariantChecks = False
# Build code for specific parameter type constellations
paramCases = []
for case in cases:
# Check if variants contain acceptable values
variantChecks = []
for name, type, needsVariant in zip(paramNames[1:], case, variantNeeded):
if not needsVariant:
continue
checkFunction = "canBe" + type[0].upper() + type[1:]
variantChecks.append(checkFunction + "(" + name + ")")
neededVariantChecks = True
# Unpack variants to usable values
variantUnpackers = []
for name, type, needsVariant in zip(paramNames[1:], case, variantNeeded):
if not needsVariant:
continue
convFunction = "variantTo" + type[0].upper() + type[1:]
variantUnpackers.append(" const auto {name}_conv = {func}({name});".format(name = name, func = convFunction))
variantUnpackingCode = '\n'.join(variantUnpackers)
if variantUnpackingCode:
variantUnpackingCode += '\n\n'
finalCallParams = ["kernelObj->kernel()"] + [name + ("_enum" if isEnum else "_conv" if needsVariant else "") for name, isEnum, needsVariant in zip(paramNames[1:], enumParam, variantNeeded)]
finalCallParamString = ', '.join(finalCallParams)
finalCallString = " {namespace}::{name}({params});".format(namespace = func["namespace"], name = func["name"], params = finalCallParamString)
innerCode = "{variants}{finalCall}\n return;".format(variants = variantUnpackingCode, finalCall = finalCallString)
caseCode = innerCode
if variantChecks:
variantCheckCode = ' && '.join(variantChecks)
indentedInnerCode = '\n'.join([(" " + line).rstrip() for line in innerCode.split('\n')])
caseCode = " if ({varChecks})\n {{\n{innerCode}\n }}".format(varChecks = variantCheckCode, innerCode = indentedInnerCode)
paramCases.append(caseCode)
if neededVariantChecks:
paramCases.append(" cppassist::error(\"glkernel-JSInterface\") << \"Invalid parameters for " + funcName + "\";\n return;")
paramCasesCode = '\n\n'.join(paramCases)
kernelCaseCode = "{firstLine}\n {{\n{cases}\n }}".format(firstLine = firstLine, cases = paramCasesCode)
kernelCases.append(kernelCaseCode)
kernelCasesCode = '\n\n'.join(kernelCases)
fullCode = """void JSInterface::{funcName}({paramList})
{{
{earlyConv}{cases}
cppassist::error("glkernel-JSInterface") << "Invalid kernel object for {funcName}";
}}""".format(funcName = funcName, paramList = paramList, earlyConv = earlyConversions, cases = kernelCasesCode)
return fullCode
def buildCPPImplementations(funcs, enums):
return '\n\n\n'.join([buildCPPImplementation(func, enums) for func in funcs])
# ------------
# misc
def dedupeFuncs(funcs):
i = 1
while i < len(funcs):
currentFunc = funcs[i]
for otherFunc in funcs[:i]:
if otherFunc["namespace"] != currentFunc["namespace"]:
continue
if otherFunc["name"] != currentFunc["name"]:
continue
if getParamNames(otherFunc["params"]) == getParamNames(currentFunc["params"]):
# identical in JS -> can be safely removed
funcs.remove(currentFunc)
i -= 1
if "alternatives" not in otherFunc:
otherFunc["alternatives"] = []
otherFunc["alternatives"].append(currentFunc)
break
if "renamedAlternatives" not in otherFunc:
otherFunc["renamedAlternatives"] = 0
otherFunc["renamedAlternatives"] += 1
currentFunc["alternativeNumber"] = otherFunc["renamedAlternatives"]
break
i += 1
# ------------
# main
def main(args):
glkernelIncludeDir = "../source/glkernel/include/glkernel"
sourceFiles = [posixpath.join(glkernelIncludeDir, p) for p in os.listdir(glkernelIncludeDir) if p not in ["Kernel.h", "glm_compatability.h"] and p.endswith(".h")]
funcPattern = re.compile(r"^template\s*<(?P<template>.*?)>$\s*^(?P<return>\w+)\s(?P<name>\w+)\(\s*tkernel<(?P<kernelType>.*?)>\s*&\s*\w+\s*(?P<params>(?:,.*?)*)\);$", re.M | re.S)
enumPattern = re.compile(r"^enum(?:\s+class)?\s+(?P<name>\w+)\s*(?::.*?\s*)?\{(?P<content>.*?)\};$", re.M | re.S)
allFunctions = []
allEnums = []
for f in sourceFiles:
content = ''
with open(f,'r') as file:
content = file.read()
namespaces = getNamespaces(content)
functionMatches = [m for m in funcPattern.finditer(content)]
functions = [{
"name": f.group("name"),
"kernelType": f.group("kernelType"),
"namespace": namespaceAtPosition(namespaces, f.start()),
"params": splitParams(f.group("params")),
"return": f.group("return"),
"template": f.group("template")
} for f in functionMatches]
enumMatches = [m for m in enumPattern.finditer(content)]
enums = [{
"name": e.group("name"),
"values": getEnumValues(e.group("content")),
"namespace": namespaceAtPosition(namespaces, e.start())
} for e in enumMatches]
allFunctions.extend(functions)
allEnums.extend(enums)
dedupeFuncs(allFunctions)
funcsJSCode = buildJSNamespaces(allFunctions, allEnums)
enumJSCode = buildJSEnums(allEnums)
templateDir = args.inDir
cppDestDir = args.cppDir
jsDestDir = args.jsDir
with open(templateDir + "/glkernel.js.template", "r") as templateFile:
with open(jsDestDir + "/glkernel.js", "w") as outFile:
outFile.write(templateFile.read().format(enums=enumJSCode, functions=funcsJSCode))
forwardDecls = buildCPPFunctionForwardDecls(allFunctions, allEnums)
with open(templateDir + "/JSInterface.h.template", "r") as templateFile:
with open(cppDestDir + "/JSInterface.h", "w") as outFile:
outFile.write(templateFile.read().format(functionForwardDecls=forwardDecls))
includes = buildCPPIncludes(sourceFiles)
funcAdds = buildCPPFunctionAdds(allFunctions)
funcImpl = buildCPPImplementations(allFunctions, allEnums)
with open(templateDir + "/JSInterface.cpp.template", "r") as templateFile:
with open(cppDestDir + "/JSInterface.cpp", "w") as outFile:
outFile.write(templateFile.read().format(includes=includes, addFunctionCalls=funcAdds, generatedFunctions=funcImpl))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--templates", "-t", metavar="<dir>", type=str, default=".", dest="inDir", help="directory containing template files")
parser.add_argument("--cpp-dest" , "-c", metavar="<dir>", type=str, default=".", dest="cppDir", help="directory where result .h and .cpp files are written to")
parser.add_argument("--js-dest" , "-j", metavar="<dir>", type=str, default=".", dest="jsDir", help="directory where result .js files are written to")
args = parser.parse_args()
main(args)
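# Example invocation (illustrative; script name and paths are assumptions):
#   python generate_jsinterface.py -t templates -c source/tools/glkernel-cli -j data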
| mit | 3,420,205,758,163,455,500 | 37.481553 | 220 | 0.612171 | false | 3.753409 | false | false | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-monitor/azure/mgmt/monitor/models/rule_action.py | 1 | 1307 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RuleAction(Model):
"""The action that is performed when the alert rule becomes active, and when
an alert condition is resolved.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: RuleEmailAction, RuleWebhookAction
:param odatatype: Constant filled by server.
:type odatatype: str
"""
_validation = {
'odatatype': {'required': True},
}
_attribute_map = {
'odatatype': {'key': 'odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odatatype': {'Microsoft.Azure.Management.Insights.Models.RuleEmailAction': 'RuleEmailAction', 'Microsoft.Azure.Management.Insights.Models.RuleWebhookAction': 'RuleWebhookAction'}
}
def __init__(self):
self.odatatype = None
| mit | -2,687,682,908,228,144,000 | 32.512821 | 187 | 0.613619 | false | 4.400673 | false | false | false |
UManPychron/pychron | pychron/experiment/experimentor.py | 1 | 11553 | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Instance, List, on_trait_change, Bool, Event
from pychron.dvc.dvc_irradiationable import DVCIrradiationable
from pychron.experiment.experiment_executor import ExperimentExecutor
from pychron.experiment.factory import ExperimentFactory
from pychron.experiment.queue.experiment_queue import ExperimentQueue
class Experimentor(DVCIrradiationable):
experiment_factory = Instance(ExperimentFactory)
experiment_queue = Instance(ExperimentQueue)
executor = Instance(ExperimentExecutor)
experiment_queues = List
# stats = Instance(StatsGroup, ())
mode = None
# unique_executor_db = False
save_enabled = Bool
# ===========================================================================
# permissions
# ===========================================================================
# max_allowable_runs = 10000
# can_edit_scripts = True
# _last_ver_time = None
# _ver_timeout = 10
# ===========================================================================
# task events
# ===========================================================================
activate_editor_event = Event
save_event = Event
def prepare_destory(self):
if self.executor:
if self.executor.datahub:
self.executor.datahub.prepare_destroy()
if self.experiment_factory:
if self.experiment_factory.run_factory:
if self.experiment_factory.run_factory.datahub:
self.experiment_factory.run_factory.datahub.prepare_destroy()
def load(self):
self.experiment_factory.queue_factory.db_refresh_needed = True
self.experiment_factory.run_factory.db_refresh_needed = True
return True
def reset_run_generator(self):
if self.executor.is_alive():
self.debug('Queue modified. Reset run generator')
# self.executor.queue_modified = True
self.executor.set_queue_modified()
def refresh_executable(self, qs=None):
if qs is None:
qs = self.experiment_queues
if self.executor.is_alive():
qs = (self.executor.experiment_queue,)
self.executor.executable = all([ei.is_executable() for ei in qs])
self.debug('setting executable {}'.format(self.executor.executable))
def update_queues(self):
self._update_queues()
def update_info(self):
try:
self._update()
except BaseException as e:
self.debug_exception()
self.warning_dialog('Failed updating info: Error={}'.format(e))
# ===============================================================================
# info update
# ===============================================================================
def _get_all_automated_runs(self, qs=None):
if qs is None:
qs = self.experiment_queues
return [ai for ei in qs
for ai in ei.automated_runs
if ai.executable]
def _update(self, queues=None):
self.debug('update runs')
if queues is None:
queues = self.experiment_queues
queues = [qi for qi in queues if qi.is_updateable()]
if not queues:
return
self.debug('executor executable {}'.format(self.executor.executable))
self.debug('updating stats, ')
self.executor.stats.calculate()
self.refresh_executable(queues)
self._set_analysis_metadata()
self.debug('info updated')
for qi in queues:
qi.refresh_table_needed = True
def _set_analysis_metadata(self):
cache = dict()
db = self.get_database()
aruns = self._get_all_automated_runs()
with db.session_ctx():
for ai in aruns:
if ai.skip:
continue
ln = ai.labnumber
if ln == 'dg':
continue
# is run in cache
if ln not in cache:
info = db.get_identifier_info(ln)
self.debug('Info for {}={}'.format(ln, info))
if not info:
cache[ln] = dict(identifier_error=True)
else:
info['identifier_error'] = False
cache[ln] = info
ai.trait_set(**cache[ln])
def execute_queues(self, queues):
names = ','.join([e.name for e in queues])
self.debug('queues: n={}, names={}'.format(len(queues), names))
self.executor.trait_set(experiment_queues=queues, experiment_queue=queues[0])
return self.executor.execute()
def verify_database_connection(self, inform=True):
db = self.get_database()
if db is not None:
if db.connect(force=True):
return True
elif inform:
self.warning_dialog('No Database available')
def sync_queue(self, queue):
ms = queue.mass_spectrometer
ed = queue.extract_device
db = self.get_database()
with db.session_ctx():
next_pos = None
for i, ai in enumerate(queue.automated_runs):
if ai.skip or ai.is_special():
continue
kw = {'identifier': ai.identifier, 'position': ai.position,
'mass_spectrometer': ms.lower(),
'extract_device': ed}
if ai.is_step_heat():
kw['aliquot'] = ai.aliquot
kw['extract_value'] = ai.extract_value
self.debug('checking {}/{}. attr={}'.format(i, ai.runid, kw))
aa = db.get_analysis_by_attr(**kw)
if aa is None:
self.debug('----- not found')
if next_pos == ai:
i -= 1
break
elif not self.confirmation_dialog('Found analyses up to {}. '
'position={}, extract={}. '
'Continue searching?'.format(ai.runid, ai.extract_value,
ai.position)):
break
next_pos = queue.automated_runs[i + 1]
if i:
if i == len(queue.automated_runs) - 1:
self.information_dialog('All Analyses from this experiment have been run')
else:
queue.automated_runs = queue.automated_runs[i:]
else:
self.information_dialog('No Analyses from this experiment have been run')
# ===============================================================================
# handlers
# ===============================================================================
def _experiment_queue_changed(self, eq):
if eq:
self.experiment_factory.queue = eq
self.experiment_factory.sync_queue_meta()
self.experiment_factory.edit_enabled = True
else:
self.experiment_factory.edit_enabled = False
@on_trait_change('executor:experiment_queue')
def _activate_editor(self, eq):
self.activate_editor_event = id(eq)
@on_trait_change('experiment_queues[]')
def _update_queues(self):
qs = self.experiment_queues
self.executor.stats.experiment_queues = qs
@on_trait_change('experiment_factory:run_factory:changed')
def _queue_dirty(self):
self.experiment_queue.changed = True
@on_trait_change('experiment_queue:dclicked')
def _dclicked_changed(self, new):
self.experiment_factory.run_factory.edit_mode = True
self._set_factory_runs(self.experiment_queue.selected)
@on_trait_change('experiment_factory:run_factory:update_info_needed')
def _refresh3(self):
self.debug('update info needed fired')
self.update_info()
@on_trait_change('executor:queue_modified')
def _refresh5(self, new):
if new:
self.debug('queue modified fired')
self.update_info()
@on_trait_change('experiment_factory:run_factory:refresh_table_needed')
def _refresh4(self):
for qi in self.experiment_queues:
qi.refresh_table_needed = True
@on_trait_change('experiment_factory:save_button')
def _save_update(self):
self.save_event = True
self.update_info()
@on_trait_change('experiment_queue:refresh_info_needed')
def _handle_refresh(self):
self.update_info()
@on_trait_change('experiment_queue:selected')
def _selected_changed(self, new):
ef = self.experiment_factory
rf = ef.run_factory
rf.edit_mode = False
if new:
self._set_factory_runs(new)
# if self.executor.is_alive():
a = new[-1]
if not a.skip:
self.executor.stats.calculate_at(a, at_times=self.executor.is_alive())
# self.stats.calculate()
@on_trait_change('experiment_factory:queue_factory:delay_between_analyses')
def handle_delay_between_analyses(self, new):
if self.executor.is_alive():
self.executor.experiment_queue.delay_between_analyses = new
def _set_factory_runs(self, new):
ef = self.experiment_factory
rf = ef.run_factory
# print 'set runs'
# rf.special_labnumber = 'Special Labnumber'
rf.suppress_update = True
rf.set_selected_runs(new)
rf.suppress_update = False
def _executor_factory(self):
e = ExperimentExecutor(mode=self.mode,
application=self.application)
e.bind_preferences()
return e
# ===============================================================================
# defaults
# ===============================================================================
def _executor_default(self):
return self._executor_factory()
def _experiment_factory_default(self):
dms = 'Spectrometer'
if self.application:
p2 = 'pychron.spectrometer.base_spectrometer_manager.BaseSpectrometerManager'
spec = self.application.get_service(p2)
if spec:
dms = spec.name.capitalize()
e = ExperimentFactory(application=self.application,
dvc=self.dvc,
default_mass_spectrometer=dms)
return e
# ============= EOF =============================================
| apache-2.0 | -4,886,870,572,820,441,000 | 34.990654 | 110 | 0.521769 | false | 4.566403 | false | false | false |
DramaFever/calcifer | calcifer/tree.py | 1 | 9183 | """
`calcifer.tree` module
This module implements a non-deterministic nested dictionary (tree).
The tree comprises leaf nodes, dict nodes, and "unknown nodes" -- nodes
which are known to exist but undefined beyond that.
Ultimately, the policy tree contains *definitions*, a higher-level abstraction
on "value": LeafPolicyNode uses the property `definition`, which may compare
to specific values or generate a template for procuring the value.
"""
from abc import ABCMeta, abstractmethod
import logging
from calcifer.definitions import Value
logger = logging.getLogger(__name__)
class PolicyNode:
"""
Abstract class for node tree.
"""
__metaclass__ = ABCMeta
@abstractmethod
def get_template(self):
"""
Generate the template for the node (recursively)
"""
pass
@abstractmethod
def select(self, path=None):
"""
Traverse the tree and retrieve a specific node with a given path.
`select` retrieves existing nodes or populates default nodes based
on path values.
Returns a tuple of (selected_node, new_root)
"""
if not path:
return (self, self)
@abstractmethod
def match(self, value):
"""
`match` compares a node with a given value, possibly returning an
altered node in the process. For unknown nodes, this means populating
the node with a leaf node defined as having that value.
For nodes with a more complex definition, the behavior of `match`
defers to the definition of the node.
"""
return False, self
@abstractmethod
def choose(self, step):
"""
Moves down the given step and returns:
(the chosen node, the new version of itself (list or dict), and a dict of the steps not taken)
"""
return (None, None, {})
@abstractmethod
def reconstruct(self, possible_steps):
"""
This method takes in a dictionary of possible steps that could be taken and returns a node object
"""
raise NotImplementedError
@staticmethod
def from_obj(obj):
"""
To facilitate converting nested dict data structures, the static
method `from_obj` recursively constructs a PolicyNode tree from
an object
"""
if isinstance(obj, PolicyNode):
return obj
if isinstance(obj, dict):
return DictPolicyNode(**obj)
if isinstance(obj, list):
return ListPolicyNode(*obj)
return LeafPolicyNode(Value(obj))
class UnknownPolicyNode(PolicyNode):
def __init__(self):
pass
@property
def value(self):
return None
def reconstruct(self, possible_steps):
raise TypeError
def get_template(self):
return {}
def choose(self, step):
if isinstance(step, int):
new_self = ListPolicyNode()
steps_not_taken = {k: UnknownPolicyNode() for k in range(step)}
else:
new_self = DictPolicyNode()
steps_not_taken = {}
return (UnknownPolicyNode(), new_self, steps_not_taken)
def select(self, path=None):
if not path:
return (self, self)
# recurse
first = path[0]
rest = path[1:]
value, subpolicy = UnknownPolicyNode().select(rest)
return value, DictPolicyNode(**{first: subpolicy})
def match(self, value):
return True, LeafPolicyNode(Value(value))
def __repr__(self):
return "UnknownPolicyNode()"
def __eq__(self, other):
return isinstance(other, UnknownPolicyNode)
class LeafPolicyNode(PolicyNode):
def __init__(self, definition=None):
self._definition = definition
@property
def definition(self):
return self._definition
@property
def value(self):
return self._definition.value
def reconstruct(self, possible_steps):
if possible_steps:
raise TypeError
return self.__class__(self._definition)
def get_template(self):
return self.definition.get_template()
def choose(self, step):
raise TypeError("You're at the end dummy!")
def select(self, path=None):
if path:
logger.debug((
"Attempting to select sub-path %r of %r"
), path, self)
raise Exception(
"Node cannot be traversed, attempted sub-path: {}".format(path)
)
return (self, self)
def match(self, value):
matches, new_definition = self.definition.match(value)
return matches, LeafPolicyNode(new_definition)
def __repr__(self):
return (
"LeafPolicyNode("
"definition={definition}"
")"
).format(definition=self.definition)
def __eq__(self, other):
return (
isinstance(other, LeafPolicyNode) and
other.definition == self.definition
)
class DictPolicyNode(PolicyNode):
def __init__(self, **nodes):
self._nodes = {
k: PolicyNode.from_obj(v)
for k, v in nodes.items()
}
@property
def nodes(self):
return self._nodes
@property
def keys(self):
return self._nodes.keys()
@property
def value(self):
return {
name: node.value
for name, node in self.nodes.items()
}
def reconstruct(self, possible_steps):
return DictPolicyNode(**possible_steps)
def choose(self, step):
chosen_node = self._nodes.get(step, UnknownPolicyNode())
new_self = self
steps_not_taken = {k: v for k, v in self._nodes.items() if k != step}
return chosen_node, new_self, steps_not_taken
def get_template(self):
return {
k: v.get_template() for k, v in self.nodes.items()
}
def select(self, path=None):
if not path:
return (self, self)
first = path[0]
rest = path[1:]
node, new_first = self[first].select(rest)
new_nodes = {k: v for k, v in self.nodes.items()}
new_nodes[first] = new_first
return node, DictPolicyNode(**new_nodes)
def match(self, value):
return False, self
def __setitem__(self, key, node):
self._nodes[key] = node
def __getitem__(self, key):
if key not in self._nodes:
return UnknownPolicyNode()
return self._nodes[key]
def __repr__(self):
args = ['{}={}'.format(k, v) for k, v in self.nodes.items()]
return "DictPolicyNode({})".format(", ".join(args))
def __eq__(self, other):
return (
isinstance(other, DictPolicyNode) and
other.nodes == self.nodes
)
class ListPolicyNode(PolicyNode):
def __init__(self, *nodes):
self._nodes = [
PolicyNode.from_obj(v)
for v in nodes
]
@property
def nodes(self):
return self._nodes
@property
def keys(self):
return [key for key in range(len(self._nodes))]
@property
def value(self):
return [
node.value
for node in self.nodes
]
def reconstruct(self, possible_steps):
if not possible_steps:
return ListPolicyNode()
highest_key = sorted(possible_steps.keys(), reverse=True)[0]
return ListPolicyNode(*[
possible_steps.get(i, UnknownPolicyNode())
for i in range(highest_key + 1)
])
def choose(self, step):
if len(self._nodes) > step:
# We have the step for sure
chosen_node = self._nodes[step]
else:
# step does not exist yet, must populate list with UnknownPolicyNodes
chosen_node = UnknownPolicyNode()
new_self = self
steps_not_taken = {i: self._nodes[i] for i in range(len(self._nodes)) if i != step}
return chosen_node, new_self, steps_not_taken
def get_template(self):
return [
v.get_template() for v in self.nodes
]
def select(self, path=None):
if not path:
return (self, self)
first = int(path[0])
rest = path[1:]
node, new_first = self[first].select(rest)
new_nodes = [v for v in self.nodes]
new_nodes[first] = new_first
return node, ListPolicyNode(*new_nodes)
def match(self, value):
return False, self
def __setitem__(self, key, node):
key = int(key)
sparsity = key - len(self._nodes) + 1
self._nodes.extend([UnknownPolicyNode()] * sparsity)
self._nodes[key] = node
def __getitem__(self, key):
try:
key = int(key)
return self._nodes[int(key)]
except:
return UnknownPolicyNode()
def __repr__(self):
args = ['{}'.format(v) for v in self.nodes]
return "ListPolicyNode({})".format(", ".join(args))
def __eq__(self, other):
return (
isinstance(other, ListPolicyNode) and
other.nodes == self.nodes
)
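# Illustrative usage of the tree (the exact behaviour of match() depends on Value.match):
#   root = PolicyNode.from_obj({"user": {"name": "alice"}})
#   node, new_root = root.select(["user", "name"])   # node is a LeafPolicyNode
#   matched, node = node.match("alice")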
| mit | 1,653,217,216,247,063,300 | 25.850877 | 105 | 0.5765 | false | 4.231797 | false | false | false |
amol9/fbstats | fbstats/main.py | 1 | 2731 | import sys
import os
from os.path import exists, join as joinpath
from mutils.system.scheduler import Scheduler, get_scheduler, PlatformError, FrequencyError  # Scheduler: assumed export, needed for frequency_help below
from redcmd import subcmd, CommandLine, CommandLineError, CommandError
from . import globals
from .fb import FB
from .action import Action
@subcmd
def job():
'''Run fbstats as a job.'''
fb = FB()
fb.add_job_perid()
fb.get_friends()
fb.update_stream()
fb.get_stream_job()
fb.clean_duplicates()
fb.get_likes()
fb.get_comments()
@subcmd
def schedule():
'Commands to schedule fb stats collection.'
pass
@subcmd(parent='schedule')
def add(frequency):
'''Add schedule.
frequency: time frequency for changing wallpaper'''
scheduler = get_scheduler()
try:
scheduler.schedule(frequency, 'fbstats job', globals.scheduler_taskname)
print('schedule created..')
except (PlatformError, FrequencyError) as e:
print(e)
raise CommandError()
add.__extrahelp__ = Scheduler.frequency_help + os.linesep
add.__extrahelp__ += 'If schedule already exists, it\'ll be overwritten'
@subcmd(parent='schedule')
def remove():
'Remove schedule.'
try:
scheduler = get_scheduler()
scheduler.remove()
print('schedule removed..')
except (PlatformError, FrequencyError) as e:
print(e)
raise CommandError()
@subcmd
def plot():
'Commands to plot various charts.'
pass
@subcmd(parent='plot')
def likes(count=10):
'''Plot top users by likes count.
count: number of users to plot'''
pass
@subcmd(parent='plot')
def posts(count=10):
'''Plot top users by posts count.
count: number of users to plot'''
pass
@subcmd(parent='plot')
def timeline(first_name, last_name):
'''Plot a user's timeline in terms of posts count.
first_name: first name of the user
last_name: last name of the user'''
pass
@subcmd(parent='plot')
def graph(start_date, end_date):
'''Plot a graph of users connected by count of their likes and comments.
start_date: start date of posts
end_date: end date of posts
Date must be of the form: ddmmmyyyy, e.g. 26jan2015.'''
fb = FB()
fb.render_graph(start=start_date, end=end_date)
@subcmd
def setapp():
	'Set app id and app secret.'
	# NOTE: DBManager and FBAccess are used below but are not imported in this
	# module in the original source; they are assumed to be available here.
	db_path = joinpath(globals.data_dir, globals.db_name)
db = DBManager(db_path)
db.connect()
fba = FBAccess(db)
fba.prompt_app_details()
db.disconnect()
def check_data_dir():
if not exists(globals.data_dir):
os.mkdir(globals.data_dir)
def main():
	check_data_dir()

	# NOTE: the original referenced undefined module-level `fb` and `action`
	# objects here; instantiating them locally is an assumption.
	fb = FB()
	action = Action()
	action.register(r"plot\s+(?P<type>user_posts)\s+(?P<first_name>\w+)\s+(?P<last_name>\w+)", fb.render_plot)
	action.register(r"plot\s+(?P<type>\w+)(?:\s+(?P<count>\d+))?", fb.render_plot, {'type': str, 'count': int})
commandline = CommandLine()
try:
commandline.execute()
except CommandLineError as e:
print(e)
| mit | 7,284,842,101,302,858,000 | 18.789855 | 107 | 0.70011 | false | 2.946063 | false | false | false |
awong1900/platformio | platformio/maintenance.py | 1 | 7117 | # Copyright (C) Ivan Kravets <[email protected]>
# See LICENSE for details.
import re
import struct
from os import remove
from os.path import isdir, isfile, join
from shutil import rmtree
from time import time
import click
from platformio import __version__, app, telemetry
from platformio.commands.install import cli as cmd_install
from platformio.commands.lib import lib_update as cmd_libraries_update
from platformio.commands.update import cli as cli_update
from platformio.commands.upgrade import get_latest_version
from platformio.exception import GetLatestVersionError, UpgraderFailed
from platformio.libmanager import LibraryManager
from platformio.platforms.base import PlatformFactory
from platformio.util import get_home_dir, get_lib_dir
def on_platformio_start(ctx):
telemetry.on_command(ctx)
after_upgrade(ctx)
check_platformio_upgrade()
check_internal_updates(ctx, "platforms")
check_internal_updates(ctx, "libraries")
def on_platformio_end(ctx, result): # pylint: disable=W0613
pass
def on_platformio_exception(e):
telemetry.on_exception(e)
class Upgrader(object):
def __init__(self, from_version, to_version):
self.from_version = self.version_to_int(from_version)
self.to_version = self.version_to_int(to_version)
self._upgraders = (
(self.version_to_int("0.9.0"), self._upgrade_to_0_9_0),
(self.version_to_int("1.0.0"), self._upgrade_to_1_0_0)
)
@staticmethod
def version_to_int(version):
match = re.match(r"(\d+)\.(\d+)\.(\d+)(\D+)?", version)
        assert match is not None and len(match.groups()) == 4
verchrs = [chr(int(match.group(i))) for i in range(1, 4)]
verchrs.append(chr(255 if match.group(4) is None else 0))
return struct.unpack(">I", "".join(verchrs))
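        # e.g. "1.0.0" packs as the byte values (1, 0, 0, 255), while a suffixed
        # version such as "1.0.0rc1" packs as (1, 0, 0, 0), so final releases
        # compare greater than their pre-releases.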
def run(self, ctx):
if self.from_version > self.to_version:
return True
result = [True]
for item in self._upgraders:
if self.from_version >= item[0]:
continue
result.append(item[1](ctx))
return all(result)
def _upgrade_to_0_9_0(self, ctx): # pylint: disable=R0201
prev_platforms = []
# remove platform's folder (obsoleted package structure)
for name in PlatformFactory.get_platforms().keys():
pdir = join(get_home_dir(), name)
if not isdir(pdir):
continue
prev_platforms.append(name)
rmtree(pdir)
# remove unused files
for fname in (".pioupgrade", "installed.json"):
if isfile(join(get_home_dir(), fname)):
remove(join(get_home_dir(), fname))
if prev_platforms:
ctx.invoke(cmd_install, platforms=prev_platforms)
return True
def _upgrade_to_1_0_0(self, ctx): # pylint: disable=R0201
installed_platforms = PlatformFactory.get_platforms(
installed=True).keys()
if installed_platforms:
ctx.invoke(cmd_install, platforms=installed_platforms)
ctx.invoke(cli_update)
return True
def after_upgrade(ctx):
last_version = app.get_state_item("last_version", "0.0.0")
if last_version == __version__:
return
# promotion
click.echo("\nIf you like %s, please:" % (
click.style("PlatformIO", fg="cyan")
))
click.echo(
"- %s us on Twitter to stay up-to-date "
"on the latest project news > %s" %
(click.style("follow", fg="cyan"),
click.style("https://twitter.com/PlatformIO_Org", fg="cyan"))
)
click.echo("- %s us a star on GitHub > %s" % (
click.style("give", fg="cyan"),
click.style("https://github.com/ivankravets/platformio", fg="cyan")
))
click.secho("Thanks a lot!\n", fg="green")
if last_version == "0.0.0":
app.set_state_item("last_version", __version__)
return
click.secho("Please wait while upgrading PlatformIO ...",
fg="yellow")
u = Upgrader(last_version, __version__)
if u.run(ctx):
app.set_state_item("last_version", __version__)
click.secho("PlatformIO has been successfully upgraded to %s!\n" %
__version__, fg="green")
telemetry.on_event(category="Auto", action="Upgrade",
label="%s > %s" % (last_version, __version__))
else:
raise UpgraderFailed()
click.echo("")
def check_platformio_upgrade():
last_check = app.get_state_item("last_check", {})
interval = int(app.get_setting("check_platformio_interval")) * 3600 * 24
if (time() - interval) < last_check.get("platformio_upgrade", 0):
return
last_check['platformio_upgrade'] = int(time())
app.set_state_item("last_check", last_check)
try:
latest_version = get_latest_version()
except GetLatestVersionError:
click.secho("Failed to check for PlatformIO upgrades", fg="red")
return
if (latest_version == __version__ or
Upgrader.version_to_int(latest_version) <
Upgrader.version_to_int(__version__)):
return
click.secho("There is a new version %s of PlatformIO available.\n"
"Please upgrade it via " % latest_version,
fg="yellow", nl=False)
click.secho("platformio upgrade", fg="cyan", nl=False)
click.secho(" command.\nChanges: ", fg="yellow", nl=False)
click.secho("http://docs.platformio.org/en/latest/history.html\n",
fg="cyan")
def check_internal_updates(ctx, what):
last_check = app.get_state_item("last_check", {})
interval = int(app.get_setting("check_%s_interval" % what)) * 3600 * 24
if (time() - interval) < last_check.get(what + "_update", 0):
return
last_check[what + '_update'] = int(time())
app.set_state_item("last_check", last_check)
outdated_items = []
if what == "platforms":
for platform in PlatformFactory.get_platforms(installed=True).keys():
p = PlatformFactory.newPlatform(platform)
if p.is_outdated():
outdated_items.append(platform)
elif what == "libraries":
lm = LibraryManager(get_lib_dir())
outdated_items = lm.get_outdated()
if not outdated_items:
return
click.secho("There are the new updates for %s (%s)" %
(what, ", ".join(outdated_items)), fg="yellow")
if not app.get_setting("auto_update_" + what):
click.secho("Please update them via ", fg="yellow", nl=False)
click.secho("`platformio %supdate`" %
("lib " if what == "libraries" else ""),
fg="cyan", nl=False)
click.secho(" command.\n", fg="yellow")
else:
click.secho("Please wait while updating %s ..." % what, fg="yellow")
if what == "platforms":
ctx.invoke(cli_update)
elif what == "libraries":
ctx.invoke(cmd_libraries_update)
click.echo()
telemetry.on_event(category="Auto", action="Update",
label=what.title())
| mit | -8,118,982,638,193,287,000 | 32.729858 | 77 | 0.603344 | false | 3.651616 | true | false | false |
RPi-Distro/pgzero | pgzero/rect.py | 1 | 15799 | # -*- coding: utf-8 -*-
import pygame.rect
class Rect(pygame.rect.Rect):
__slots__ = ()
# From Pygame docs
VALID_ATTRIBUTES = """
x y
top left bottom right
topleft bottomleft topright bottomright
midtop midleft midbottom midright
center centerx centery
size width height
w h
""".split()
def __setattr__(self, key, value):
try:
pygame.rect.Rect.__setattr__(self, key, value)
except AttributeError as e:
from .spellcheck import suggest
suggestions = suggest(key, self.VALID_ATTRIBUTES)
msg = e.args[0]
if suggestions:
msg += "; did you mean {!r}?".format(suggestions[0])
raise AttributeError(msg) from None
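# Illustrative: a typo such as `rect.centrex = 3` raises an AttributeError whose
# message is suffixed with "; did you mean 'centerx'?"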
Rect.__doc__ = pygame.rect.Rect.__doc__
class NoIntersect(Exception):
pass
class ZRect:
"""ZRect
This is a Python implementation of the pygame Rect class. Its raison
d'être is to allow the coordinates to be floating point. All pygame
functions which require a rect allow for an object with a "rect"
attribute and whose coordinates will be converted to integers implictly.
All functions which require a dict will use the flexible constructor
to convert from: this (or a subclass); a Pygame Rect; a 4-tuple or a
pair of 2-tuples. In addition, they'll recognise any object which has
an (optionally callable) .rect attribute whose value will be used instead.
"""
_item_mapping = dict(enumerate("xywh"))
def __init__(self, *args):
if len(args) == 1:
args = tuple(self._handle_one_arg(args[0]))
#
# At this point we have one of:
#
# x, y, w, h
# (x, y), (w, h)
# (x, y, w, h),
#
if len(args) == 4:
self.x, self.y, self.w, self.h = args
elif len(args) == 2:
(self.x, self.y), (self.w, self.h) = args
elif len(args) == 1:
self.x, self.y, self.w, self.h = args[0]
else:
            raise TypeError("%s should be called with one, two or four arguments" % (self.__class__.__name__,))
self.rect = self
def _handle_one_arg(self, arg):
"""Handle -- possibly recursively -- the case of one parameter
Pygame -- and consequently pgzero -- is very accommodating when constructing
a rect. You can pass four integers, two pairs of 2-tuples, or one 4-tuple.
Also, you can pass an existing Rect-like object, or an object with a .rect
attribute. The object named by the .rect attribute is either one of the above,
or it's a callable object which returns one of the above.
This is evidently a recursive solution where an object with a .rect
attribute can yield an object with a .rect attribute, and so ad infinitum.
"""
#
# If the arg is an existing rect, return its elements
#
if isinstance(arg, RECT_CLASSES):
return arg.x, arg.y, arg.w, arg.h
#
# If it's something with a .rect attribute, start again with
# that attribute, calling it first if it's callable
#
if hasattr(arg, "rect"):
rectobj = arg.rect
if callable(rectobj):
rectobj = rectobj()
return self._handle_one_arg(rectobj)
#
# Otherwise, we assume it's an iterable of four elements
#
return arg
def __repr__(self):
return "<%s (x: %s, y: %s, w: %s, h: %s)>" % (self.__class__.__name__, self.x, self.y, self.w, self.h)
def __reduce__(self):
return self.__class__, (self.x, self.y, self.w, self.h)
def copy(self):
return self.__class__(self.x, self.y, self.w, self.h)
__copy__ = copy
def __len__(self):
return 4
def __getitem__(self, item):
try:
return getattr(self, self._item_mapping[item])
except KeyError:
raise IndexError
def __setitem__(self, item, value):
try:
attribute = self._item_mapping[item]
except KeyError:
raise IndexError
else:
            setattr(self, attribute, value)
def __bool__(self):
return self.w != 0 and self.h != 0
def __iter__(self):
yield self.x
yield self.y
yield self.w
yield self.h
def __hash__(self):
raise TypeError("ZRect instances may not be used as dictionary keys")
def __eq__(self, *other):
rect = self.__class__(*other)
return (self.x, self.y, self.w, self.h) == (rect.x, rect.y, rect.w, rect.h)
def __ne__(self, *other):
rect = self.__class__(*other)
return (self.x, self.y, self.w, self.h) != (rect.x, rect.y, rect.w, rect.h)
def __lt__(self, *other):
rect = self.__class__(*other)
return (self.x, self.y, self.w, self.h) < (rect.x, rect.y, rect.w, rect.h)
def __gt__(self, *other):
rect = self.__class__(*other)
return (self.x, self.y, self.w, self.h) > (rect.x, rect.y, rect.w, rect.h)
def __le__(self, *other):
rect = self.__class__(*other)
return (self.x, self.y, self.w, self.h) <= (rect.x, rect.y, rect.w, rect.h)
def __ge__(self, *other):
rect = self.__class__(*other)
return (self.x, self.y, self.w, self.h) >= (rect.x, rect.y, rect.w, rect.h)
def __contains__(self, other):
"""Test whether a point (x, y) or another rectangle
(anything accepted by ZRect) is contained within this ZRect
"""
if len(other) == 2:
return self.collidepoint(*other)
else:
return self.contains(*other)
def _get_width(self):
return self.w
def _set_width(self, width):
self.w = width
width = property(_get_width, _set_width)
def _get_height(self):
return self.h
def _set_height(self, height):
self.h = height
height = property(_get_height, _set_height)
def _get_top(self):
return self.y
def _set_top(self, top):
self.y = top
top = property(_get_top, _set_top)
def _get_left(self):
return self.x
def _set_left(self, left):
self.x = left
left = property(_get_left, _set_left)
def _get_right(self):
return self.x + self.w
def _set_right(self, right):
self.x = right - self.w
right = property(_get_right, _set_right)
def _get_bottom(self):
return self.y + self.h
def _set_bottom(self, bottom):
self.y = bottom - self.h
bottom = property(_get_bottom, _set_bottom)
def _get_centerx(self):
return self.x + (self.w / 2)
def _set_centerx(self, centerx):
self.x = centerx - (self.w / 2)
centerx = property(_get_centerx, _set_centerx)
def _get_centery(self):
return self.y + (self.h / 2)
def _set_centery(self, centery):
self.y = centery - (self.h / 2)
centery = property(_get_centery, _set_centery)
def _get_topleft(self):
return self.x, self.y
def _set_topleft(self, topleft):
self.x, self.y = topleft
topleft = property(_get_topleft, _set_topleft)
def _get_topright(self):
return self.x + self.w, self.y
def _set_topright(self, topright):
x, y = topright
self.x = x - self.w
self.y = y
topright = property(_get_topright, _set_topright)
def _get_bottomleft(self):
return self.x, self.y + self.h
def _set_bottomleft(self, bottomleft):
x, y = bottomleft
self.x = x
self.y = y - self.h
bottomleft = property(_get_bottomleft, _set_bottomleft)
def _get_bottomright(self):
return self.x + self.w, self.y + self.h
def _set_bottomright(self, bottomright):
x, y = bottomright
self.x = x - self.w
self.y = y - self.h
bottomright = property(_get_bottomright, _set_bottomright)
def _get_midtop(self):
return self.x + self.w / 2, self.y
def _set_midtop(self, midtop):
x, y = midtop
self.x = x - self.w / 2
self.y = y
midtop = property(_get_midtop, _set_midtop)
def _get_midleft(self):
return self.x, self.y + self.h / 2
def _set_midleft(self, midleft):
x, y = midleft
self.x = x
self.y = y - self.h / 2
midleft = property(_get_midleft, _set_midleft)
def _get_midbottom(self):
return self.x + self.w / 2, self.y + self.h
def _set_midbottom(self, midbottom):
x, y = midbottom
self.x = x - self.w / 2
self.y = y - self.h
midbottom = property(_get_midbottom, _set_midbottom)
def _get_midright(self):
return self.x + self.w, self.y + self.h / 2
def _set_midright(self, midright):
x, y = midright
self.x = x - self.w
self.y = y - self.h / 2
midright = property(_get_midright, _set_midright)
def _get_center(self):
return self.x + self.w / 2, self.y + self.h / 2
def _set_center(self, center):
x, y = center
self.x = x - self.w / 2
self.y = y - self.h / 2
center = property(_get_center, _set_center)
def _get_size(self):
return self.w, self.h
def _set_size(self, size):
self.w, self.h = size
size = property(_get_size, _set_size)
def move(self, x, y):
return self.__class__(self.x + x, self.y + y, self.w, self.h)
def move_ip(self, x, y):
self.x += x
self.y += y
def _inflated(self, x, y):
return self.x - x / 2, self.y - y / 2, self.w + x, self.h + y
def inflate(self, x, y):
return self.__class__(*self._inflated(x, y))
def inflate_ip(self, x, y):
self.x, self.y, self.w, self.h = self._inflated(x, y)
def _clamped(self, *other):
rect = self.__class__(*other)
if self.w >= rect.w:
x = rect.x + rect.w / 2 - self.w / 2
elif self.x < rect.x:
x = rect.x
elif self.x + self.w > rect.x + rect.w:
x = rect.x + rect.w - self.w
else:
x = self.x
if self.h >= rect.h:
y = rect.y + rect.h / 2 - self.h / 2
elif self.y < rect.y:
y = rect.y
elif self.y + self.h > rect.y + rect.h:
y = rect.y + rect.h - self.h
else:
y = self.y
return x, y
def clamp(self, *other):
rect = self.__class__(*other)
x, y = self._clamped(rect)
return self.__class__(x, y, self.w, self.h)
def clamp_ip(self, *other):
rect = self.__class__(*other)
self.x, self.y = self._clamped(rect)
def _clipped(self, *other):
rect = self.__class__(*other)
if self.x >= rect.x and self.x < (rect.x + rect.w):
x = self.x
elif rect.x >= self.x and rect.x < (self.x + self.w):
x = rect.x
else:
raise NoIntersect
if (self.x + self.w) > rect.x and (self.x + self.w) <= (rect.x + rect.w):
w = self.x + self.w - x
elif (rect.x + rect.w) > self.x and (rect.x + rect.w) <= (self.x + self.w):
w = rect.x + rect.w - x
else:
raise NoIntersect
if self.y >= rect.y and self.y < (rect.y + rect.h):
y = self.y
elif rect.y >= self.y and rect.y < (self.y + self.h):
y = rect.y
else:
raise NoIntersect
if (self.y + self.h) > rect.y and (self.y + self.h) <= (rect.y + rect.h):
h = self.y + self.h - y
elif (rect.y + rect.h) > self.y and (rect.y + rect.h) <= (self.y + self.h):
h = rect.y + rect.h - y
else:
raise NoIntersect
return x, y, w, h
def clip(self, *other):
rect = self.__class__(*other)
try:
x, y, w, h = self._clipped(rect)
except NoIntersect:
x, y, w, h = self.x, self.y, 0, 0
return self.__class__(x, y, w, h)
def clip_ip(self, *other):
rect = self.__class__(*other)
try:
self.x, self.y, self.w, self.h = self._clipped(rect)
except NoIntersect:
self.x, self.y, self.w, self.h = self.x, self.y, 0, 0
def _unioned(self, *other):
rect = self.__class__(*other)
x = min(self.x, rect.x)
y = min(self.y, rect.y)
w = max(self.x + self.w, rect.x + rect.w) - x
h = max(self.y + self.h, rect.y + rect.h) - y
return x, y, w, h
def union(self, *other):
rect = self.__class__(*other)
return self.__class__(*self._unioned(rect))
def union_ip(self, *other):
rect = self.__class__(*other)
self.x, self.y, self.w, self.h = self._unioned(rect)
def _unionalled(self, others):
allrects = [self] + [self.__class__(other) for other in others]
x = min(r.x for r in allrects)
y = min(r.y for r in allrects)
w = max(r.x + r.w for r in allrects) - x
h = max(r.y + r.h for r in allrects) - y
return x, y, w, h
def unionall(self, others):
return self.__class__(*self._unionalled(others))
def unionall_ip(self, others):
self.x, self.y, self.w, self.h = self._unionalled(others)
def fit(self, *other):
rect = self.__class__(*other)
ratio = max(self.w / rect.w, self.h / rect.h)
w = self.w / ratio
h = self.h / ratio
x = rect.x + (rect.w - w) / 2
y = rect.y + (rect.h - h) / 2
return self.__class__(x, y, w, h)
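        # e.g. ZRect(0, 0, 200, 100).fit(ZRect(0, 0, 100, 100)) -> ZRect(0, 25, 100, 50)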
def normalize(self):
if self.w < 0:
self.x += self.w
self.w = abs(self.w)
if self.h < 0:
self.y += self.h
self.h = abs(self.h)
def contains(self, *other):
rect = self.__class__(*other)
return (
self.x <= rect.x and
self.y <= rect.y and
self.x + self.w >= rect.x + rect.w and
self.y + self.h >= rect.y + rect.h and
self.x + self.w > rect.x and
self.y + self.h > rect.y
)
def collidepoint(self, *args):
if len(args) == 1:
x, y = args[0]
else:
x, y = args
return (
self.x <= x < (self.x + self.w) and
self.y <= y < (self.y + self.h)
)
def colliderect(self, *other):
rect = self.__class__(*other)
return (
self.x < rect.x + rect.w and
self.y < rect.y + rect.h and
self.x + self.w > rect.x and
self.y + self.h > rect.y
)
def collidelist(self, others):
for n, other in enumerate(others):
if self.colliderect(other):
return n
else:
return -1
def collidelistall(self, others):
return [n for n, other in enumerate(others) if self.colliderect(other)]
def collidedict(self, dict, use_values=True):
for k, v in dict.items():
if self.colliderect(v if use_values else k):
return k, v
def collidedictall(self, dict, use_values=True):
return [(k, v) for (k, v) in dict.items() if self.colliderect(v if use_values else k)]
RECT_CLASSES = (pygame.rect.Rect, ZRect)
| lgpl-3.0 | -2,143,938,976,755,739,000 | 29.722892 | 110 | 0.510318 | false | 3.395959 | false | false | false |
Netflix-Skunkworks/cloudaux | cloudaux/orchestration/aws/sqs.py | 1 | 2280 | from cloudaux.aws.sqs import get_queue_url, get_queue_attributes, list_queue_tags, list_dead_letter_source_queues
from cloudaux.decorators import modify_output
from flagpole import FlagRegistry, Flags
import logging
from cloudaux.orchestration.aws import ARN
logger = logging.getLogger('cloudaux')
registry = FlagRegistry()
FLAGS = Flags('BASE', 'TAGS', 'DEAD_LETTER_SOURCE_QUEUES')
@registry.register(flag=FLAGS.TAGS, key='tags')
def get_sqs_tags(sqs_queue, **conn):
return list_queue_tags(QueueUrl=sqs_queue["QueueUrl"], **conn)
@registry.register(flag=FLAGS.DEAD_LETTER_SOURCE_QUEUES, key='dead_letter_source_queues')
def get_dead_letter_queues(sqs_queue, **conn):
return list_dead_letter_source_queues(QueueUrl=sqs_queue["QueueUrl"], **conn)
@registry.register(flag=FLAGS.BASE)
def get_base(sqs_queue, **conn):
sqs_queue["Attributes"] = get_queue_attributes(QueueUrl=sqs_queue["QueueUrl"], AttributeNames=["All"], **conn)
# Get the Queue name:
name = ARN(sqs_queue["Attributes"]["QueueArn"]).parsed_name
return {
'arn': sqs_queue["Attributes"]["QueueArn"],
'url': sqs_queue["QueueUrl"],
'name': name,
'region': conn['region'],
'attributes': sqs_queue["Attributes"],
'_version': 1
}
@modify_output
def get_queue(queue, flags=FLAGS.ALL, **conn):
"""
Orchestrates all the calls required to fully fetch details about an SQS Queue:
{
"Arn": ...,
"Region": ...,
"Name": ...,
"Url": ...,
"Attributes": ...,
"Tags": ...,
"DeadLetterSourceQueues": ...,
"_version": 1
}
:param queue: Either the queue name OR the queue url
:param flags: By default, set to ALL fields.
:param conn: dict containing enough information to make a connection to the desired account. Must at least have
'assume_role' key.
:return: dict containing a fully built out SQS queue.
"""
# Check if this is a Queue URL or a queue name:
if queue.startswith("https://") or queue.startswith("http://"):
queue_name = queue
else:
queue_name = get_queue_url(QueueName=queue, **conn)
sqs_queue = {"QueueUrl": queue_name}
return registry.build_out(flags, sqs_queue, **conn)
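# Example (illustrative; the exact conn kwargs depend on your cloudaux setup):
#   get_queue('my-queue', account_number='012345678910',
#             assume_role='SecurityMonkey', region='us-east-1')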
| apache-2.0 | 8,771,817,787,487,272,000 | 30.666667 | 115 | 0.645614 | false | 3.551402 | false | false | false |
MarkNenadov/YNABpy | YNABpy/BaseClasses.py | 1 | 3510 | """
BaseClasses.py
INTRODUCTION
YNABpy - A Python module for the YNAB (You Need A Budget) application.
AUTHOR
Mark J. Nenadov (2011)
* Essex, Ontario
* Email: <[email protected]>
LICENSING
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version
This program is distributed in the hope that it will be useful
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
try:
from YNABpy.Support import xmlize
except ImportError as err:
    print("FATAL ERROR, critical YNABpy file missing: " + str(err))
class YNAB3_AccountingWidget(object):
"""
Base class for various YNAB3 things
(ie. YNAB3_Payee, YNAB3_Transaction)
"""
dom = None
fields_of_interest = [xmlize('memo'), xmlize('inflow'), xmlize('outflow')]
def __init__(self, transaction_dom, additional_fields_of_interest):
"""Constructor
"""
self.dom = transaction_dom
for field in additional_fields_of_interest:
if field not in self.fields_of_interest:
self.fields_of_interest.append(field)
for child in transaction_dom.childNodes:
self.load_properties(child)
def get_property(self, name):
""" get a property (return None if it doesn't exist)
We do this because this class loads properties from the xml
dynamically, so there's a chance some properties may be missing
"""
if hasattr(self, name):
return getattr(self, name)
return None
def get_inflow(self):
""" get_inflow
"""
return self.get_property('inflow')
def get_outflow(self):
""" get_outflow
"""
return self.get_property('outflow')
def get_balance(self):
""" get_balance
Get the balance for this transaction, accounting
for both outflow and inflow
"""
        if self.get_outflow() is not None and self.get_inflow() is not None:
return float(self.get_inflow()) - float(self.get_outflow())
else:
return None
def get_memo(self):
""" get_memo
"""
return self.get_property('memo')
def toxml(self):
""" Get XML representation of this objects dom
"""
return self.dom.toxml()
class YNAB3_Lister(object):
"""
YNAB3_Lister base class
"""
contents = []
def __init__(self):
"""Constructor
"""
pass
def get_content(self):
""" return array of listed objects
"""
return self.contents
def add(self, t):
""" add an item
"""
self.contents.append(t)
def get_items_by_text_filter(self, field, filter_str):
""" Get items that have a argument-supplied property value that
matches a substring
"""
item_list = []
for item in self.get_content():
if item.get_property(field) != None:
if (item.get_property(field).find(filter_str) != -1):
item_list.append(item)
return item_list
| lgpl-3.0 | -2,300,809,593,029,452,800 | 22.092105 | 78 | 0.611111 | false | 3.993174 | false | false | false |
zhuzhezhe/django_blog | mywebsite/settings.py | 1 | 3965 | #coding:utf-8
"""
Django settings for mywebsite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mx=_n)ji!d!+llfrhkwljbh9*0$l=4io@u0mchg4w#1w77xvk#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'suit',  # django admin backend
    #'django_admin_bootstrapped',  # a bootstrap-styled admin backend
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#apps
'blog',
#'markdown_deux', #markdown support
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mywebsite.urls'
WSGI_APPLICATION = 'mywebsite.wsgi.application'
'''
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# MySQL configuration
# production database configuration
MYSQL_HOST = 'w.rdc.sae.sina.com.cn'
MYSQL_PORT = '3307'
MYSQL_USER = '02z4loxk1y'
MYSQL_PASS = 'iky3xmxxz4jwk1j401lzmzlzmhmykyll05kxkwmx'
MYSQL_DB = 'app_zhuzhezhe'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': MYSQL_DB,
'USER': MYSQL_USER,
'PASSWORD': MYSQL_PASS,
'HOST': MYSQL_HOST,
'PORT': MYSQL_PORT,
}
}
'''
# sqlite3 configuration
'''
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# MySQL configuration
'''
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'mywebsite',
'HOST': '127.0.0.1',
'PORT': '3306',
'USER': 'root',
'PASSWORD': 'password',
}
}
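# A safer variant (sketch, not part of the original config): read the
# credentials from the environment instead of hard-coding them. The
# variable names below are assumptions, not project conventions.
#
#   DATABASES['default']['USER'] = os.environ.get('MYSQL_USER', 'root')
#   DATABASES['default']['PASSWORD'] = os.environ.get('MYSQL_PASSWORD', '')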
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
# settings for static files
STATIC_URL = '/static/'
STATIC_ROOT = 'static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
'/blog/static/',
)
#STATIC_ROOT = "/blog/static/"
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'),)
#other
DAB_FIELD_RENDERER = 'django_admin_bootstrapped.renderers.BootstrapFieldRenderer'
# admin backend configuration
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
TEMPLATE_CONTEXT_PROCESSORS = TCP + (
'django.core.context_processors.request',
)
| mit | 2,951,843,428,582,509,600 | 24.227273 | 81 | 0.663063 | false | 3.078447 | false | false | false |
lesh1k/beatport-verifier | v 1/BP_top100_v_1.1.0.py | 1 | 3528 | #This program is supposed to fetch the top 100 list from Beatport and compare it to the previously
#saved list, then present the difference, hence the new entries.
#Dependencies: written in Python 2.7 on Windows; uses BeautifulSoup4
####Version log##################
##################################################
# v. 1.0.0 Released on 3 December 2012 at 23:31
#
#Basic functionality. First working release.
#
##################################################
#v. 1.1.0 Released 21 March 2013 01:37
#
#The program was not "cross-platform": it could not be run correctly from
#any directory without specifying the correct oldTracks list
#
#1) Added determining the path to old Tracklist
#
import urllib
import codecs
import os
import time
from bs4 import BeautifulSoup
oldList='old.txt'
newList='new.txt'
trackListFile='tracks.txt'
newEntries='NewTracks.txt'
folderName='Data'
curPath='/'.join(__file__.split('/')[:-1])
PATH=os.path.join(os.getcwd(), curPath, folderName)
VERSION='1.1.0'
def GetNewTrackList():
#Returns the list of new tracks
global PATH, folderName, trackListFile
fullPath=os.path.join(PATH,trackListFile)
if os.path.exists(fullPath):
oldData=ReadData(fullPath)
newData=DownloadTrackList()
        return CompareLists(oldData, newData)
else:
try:
os.mkdir(folderName)
print "The program is run for the first time.\n The directory and the initial list with 100 tracks will be created!"
except:
print 'The folder already exists!!'
newData=DownloadTrackList()
return ReadData(fullPath)
def DownloadTrackList():
    #writes the data to the file and returns the list of top 100 tracks from Beatport
URL="http://www.beatport.com/top-100"
html=urllib.urlopen(URL).read()
soup=BeautifulSoup(html)
data=''
#skip the first element because it's the name of the column in the table
trackList=soup.find_all('td',{'class':'secondColumn'})[1:]
for element in trackList:
data= data+codecs.encode(element.text,'utf-8')+'\n'
#Get rid of the last NewLine element
data=data[:-1]
WriteData(trackListFile,data)
data=data.split('\n')
return data
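# The scrape above assumes the Beatport top-100 page marks up each track
# roughly like this (illustrative reconstruction from the selector used):
#
#   <td class="secondColumn">Artist Name - Track Title</td>
#
# with the first such cell being the column header, hence the [1:] slice.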
def ReadData(filePath):
#reads the list of tracks
toRead=open(filePath,'r')
data=toRead.read()
    if '\r\n' in data:
        data=data.replace('\r\n','\n')
    elif '\r' in data:
        data=data.replace('\r','\n')
data=data.split('\n')
toRead.close()
return data
def WriteData(fileName,data):
#Write the list of tracks to a file
global PATH
toWrite=open(os.path.join(PATH,fileName),'w')
toWrite.write(data)
toWrite.close()
def CompareLists(oldL, newL):
    #returns the list of new entries to the top-100, if any
global newEntries
t=time.localtime()
locTime='Date: '+str(t[2])+'.'+str(t[1])+'.'+str(t[0])+'. Time: '+str(str(t[3])+'hrs '+str(t[4])+'mins '+str(t[5])+'s')
NewTracksList=[]
for track in newL:
if track not in oldL:
NewTracksList.append(track)
prettyResult=locTime+'\n\n\n'
if len(NewTracksList)==0:
NewTracksList.append("No New Entries Yet!!")
    #format the result before writing/printing
for element in NewTracksList:
prettyResult=prettyResult+element+'\n'
WriteData(newEntries, prettyResult)
return prettyResult
if __name__=="__main__":
print 'Hello! I am Beatport verifier version '+VERSION+'\nI am already downloading the updated tracklist. Please be patient...\n\n'
result= GetNewTrackList()
if not raw_input("Print the list of new tracks? (ENTER - yes/ any character, then ENTER - no) "):
print '\n\n'+result
raw_input('Execution has finished. Press any key...')
| cc0-1.0 | -2,010,492,077,310,553,000 | 24.021277 | 132 | 0.692744 | false | 3.119363 | false | false | false |
jingzhehu/udacity_mlnd | P4_Training_a_Smartcab_to_Drive/smartcab/simulator.py | 1 | 8851 | import os
import time
import random
import importlib
class Simulator(object):
"""Simulates agents in a dynamic smartcab environment.
Uses PyGame to display GUI, if available.
"""
colors = {
'black' : ( 0, 0, 0),
'white' : (255, 255, 255),
'red' : (255, 0, 0),
'green' : ( 0, 255, 0),
'blue' : ( 0, 0, 255),
'cyan' : ( 0, 200, 200),
'magenta' : (200, 0, 200),
'yellow' : (255, 255, 0),
'orange' : (255, 128, 0)
}
def __init__(self, env, size=None, update_delay=1.0, display=True):
self.env = env
self.size = size if size is not None else ((self.env.grid_size[0] + 1) * self.env.block_size, (self.env.grid_size[1] + 1) * self.env.block_size)
self.width, self.height = self.size
self.bg_color = self.colors['white']
self.road_width = 5
self.road_color = self.colors['black']
self.quit = False
self.start_time = None
self.current_time = 0.0
self.last_updated = 0.0
self.update_delay = update_delay
self.display = display
if self.display:
try:
self.pygame = importlib.import_module('pygame')
self.pygame.init()
self.screen = self.pygame.display.set_mode(self.size)
self.frame_delay = max(1, int(self.update_delay * 1000)) # delay between GUI frames in ms (min: 1)
self.agent_sprite_size = (32, 32)
self.agent_circle_radius = 10 # radius of circle, when using simple representation
for agent in self.env.agent_states:
agent._sprite = self.pygame.transform.smoothscale(
self.pygame.image.load(os.path.join("images", "car-{}.png".format(agent.color))), self.agent_sprite_size)
agent._sprite_size = (agent._sprite.get_width(), agent._sprite.get_height())
self.font = self.pygame.font.Font(None, 28)
self.paused = False
except ImportError as e:
self.display = False
print "Simulator.__init__(): Unable to import pygame; display disabled.\n{}: {}".format(e.__class__.__name__, e)
except Exception as e:
self.display = False
print "Simulator.__init__(): Error initializing GUI objects; display disabled.\n{}: {}".format(e.__class__.__name__, e)
def run(self, n_trials=1):
self.quit = False
for trial in xrange(n_trials):
# print "Simulator.run(): Trial {}".format(trial) # [debug]
self.env.reset()
self.current_time = 0.0
self.last_updated = 0.0
self.start_time = time.time()
while True:
try:
# Update current time
self.current_time = time.time() - self.start_time
#print "Simulator.run(): current_time = {:.3f}".format(self.current_time)
# Handle GUI events
if self.display:
for event in self.pygame.event.get():
if event.type == self.pygame.QUIT:
self.quit = True
elif event.type == self.pygame.KEYDOWN:
if event.key == 27: # Esc
self.quit = True
elif event.unicode == u' ':
self.paused = True
if self.paused:
self.pause()
# Update environment
if self.current_time - self.last_updated >= self.update_delay:
self.env.step(trial=trial)
self.last_updated = self.current_time
# Render GUI and sleep
if self.display:
self.render()
self.pygame.time.wait(self.frame_delay)
except KeyboardInterrupt:
self.quit = True
finally:
if self.quit or self.env.done:
break
if self.quit:
break
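    # Typical driving code (sketch): Environment comes from this project's
    # smartcab.environment module and is assumed to expose the attributes
    # used above (agent_states, roads, intersections, ...).
    #
    #   env = Environment()
    #   sim = Simulator(env, update_delay=0.5, display=True)
    #   sim.run(n_trials=10)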
def render(self):
# Clear screen
self.screen.fill(self.bg_color)
# Draw elements
# * Static elements
for road in self.env.roads:
self.pygame.draw.line(self.screen, self.road_color, (road[0][0] * self.env.block_size, road[0][1] * self.env.block_size), (road[1][0] * self.env.block_size, road[1][1] * self.env.block_size), self.road_width)
for intersection, traffic_light in self.env.intersections.iteritems():
self.pygame.draw.circle(self.screen, self.road_color, (intersection[0] * self.env.block_size, intersection[1] * self.env.block_size), 10)
if traffic_light.state: # North-South is open
self.pygame.draw.line(self.screen, self.colors['green'],
(intersection[0] * self.env.block_size, intersection[1] * self.env.block_size - 15),
(intersection[0] * self.env.block_size, intersection[1] * self.env.block_size + 15), self.road_width)
else: # East-West is open
self.pygame.draw.line(self.screen, self.colors['green'],
(intersection[0] * self.env.block_size - 15, intersection[1] * self.env.block_size),
(intersection[0] * self.env.block_size + 15, intersection[1] * self.env.block_size), self.road_width)
# * Dynamic elements
for agent, state in self.env.agent_states.iteritems():
# Compute precise agent location here (back from the intersection some)
agent_offset = (2 * state['heading'][0] * self.agent_circle_radius, 2 * state['heading'][1] * self.agent_circle_radius)
agent_pos = (state['location'][0] * self.env.block_size - agent_offset[0], state['location'][1] * self.env.block_size - agent_offset[1])
agent_color = self.colors[agent.color]
if hasattr(agent, '_sprite') and agent._sprite is not None:
# Draw agent sprite (image), properly rotated
rotated_sprite = agent._sprite if state['heading'] == (1, 0) else self.pygame.transform.rotate(agent._sprite, 180 if state['heading'][0] == -1 else state['heading'][1] * -90)
self.screen.blit(rotated_sprite,
self.pygame.rect.Rect(agent_pos[0] - agent._sprite_size[0] / 2, agent_pos[1] - agent._sprite_size[1] / 2,
agent._sprite_size[0], agent._sprite_size[1]))
else:
# Draw simple agent (circle with a short line segment poking out to indicate heading)
self.pygame.draw.circle(self.screen, agent_color, agent_pos, self.agent_circle_radius)
self.pygame.draw.line(self.screen, agent_color, agent_pos, state['location'], self.road_width)
if agent.get_next_waypoint() is not None:
self.screen.blit(self.font.render(agent.get_next_waypoint(), True, agent_color, self.bg_color), (agent_pos[0] + 10, agent_pos[1] + 10))
if state['destination'] is not None:
self.pygame.draw.circle(self.screen, agent_color, (state['destination'][0] * self.env.block_size, state['destination'][1] * self.env.block_size), 6)
self.pygame.draw.circle(self.screen, agent_color, (state['destination'][0] * self.env.block_size, state['destination'][1] * self.env.block_size), 15, 2)
# * Overlays
text_y = 10
for text in self.env.status_text.split('\n'):
self.screen.blit(self.font.render(text, True, self.colors['red'], self.bg_color), (100, text_y))
text_y += 20
text_y = 10
time_str = 'time: ' + str(self.env.t)
self.screen.blit(self.font.render(time_str, True, self.colors['red'], self.bg_color), (600, text_y))
# Flip buffers
self.pygame.display.flip()
def pause(self):
abs_pause_time = time.time()
pause_text = "[PAUSED] Press any key to continue..."
self.screen.blit(self.font.render(pause_text, True, self.colors['cyan'], self.bg_color), (100, self.height - 40))
self.pygame.display.flip()
print pause_text # [debug]
while self.paused:
for event in self.pygame.event.get():
if event.type == self.pygame.KEYDOWN:
self.paused = False
self.pygame.time.wait(self.frame_delay)
self.screen.blit(self.font.render(pause_text, True, self.bg_color, self.bg_color), (100, self.height - 40))
self.start_time += (time.time() - abs_pause_time)
| mit | -5,244,304,932,999,362,000 | 49.00565 | 220 | 0.54073 | false | 3.805245 | false | false | false |
ctwiz/stardust | qa/rpc-tests/test_framework/key.py | 1 | 7367 | # Copyright (c) 2011 Sam Rushing
#
# key.py - OpenSSL wrapper
#
# This file is modified from python-stardustlib.
#
"""ECC secp256k1 crypto routines
WARNING: This module does not mlock() secrets; your private keys may end up on
disk in swap! Use with caution!
"""
import ctypes
import ctypes.util
import hashlib
import sys
ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library ('ssl') or 'libeay32')
ssl.BN_new.restype = ctypes.c_void_p
ssl.BN_new.argtypes = []
ssl.BN_bin2bn.restype = ctypes.c_void_p
ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p]
ssl.BN_CTX_free.restype = None
ssl.BN_CTX_free.argtypes = [ctypes.c_void_p]
ssl.BN_CTX_new.restype = ctypes.c_void_p
ssl.BN_CTX_new.argtypes = []
ssl.ECDH_compute_key.restype = ctypes.c_int
ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_sign.restype = ctypes.c_int
ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_verify.restype = ctypes.c_int
ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
ssl.EC_KEY_free.restype = None
ssl.EC_KEY_free.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
ssl.EC_KEY_get0_group.restype = ctypes.c_void_p
ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p
ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_set_private_key.restype = ctypes.c_int
ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_KEY_set_conv_form.restype = None
ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int]
ssl.EC_KEY_set_public_key.restype = ctypes.c_int
ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.i2o_ECPublicKey.restype = ctypes.c_void_p
ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_POINT_new.restype = ctypes.c_void_p
ssl.EC_POINT_new.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_free.restype = None
ssl.EC_POINT_free.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_mul.restype = ctypes.c_int
ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
# this specifies the curve used with ECDSA.
NID_secp256k1 = 714 # from openssl/obj_mac.h
# Thx to Sam Devlin for the ctypes magic 64-bit fix.
def _check_result(val, func, args):
if val == 0:
raise ValueError
else:
return ctypes.c_void_p (val)
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.errcheck = _check_result
class CECKey(object):
"""Wrapper around OpenSSL's EC_KEY"""
POINT_CONVERSION_COMPRESSED = 2
POINT_CONVERSION_UNCOMPRESSED = 4
def __init__(self):
self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1)
def __del__(self):
if ssl:
ssl.EC_KEY_free(self.k)
self.k = None
def set_secretbytes(self, secret):
priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new())
group = ssl.EC_KEY_get0_group(self.k)
pub_key = ssl.EC_POINT_new(group)
ctx = ssl.BN_CTX_new()
if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx):
raise ValueError("Could not derive public key from the supplied secret.")
ssl.EC_KEY_set_private_key(self.k, priv_key)
ssl.EC_KEY_set_public_key(self.k, pub_key)
ssl.EC_POINT_free(pub_key)
ssl.BN_CTX_free(ctx)
return self.k
def set_privkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def set_pubkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def get_privkey(self):
size = ssl.i2d_ECPrivateKey(self.k, 0)
mb_pri = ctypes.create_string_buffer(size)
ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri)))
return mb_pri.raw
def get_pubkey(self):
size = ssl.i2o_ECPublicKey(self.k, 0)
mb = ctypes.create_string_buffer(size)
ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb)))
return mb.raw
def get_raw_ecdh_key(self, other_pubkey):
ecdh_keybuffer = ctypes.create_string_buffer(32)
r = ssl.ECDH_compute_key(ctypes.pointer(ecdh_keybuffer), 32,
ssl.EC_KEY_get0_public_key(other_pubkey.k),
self.k, 0)
if r != 32:
raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed')
return ecdh_keybuffer.raw
def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()):
# FIXME: be warned it's not clear what the kdf should be as a default
r = self.get_raw_ecdh_key(other_pubkey)
return kdf(r)
def sign(self, hash):
# FIXME: need unit tests for below cases
if not isinstance(hash, bytes):
raise TypeError('Hash must be bytes instance; got %r' % hash.__class__)
if len(hash) != 32:
raise ValueError('Hash must be exactly 32 bytes long')
sig_size0 = ctypes.c_uint32()
sig_size0.value = ssl.ECDSA_size(self.k)
mb_sig = ctypes.create_string_buffer(sig_size0.value)
result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k)
assert 1 == result
return mb_sig.raw[:sig_size0.value]
def verify(self, hash, sig):
"""Verify a DER signature"""
return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1
def set_compressed(self, compressed):
if compressed:
form = self.POINT_CONVERSION_COMPRESSED
else:
form = self.POINT_CONVERSION_UNCOMPRESSED
ssl.EC_KEY_set_conv_form(self.k, form)
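# Sign/verify round trip (sketch; the 32-byte secret below is a throwaway
# illustration, never a real key):
#
#   key = CECKey()
#   key.set_secretbytes(b'\x01' * 32)
#   digest = hashlib.sha256(b'message').digest()
#   sig = key.sign(digest)
#   assert key.verify(digest, sig)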
class CPubKey(bytes):
"""An encapsulated public key
Attributes:
is_valid - Corresponds to CPubKey.IsValid()
is_fullyvalid - Corresponds to CPubKey.IsFullyValid()
is_compressed - Corresponds to CPubKey.IsCompressed()
"""
def __new__(cls, buf, _cec_key=None):
self = super(CPubKey, cls).__new__(cls, buf)
if _cec_key is None:
_cec_key = CECKey()
self._cec_key = _cec_key
self.is_fullyvalid = _cec_key.set_pubkey(self) != 0
return self
@property
def is_valid(self):
return len(self) > 0
@property
def is_compressed(self):
return len(self) == 33
def verify(self, hash, sig):
return self._cec_key.verify(hash, sig)
def __str__(self):
return repr(self)
def __repr__(self):
# Always have represent as b'<secret>' so test cases don't have to
# change for py2/3
if sys.version > '3':
return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
else:
return '%s(b%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
| mit | 4,338,475,104,793,338,400 | 33.265116 | 130 | 0.641102 | false | 2.883366 | false | false | false |
openstack/oslo.versionedobjects | oslo_versionedobjects/base.py | 1 | 51253 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common internal object model"""
import abc
import collections
from collections import abc as collections_abc
import copy
import functools
import logging
import warnings
import oslo_messaging as messaging
from oslo_utils import excutils
from oslo_utils import versionutils as vutils
from oslo_versionedobjects._i18n import _
from oslo_versionedobjects import exception
from oslo_versionedobjects import fields as obj_fields
LOG = logging.getLogger('object')
class _NotSpecifiedSentinel(object):
pass
def _get_attrname(name):
"""Return the mangled name of the attribute's underlying storage."""
return '_obj_' + name
def _make_class_properties(cls):
# NOTE(danms/comstud): Inherit fields from super classes.
# mro() returns the current class first and returns 'object' last, so
# those can be skipped. Also be careful to not overwrite any fields
# that already exist. And make sure each cls has its own copy of
# fields and that it is not sharing the dict with a super class.
cls.fields = dict(cls.fields)
for supercls in cls.mro()[1:-1]:
if not hasattr(supercls, 'fields'):
continue
for name, field in supercls.fields.items():
if name not in cls.fields:
cls.fields[name] = field
for name, field in cls.fields.items():
if not isinstance(field, obj_fields.Field):
raise exception.ObjectFieldInvalid(
field=name, objname=cls.obj_name())
def getter(self, name=name):
attrname = _get_attrname(name)
if not hasattr(self, attrname):
self.obj_load_attr(name)
return getattr(self, attrname)
def setter(self, value, name=name, field=field):
attrname = _get_attrname(name)
field_value = field.coerce(self, name, value)
if field.read_only and hasattr(self, attrname):
# Note(yjiang5): _from_db_object() may iterate
# every field and write, no exception in such situation.
if getattr(self, attrname) != field_value:
raise exception.ReadOnlyFieldError(field=name)
else:
return
self._changed_fields.add(name)
try:
return setattr(self, attrname, field_value)
except Exception:
with excutils.save_and_reraise_exception():
attr = "%s.%s" % (self.obj_name(), name)
LOG.exception('Error setting %(attr)s',
{'attr': attr})
def deleter(self, name=name):
attrname = _get_attrname(name)
if not hasattr(self, attrname):
raise AttributeError("No such attribute `%s'" % name)
delattr(self, attrname)
setattr(cls, name, property(getter, setter, deleter))
class VersionedObjectRegistry(object):
_registry = None
def __new__(cls, *args, **kwargs):
if not VersionedObjectRegistry._registry:
VersionedObjectRegistry._registry = object.__new__(
VersionedObjectRegistry, *args, **kwargs)
VersionedObjectRegistry._registry._obj_classes = \
collections.defaultdict(list)
self = object.__new__(cls, *args, **kwargs)
self._obj_classes = VersionedObjectRegistry._registry._obj_classes
return self
def registration_hook(self, cls, index):
pass
def _register_class(self, cls):
def _vers_tuple(obj):
return vutils.convert_version_to_tuple(obj.VERSION)
_make_class_properties(cls)
obj_name = cls.obj_name()
for i, obj in enumerate(self._obj_classes[obj_name]):
self.registration_hook(cls, i)
if cls.VERSION == obj.VERSION:
self._obj_classes[obj_name][i] = cls
break
if _vers_tuple(cls) > _vers_tuple(obj):
# Insert before.
self._obj_classes[obj_name].insert(i, cls)
break
else:
# Either this is the first time we've seen the object or it's
            # an older version than anything we've seen.
self._obj_classes[obj_name].append(cls)
self.registration_hook(cls, 0)
@classmethod
def register(cls, obj_cls):
registry = cls()
registry._register_class(obj_cls)
return obj_cls
@classmethod
def register_if(cls, condition):
def wraps(obj_cls):
if condition:
obj_cls = cls.register(obj_cls)
else:
_make_class_properties(obj_cls)
return obj_cls
return wraps
@classmethod
def objectify(cls, obj_cls):
return cls.register_if(False)(obj_cls)
@classmethod
def obj_classes(cls):
registry = cls()
return registry._obj_classes
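# Registering an object class (sketch): the decorator both records the
# class in the registry (newest version first) and materializes its field
# properties. MyObject and its field are illustrative, not part of this
# module.
#
#   @VersionedObjectRegistry.register
#   class MyObject(VersionedObject):
#       VERSION = '1.0'
#       fields = {'foo': obj_fields.IntegerField()}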
# These are decorators that mark an object's method as remotable.
# If the metaclass is configured to forward object methods to an
# indirection service, these will result in making an RPC call
# instead of directly calling the implementation in the object. Instead,
# the object implementation on the remote end will perform the
# requested action and the result will be returned here.
def remotable_classmethod(fn):
"""Decorator for remotable classmethods."""
@functools.wraps(fn)
def wrapper(cls, context, *args, **kwargs):
if cls.indirection_api:
version_manifest = obj_tree_get_versions(cls.obj_name())
try:
result = cls.indirection_api.object_class_action_versions(
context, cls.obj_name(), fn.__name__, version_manifest,
args, kwargs)
except NotImplementedError:
# FIXME(danms): Maybe start to warn here about deprecation?
result = cls.indirection_api.object_class_action(
context, cls.obj_name(), fn.__name__, cls.VERSION,
args, kwargs)
else:
result = fn(cls, context, *args, **kwargs)
if isinstance(result, VersionedObject):
result._context = context
return result
# NOTE(danms): Make this discoverable
wrapper.remotable = True
wrapper.original_fn = fn
return classmethod(wrapper)
# See comment above for remotable_classmethod()
#
# Note that this will use either the provided context, or the one
# stashed in the object. If neither are present, the object is
# "orphaned" and remotable methods cannot be called.
def remotable(fn):
"""Decorator for remotable object methods."""
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
ctxt = self._context
if ctxt is None:
raise exception.OrphanedObjectError(method=fn.__name__,
objtype=self.obj_name())
if self.indirection_api:
updates, result = self.indirection_api.object_action(
ctxt, self, fn.__name__, args, kwargs)
for key, value in updates.items():
if key in self.fields:
field = self.fields[key]
# NOTE(ndipanov): Since VersionedObjectSerializer will have
# deserialized any object fields into objects already,
# we do not try to deserialize them again here.
if isinstance(value, VersionedObject):
setattr(self, key, value)
else:
setattr(self, key,
field.from_primitive(self, key, value))
self.obj_reset_changes()
self._changed_fields = set(updates.get('obj_what_changed', []))
return result
else:
return fn(self, *args, **kwargs)
wrapper.remotable = True
wrapper.original_fn = fn
return wrapper
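# Marking methods remotable (sketch, continuing the illustrative MyObject
# from above; bodies elided):
#
#   @remotable_classmethod
#   def get_by_id(cls, context, obj_id):
#       ...
#
#   @remotable
#   def save(self):
#       ...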
class VersionedObject(object):
"""Base class and object factory.
This forms the base of all objects that can be remoted or instantiated
via RPC. Simply defining a class that inherits from this base class
will make it remotely instantiatable. Objects should implement the
necessary "get" classmethod routines as well as "save" object methods
as appropriate.
"""
indirection_api = None
# Object versioning rules
#
# Each service has its set of objects, each with a version attached. When
# a client attempts to call an object method, the server checks to see if
# the version of that object matches (in a compatible way) its object
# implementation. If so, cool, and if not, fail.
#
# This version is allowed to have three parts, X.Y.Z, where the .Z element
# is reserved for stable branch backports. The .Z is ignored for the
# purposes of triggering a backport, which means anything changed under
# a .Z must be additive and non-destructive such that a node that knows
# about X.Y can consider X.Y.Z equivalent.
VERSION = '1.0'
# Object namespace for serialization
# NB: Generally this should not be changed, but is needed for backwards
# compatibility
OBJ_SERIAL_NAMESPACE = 'versioned_object'
# Object project namespace for serialization
# This is used to disambiguate owners of objects sharing a common RPC
# medium
OBJ_PROJECT_NAMESPACE = 'versionedobjects'
# The fields present in this object as key:field pairs. For example:
#
# fields = { 'foo': obj_fields.IntegerField(),
# 'bar': obj_fields.StringField(),
# }
fields = {}
obj_extra_fields = []
# Table of sub-object versioning information
#
# This contains a list of version mappings, by the field name of
# the subobject. The mappings must be in order of oldest to
# newest, and are tuples of (my_version, subobject_version). A
# request to backport this object to $my_version will cause the
# subobject to be backported to $subobject_version.
#
# obj_relationships = {
# 'subobject1': [('1.2', '1.1'), ('1.4', '1.2')],
# 'subobject2': [('1.2', '1.0')],
# }
#
# In the above example:
#
# - If we are asked to backport our object to version 1.3,
# subobject1 will be backported to version 1.1, since it was
# bumped to version 1.2 when our version was 1.4.
# - If we are asked to backport our object to version 1.5,
# no changes will be made to subobject1 or subobject2, since
# they have not changed since version 1.4.
# - If we are asked to backlevel our object to version 1.1, we
# will remove both subobject1 and subobject2 from the primitive,
# since they were not added until version 1.2.
obj_relationships = {}
def __init__(self, context=None, **kwargs):
self._changed_fields = set()
self._context = context
for key in kwargs.keys():
setattr(self, key, kwargs[key])
def __repr__(self):
repr_str = '%s(%s)' % (
self.obj_name(),
','.join(['%s=%s' % (name,
(self.obj_attr_is_set(name) and
field.stringify(getattr(self, name)) or
'<?>'))
for name, field in sorted(self.fields.items())]))
return repr_str
def __contains__(self, name):
try:
return self.obj_attr_is_set(name)
except AttributeError:
return False
@classmethod
def to_json_schema(cls):
obj_name = cls.obj_name()
schema = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'title': obj_name,
}
schema.update(obj_fields.Object(obj_name).get_schema())
return schema
@classmethod
def obj_name(cls):
"""Return the object's name
Return a canonical name for this object which will be used over
the wire for remote hydration.
"""
return cls.__name__
@classmethod
def _obj_primitive_key(cls, field):
return '%s.%s' % (cls.OBJ_SERIAL_NAMESPACE, field)
@classmethod
def _obj_primitive_field(cls, primitive, field,
default=obj_fields.UnspecifiedDefault):
key = cls._obj_primitive_key(field)
if default == obj_fields.UnspecifiedDefault:
return primitive[key]
else:
return primitive.get(key, default)
@classmethod
def obj_class_from_name(cls, objname, objver):
"""Returns a class from the registry based on a name and version."""
if objname not in VersionedObjectRegistry.obj_classes():
            LOG.error('Unable to instantiate unregistered object type '
                      '%(objtype)s', dict(objtype=objname))
raise exception.UnsupportedObjectError(objtype=objname)
# NOTE(comstud): If there's not an exact match, return the highest
# compatible version. The objects stored in the class are sorted
# such that highest version is first, so only set compatible_match
# once below.
compatible_match = None
for objclass in VersionedObjectRegistry.obj_classes()[objname]:
if objclass.VERSION == objver:
return objclass
if (not compatible_match and
vutils.is_compatible(objver, objclass.VERSION)):
compatible_match = objclass
if compatible_match:
return compatible_match
# As mentioned above, latest version is always first in the list.
latest_ver = VersionedObjectRegistry.obj_classes()[objname][0].VERSION
raise exception.IncompatibleObjectVersion(objname=objname,
objver=objver,
supported=latest_ver)
@classmethod
def _obj_from_primitive(cls, context, objver, primitive):
self = cls()
self._context = context
self.VERSION = objver
objdata = cls._obj_primitive_field(primitive, 'data')
changes = cls._obj_primitive_field(primitive, 'changes', [])
for name, field in self.fields.items():
if name in objdata:
setattr(self, name, field.from_primitive(self, name,
objdata[name]))
self._changed_fields = set([x for x in changes if x in self.fields])
return self
@classmethod
def obj_from_primitive(cls, primitive, context=None):
"""Object field-by-field hydration."""
objns = cls._obj_primitive_field(primitive, 'namespace')
objname = cls._obj_primitive_field(primitive, 'name')
objver = cls._obj_primitive_field(primitive, 'version')
if objns != cls.OBJ_PROJECT_NAMESPACE:
# NOTE(danms): We don't do anything with this now, but it's
# there for "the future"
raise exception.UnsupportedObjectError(
objtype='%s.%s' % (objns, objname))
objclass = cls.obj_class_from_name(objname, objver)
return objclass._obj_from_primitive(context, objver, primitive)
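    # Round-trip sketch (MyObject is the illustrative registered class
    # from above):
    #
    #   prim = obj.obj_to_primitive()
    #   clone = MyObject.obj_from_primitive(prim, context=ctxt)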
def __deepcopy__(self, memo):
"""Efficiently make a deep copy of this object."""
# NOTE(danms): A naive deepcopy would copy more than we need,
# and since we have knowledge of the volatile bits of the
# object, we can be smarter here. Also, nested entities within
# some objects may be uncopyable, so we can avoid those sorts
# of issues by copying only our field data.
nobj = self.__class__()
# NOTE(sskripnick): we should save newly created object into mem
# to let deepcopy know which branches are already created.
# See launchpad bug #1602314 for more details
memo[id(self)] = nobj
nobj._context = self._context
for name in self.fields:
if self.obj_attr_is_set(name):
nval = copy.deepcopy(getattr(self, name), memo)
setattr(nobj, name, nval)
nobj._changed_fields = set(self._changed_fields)
return nobj
def obj_clone(self):
"""Create a copy."""
return copy.deepcopy(self)
def _obj_relationship_for(self, field, target_version):
# NOTE(danms): We need to be graceful about not having the temporary
# version manifest if called from obj_make_compatible().
if (not hasattr(self, '_obj_version_manifest') or
self._obj_version_manifest is None):
try:
return self.obj_relationships[field]
except KeyError:
raise exception.ObjectActionError(
action='obj_make_compatible',
reason='No rule for %s' % field)
objname = self.fields[field].objname
if objname not in self._obj_version_manifest:
return
# NOTE(danms): Compute a relationship mapping that looks like
# what the caller expects.
return [(target_version, self._obj_version_manifest[objname])]
def _obj_make_obj_compatible(self, primitive, target_version, field):
"""Backlevel a sub-object based on our versioning rules.
This is responsible for backporting objects contained within
this object's primitive according to a set of rules we
maintain about version dependencies between objects. This
requires that the obj_relationships table in this object is
correct and up-to-date.
:param:primitive: The primitive version of this object
:param:target_version: The version string requested for this object
:param:field: The name of the field in this object containing the
sub-object to be backported
"""
relationship_map = self._obj_relationship_for(field, target_version)
if not relationship_map:
# NOTE(danms): This means the field was not specified in the
# version manifest from the client, so it must not want this
# field, so skip.
return
try:
_get_subobject_version(target_version,
relationship_map,
lambda ver: _do_subobject_backport(
ver, self, field, primitive))
except exception.TargetBeforeSubobjectExistedException:
# Subobject did not exist, so delete it from the primitive
del primitive[field]
def obj_make_compatible(self, primitive, target_version):
"""Make an object representation compatible with a target version.
This is responsible for taking the primitive representation of
an object and making it suitable for the given target_version.
This may mean converting the format of object attributes, removing
attributes that have been added since the target version, etc. In
general:
- If a new version of an object adds a field, this routine
should remove it for older versions.
- If a new version changed or restricted the format of a field, this
should convert it back to something a client knowing only of the
older version will tolerate.
- If an object that this object depends on is bumped, then this
object should also take a version bump. Then, this routine should
backlevel the dependent object (by calling its obj_make_compatible())
if the requested version of this object is older than the version
where the new dependent object was added.
:param primitive: The result of :meth:`obj_to_primitive`
:param target_version: The version string requested by the recipient
of the object
:raises: :exc:`oslo_versionedobjects.exception.UnsupportedObjectError`
if conversion is not possible for some reason
"""
for key, field in self.fields.items():
if not isinstance(field, (obj_fields.ObjectField,
obj_fields.ListOfObjectsField)):
continue
if not self.obj_attr_is_set(key):
continue
self._obj_make_obj_compatible(primitive, target_version, key)
def obj_make_compatible_from_manifest(self, primitive, target_version,
version_manifest):
# NOTE(danms): Stash the manifest on the object so we can use it in
# the deeper layers. We do this because obj_make_compatible() is
# defined library API at this point, yet we need to get this manifest
# to the other bits that get called so we can propagate it to child
# calls. It's not pretty, but a tactical solution. Ideally we will
# either evolve or deprecate obj_make_compatible() in a major version
# bump.
self._obj_version_manifest = version_manifest
try:
return self.obj_make_compatible(primitive, target_version)
finally:
delattr(self, '_obj_version_manifest')
def obj_to_primitive(self, target_version=None, version_manifest=None):
"""Simple base-case dehydration.
This calls to_primitive() for each item in fields.
"""
if target_version is None:
target_version = self.VERSION
if (vutils.convert_version_to_tuple(target_version) >
vutils.convert_version_to_tuple(self.VERSION)):
raise exception.InvalidTargetVersion(version=target_version)
primitive = dict()
for name, field in self.fields.items():
if self.obj_attr_is_set(name):
primitive[name] = field.to_primitive(self, name,
getattr(self, name))
# NOTE(danms): If we know we're being asked for a different version,
# then do the compat step. However, even if we think we're not,
# we may have sub-objects that need it, so if we have a manifest we
# have to traverse this object just in case. Previously, we
# required a parent version bump for any child, so the target
# check was enough.
if target_version != self.VERSION or version_manifest:
self.obj_make_compatible_from_manifest(primitive,
target_version,
version_manifest)
obj = {self._obj_primitive_key('name'): self.obj_name(),
self._obj_primitive_key('namespace'): (
self.OBJ_PROJECT_NAMESPACE),
self._obj_primitive_key('version'): target_version,
self._obj_primitive_key('data'): primitive}
if self.obj_what_changed():
# NOTE(cfriesen): if we're downgrading to a lower version, then
# it's possible that self.obj_what_changed() includes fields that
# no longer exist in the lower version. If so, filter them out.
what_changed = self.obj_what_changed()
changes = [field for field in what_changed if field in primitive]
if changes:
obj[self._obj_primitive_key('changes')] = changes
return obj
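    # For the illustrative MyObject above with foo=1 set, the resulting
    # primitive looks roughly like:
    #
    #   {'versioned_object.name': 'MyObject',
    #    'versioned_object.namespace': 'versionedobjects',
    #    'versioned_object.version': '1.0',
    #    'versioned_object.data': {'foo': 1},
    #    'versioned_object.changes': ['foo']}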
def obj_set_defaults(self, *attrs):
if not attrs:
attrs = [name for name, field in self.fields.items()
if field.default != obj_fields.UnspecifiedDefault]
for attr in attrs:
default = copy.deepcopy(self.fields[attr].default)
if default is obj_fields.UnspecifiedDefault:
raise exception.ObjectActionError(
action='set_defaults',
reason='No default set for field %s' % attr)
if not self.obj_attr_is_set(attr):
setattr(self, attr, default)
def obj_load_attr(self, attrname):
"""Load an additional attribute from the real object.
This should load self.$attrname and cache any data that might
be useful for future load operations.
"""
raise NotImplementedError(
_("Cannot load '%s' in the base class") % attrname)
def save(self, context):
"""Save the changed fields back to the store.
This is optional for subclasses, but is presented here in the base
class for consistency among those that do.
"""
raise NotImplementedError(_('Cannot save anything in the base class'))
def obj_what_changed(self):
"""Returns a set of fields that have been modified."""
changes = set([field for field in self._changed_fields
if field in self.fields])
for field in self.fields:
if (self.obj_attr_is_set(field) and
isinstance(getattr(self, field), VersionedObject) and
getattr(self, field).obj_what_changed()):
changes.add(field)
return changes
def obj_get_changes(self):
"""Returns a dict of changed fields and their new values."""
changes = {}
for key in self.obj_what_changed():
changes[key] = getattr(self, key)
return changes
def obj_reset_changes(self, fields=None, recursive=False):
"""Reset the list of fields that have been changed.
:param fields: List of fields to reset, or "all" if None.
:param recursive: Call obj_reset_changes(recursive=True) on
any sub-objects within the list of fields
being reset.
This is NOT "revert to previous values".
Specifying fields on recursive resets will only be honored at the top
level. Everything below the top will reset all.
"""
if recursive:
for field in self.obj_get_changes():
# Ignore fields not in requested set (if applicable)
if fields and field not in fields:
continue
# Skip any fields that are unset
if not self.obj_attr_is_set(field):
continue
value = getattr(self, field)
# Don't reset nulled fields
if value is None:
continue
# Reset straight Object and ListOfObjects fields
if isinstance(self.fields[field], obj_fields.ObjectField):
value.obj_reset_changes(recursive=True)
elif isinstance(self.fields[field],
obj_fields.ListOfObjectsField):
for thing in value:
thing.obj_reset_changes(recursive=True)
if fields:
self._changed_fields -= set(fields)
else:
self._changed_fields.clear()
def obj_attr_is_set(self, attrname):
"""Test object to see if attrname is present.
Returns True if the named attribute has a value set, or
False if not. Raises AttributeError if attrname is not
a valid attribute for this object.
"""
if attrname not in self.obj_fields:
raise AttributeError(
_("%(objname)s object has no attribute '%(attrname)s'") %
{'objname': self.obj_name(), 'attrname': attrname})
return hasattr(self, _get_attrname(attrname))
@property
def obj_fields(self):
return list(self.fields.keys()) + self.obj_extra_fields
@property
def obj_context(self):
return self._context
class ComparableVersionedObject(object):
"""Mix-in to provide comparison methods
When objects are to be compared with each other (in tests for example),
this mixin can be used.
"""
def __eq__(self, obj):
# FIXME(inc0): this can return incorrect value if we consider partially
# loaded objects from db and fields which are dropped out differ
if hasattr(obj, 'obj_to_primitive'):
return self.obj_to_primitive() == obj.obj_to_primitive()
return NotImplemented
def __hash__(self):
return super(ComparableVersionedObject, self).__hash__()
def __ne__(self, obj):
if hasattr(obj, 'obj_to_primitive'):
return self.obj_to_primitive() != obj.obj_to_primitive()
return NotImplemented
class TimestampedObject(object):
"""Mixin class for db backed objects with timestamp fields.
Sqlalchemy models that inherit from the oslo_db TimestampMixin will include
these fields and the corresponding objects will benefit from this mixin.
"""
fields = {
'created_at': obj_fields.DateTimeField(nullable=True),
'updated_at': obj_fields.DateTimeField(nullable=True),
}
class VersionedObjectDictCompat(object):
"""Mix-in to provide dictionary key access compatibility
If an object needs to support attribute access using
dictionary items instead of object attributes, inherit
from this class. This should only be used as a temporary
measure until all callers are converted to use modern
attribute access.
"""
def __iter__(self):
for name in self.obj_fields:
if (self.obj_attr_is_set(name) or
name in self.obj_extra_fields):
yield name
keys = __iter__
def values(self):
for name in self:
yield getattr(self, name)
def items(self):
for name in self:
yield name, getattr(self, name)
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def get(self, key, value=_NotSpecifiedSentinel):
if key not in self.obj_fields:
raise AttributeError("'%s' object has no attribute '%s'" % (
self.__class__, key))
        if value is not _NotSpecifiedSentinel and not self.obj_attr_is_set(key):
return value
else:
return getattr(self, key)
def update(self, updates):
for key, value in updates.items():
setattr(self, key, value)
class ObjectListBase(collections_abc.Sequence):
"""Mixin class for lists of objects.
This mixin class can be added as a base class for an object that
is implementing a list of objects. It adds a single field of 'objects',
which is the list store, and behaves like a list itself. It supports
serialization of the list of objects automatically.
"""
fields = {
'objects': obj_fields.ListOfObjectsField('VersionedObject'),
}
# This is a dictionary of my_version:child_version mappings so that
# we can support backleveling our contents based on the version
# requested of the list object.
child_versions = {}
def __init__(self, *args, **kwargs):
super(ObjectListBase, self).__init__(*args, **kwargs)
if 'objects' not in kwargs:
self.objects = []
self._changed_fields.discard('objects')
def __len__(self):
"""List length."""
return len(self.objects)
def __getitem__(self, index):
"""List index access."""
if isinstance(index, slice):
new_obj = self.__class__()
new_obj.objects = self.objects[index]
# NOTE(danms): We must be mixed in with a VersionedObject!
new_obj.obj_reset_changes()
new_obj._context = self._context
return new_obj
return self.objects[index]
def sort(self, key=None, reverse=False):
self.objects.sort(key=key, reverse=reverse)
def obj_make_compatible(self, primitive, target_version):
# Give priority to using child_versions, if that isn't set, try
# obj_relationships
if self.child_versions:
relationships = self.child_versions.items()
else:
try:
relationships = self._obj_relationship_for('objects',
target_version)
except exception.ObjectActionError:
# No relationship for this found in manifest or
# in obj_relationships
relationships = {}
try:
# NOTE(rlrossit): If we have no version information, just
# backport to child version 1.0 (maintaining default
# behavior)
if relationships:
_get_subobject_version(target_version, relationships,
lambda ver: _do_subobject_backport(
ver, self, 'objects', primitive))
else:
_do_subobject_backport('1.0', self, 'objects', primitive)
except exception.TargetBeforeSubobjectExistedException:
# Child did not exist, so delete it from the primitive
del primitive['objects']
def obj_what_changed(self):
changes = set(self._changed_fields)
for child in self.objects:
if child.obj_what_changed():
changes.add('objects')
return changes
def __add__(self, other):
# Handling arbitrary fields may not make sense if those fields are not
# all concatenatable. Only concatenate if the base 'objects' field is
# the only one and the classes match.
if (self.__class__ == other.__class__ and
list(self.__class__.fields.keys()) == ['objects']):
return self.__class__(objects=self.objects + other.objects)
else:
raise TypeError("List Objects should be of the same type and only "
"have an 'objects' field")
def __radd__(self, other):
if (self.__class__ == other.__class__ and
list(self.__class__.fields.keys()) == ['objects']):
# This should never be run in practice. If the above condition is
# met then __add__ would have been run.
raise NotImplementedError('__radd__ is not implemented for '
'objects of the same type')
else:
raise TypeError("List Objects should be of the same type and only "
"have an 'objects' field")
class VersionedObjectSerializer(messaging.NoOpSerializer):
"""A VersionedObject-aware Serializer.
This implements the Oslo Serializer interface and provides the
ability to serialize and deserialize VersionedObject entities. Any service
that needs to accept or return VersionedObjects as arguments or result
values should pass this to its RPCClient and RPCServer objects.
"""
# Base class to use for object hydration
OBJ_BASE_CLASS = VersionedObject
def _do_backport(self, context, objprim, objclass):
obj_versions = obj_tree_get_versions(objclass.obj_name())
indirection_api = self.OBJ_BASE_CLASS.indirection_api
try:
return indirection_api.object_backport_versions(
context, objprim, obj_versions)
except NotImplementedError:
# FIXME(danms): Maybe start to warn here about deprecation?
return indirection_api.object_backport(context, objprim,
objclass.VERSION)
def _process_object(self, context, objprim):
try:
return self.OBJ_BASE_CLASS.obj_from_primitive(
objprim, context=context)
except exception.IncompatibleObjectVersion:
with excutils.save_and_reraise_exception(reraise=False) as ctxt:
verkey = \
'%s.version' % self.OBJ_BASE_CLASS.OBJ_SERIAL_NAMESPACE
objver = objprim[verkey]
if objver.count('.') == 2:
# NOTE(danms): For our purposes, the .z part of the version
# should be safe to accept without requiring a backport
objprim[verkey] = \
'.'.join(objver.split('.')[:2])
return self._process_object(context, objprim)
namekey = '%s.name' % self.OBJ_BASE_CLASS.OBJ_SERIAL_NAMESPACE
objname = objprim[namekey]
supported = VersionedObjectRegistry.obj_classes().get(objname,
[])
if self.OBJ_BASE_CLASS.indirection_api and supported:
return self._do_backport(context, objprim, supported[0])
else:
ctxt.reraise = True
def _process_iterable(self, context, action_fn, values):
"""Process an iterable, taking an action on each value.
:param:context: Request context
:param:action_fn: Action to take on each item in values
:param:values: Iterable container of things to take action on
:returns: A new container of the same type (except set) with
items from values having had action applied.
"""
iterable = values.__class__
if issubclass(iterable, dict):
return iterable([(k, action_fn(context, v))
for k, v in values.items()])
else:
# NOTE(danms, gibi) A set can't have an unhashable value inside,
# such as a dict. Convert the set to list, which is fine, since we
# can't send them over RPC anyway. We convert it to list as this
# way there will be no semantic change between the fake rpc driver
# used in functional test and a normal rpc driver.
if iterable == set:
iterable = list
return iterable([action_fn(context, value) for value in values])
def serialize_entity(self, context, entity):
if isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.serialize_entity,
entity)
elif (hasattr(entity, 'obj_to_primitive') and
callable(entity.obj_to_primitive)):
entity = entity.obj_to_primitive()
return entity
def deserialize_entity(self, context, entity):
namekey = '%s.name' % self.OBJ_BASE_CLASS.OBJ_SERIAL_NAMESPACE
if isinstance(entity, dict) and namekey in entity:
entity = self._process_object(context, entity)
elif isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.deserialize_entity,
entity)
return entity
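# Wiring the serializer into oslo.messaging (sketch; transport, target and
# endpoint setup elided):
#
#   serializer = VersionedObjectSerializer()
#   server = messaging.get_rpc_server(transport, target, endpoints,
#                                     serializer=serializer)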
class VersionedObjectIndirectionAPI(object, metaclass=abc.ABCMeta):
def object_action(self, context, objinst, objmethod, args, kwargs):
"""Perform an action on a VersionedObject instance.
When indirection_api is set on a VersionedObject (to a class
implementing this interface), method calls on remotable methods
will cause this to be executed to actually make the desired
call. This often involves performing RPC.
:param context: The context within which to perform the action
:param objinst: The object instance on which to perform the action
:param objmethod: The name of the action method to call
:param args: The positional arguments to the action method
:param kwargs: The keyword arguments to the action method
:returns: The result of the action method
"""
pass
def object_class_action(self, context, objname, objmethod, objver,
args, kwargs):
""".. deprecated:: 0.10.0
Use :func:`object_class_action_versions` instead.
Perform an action on a VersionedObject class.
When indirection_api is set on a VersionedObject (to a class
implementing this interface), classmethod calls on
remotable_classmethod methods will cause this to be executed to
actually make the desired call. This usually involves performing
RPC.
:param context: The context within which to perform the action
:param objname: The registry name of the object
:param objmethod: The name of the action method to call
:param objver: The (remote) version of the object on which the
action is being taken
:param args: The positional arguments to the action method
:param kwargs: The keyword arguments to the action method
:returns: The result of the action method, which may (or may not)
be an instance of the implementing VersionedObject class.
"""
pass
def object_class_action_versions(self, context, objname, objmethod,
object_versions, args, kwargs):
"""Perform an action on a VersionedObject class.
When indirection_api is set on a VersionedObject (to a class
implementing this interface), classmethod calls on
remotable_classmethod methods will cause this to be executed to
actually make the desired call. This usually involves performing
RPC.
This differs from object_class_action() in that it is provided
with object_versions, a manifest of client-side object versions
for easier nested backports. The manifest is the result of
calling obj_tree_get_versions().
NOTE: This was not in the initial spec for this interface, so the
base class raises NotImplementedError if you don't implement it.
For backports, this method will be tried first, and if unimplemented,
will fall back to object_class_action(). New implementations should
provide this method instead of object_class_action()
:param context: The context within which to perform the action
:param objname: The registry name of the object
:param objmethod: The name of the action method to call
:param object_versions: A dict of {objname: version} mappings
:param args: The positional arguments to the action method
:param kwargs: The keyword arguments to the action method
:returns: The result of the action method, which may (or may not)
be an instance of the implementing VersionedObject class.
"""
warnings.warn('object_class_action() is deprecated in favor of '
'object_class_action_versions() and will be removed '
'in a later release', DeprecationWarning)
raise NotImplementedError('Multi-version class action not supported')
def object_backport(self, context, objinst, target_version):
""".. deprecated:: 0.10.0
Use :func:`object_backport_versions` instead.
Perform a backport of an object instance to a specified version.
When indirection_api is set on a VersionedObject (to a class
implementing this interface), the default behavior of the base
VersionedObjectSerializer, upon receiving an object with a version
        newer than what is in the local registry, is to call this method to
request a backport of the object. In an environment where there is
an RPC-able service on the bus which can gracefully downgrade newer
        objects for older services, this method serves as a translation
mechanism for older code when receiving objects from newer code.
NOTE: This older/original method is soon to be deprecated. When a
backport is required, the newer object_backport_versions() will be
tried, and if it raises NotImplementedError, then we will fall back
to this (less optimal) method.
:param context: The context within which to perform the backport
:param objinst: An instance of a VersionedObject to be backported
:param target_version: The maximum version of the objinst's class
that is understood by the requesting host.
:returns: The downgraded instance of objinst
"""
pass
def object_backport_versions(self, context, objinst, object_versions):
"""Perform a backport of an object instance.
This method is basically just like object_backport() but instead of
providing a specific target version for the toplevel object and
relying on the service-side mapping to handle sub-objects, this sends
a mapping of all the dependent objects and their client-supported
versions. The server will backport objects within the tree starting
at objinst to the versions specified in object_versions, removing
objects that have no entry. Use obj_tree_get_versions() to generate
this mapping.
NOTE: This was not in the initial spec for this interface, so the
base class raises NotImplementedError if you don't implement it.
For backports, this method will be tried first, and if unimplemented,
will fall back to object_backport().
:param context: The context within which to perform the backport
:param objinst: An instance of a VersionedObject to be backported
:param object_versions: A dict of {objname: version} mappings
"""
warnings.warn('object_backport() is deprecated in favor of '
'object_backport_versions() and will be removed '
'in a later release', DeprecationWarning)
raise NotImplementedError('Multi-version backport not supported')
def obj_make_list(context, list_obj, item_cls, db_list, **extra_args):
"""Construct an object list from a list of primitives.
This calls item_cls._from_db_object() on each item of db_list, and
adds the resulting object to list_obj.
:param:context: Request context
:param:list_obj: An ObjectListBase object
:param:item_cls: The VersionedObject class of the objects within the list
:param:db_list: The list of primitives to convert to objects
:param:extra_args: Extra arguments to pass to _from_db_object()
:returns: list_obj
"""
list_obj.objects = []
for db_item in db_list:
item = item_cls._from_db_object(context, item_cls(), db_item,
**extra_args)
list_obj.objects.append(item)
list_obj._context = context
list_obj.obj_reset_changes()
return list_obj
def obj_tree_get_versions(objname, tree=None):
"""Construct a mapping of dependent object versions.
This method builds a list of dependent object versions given a top-
level object with other objects as fields. It walks the tree recursively
to determine all the objects (by symbolic name) that could be contained
within the top-level object, and the maximum versions of each. The result
is a dict like::
{'MyObject': '1.23', ... }
:param objname: The top-level object at which to start
:param tree: Used internally, pass None here.
:returns: A dictionary of object names and versions
"""
if tree is None:
tree = {}
if objname in tree:
return tree
objclass = VersionedObjectRegistry.obj_classes()[objname][0]
tree[objname] = objclass.VERSION
for field_name in objclass.fields:
field = objclass.fields[field_name]
if isinstance(field, obj_fields.ObjectField):
child_cls = field._type._obj_name
elif isinstance(field, obj_fields.ListOfObjectsField):
child_cls = field._type._element_type._type._obj_name
else:
continue
try:
obj_tree_get_versions(child_cls, tree=tree)
except IndexError:
raise exception.UnregisteredSubobject(
child_objname=child_cls, parent_objname=objname)
return tree
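# Illustrative use of the mapping above (a sketch, not upstream code; the
# object names and versions are hypothetical):
#
#     versions = obj_tree_get_versions('MyParent')
#     # -> {'MyParent': '1.23', 'MyChild': '1.5'}
#     # suitable as the object_versions argument of the
#     # object_class_action_versions() / object_backport_versions() calls above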
def _get_subobject_version(tgt_version, relationships, backport_func):
"""Get the version to which we need to convert a subobject.
This uses the relationships between a parent and a subobject,
along with the target parent version, to decide the version we need
to convert a subobject to. If the subobject did not exist in the parent at
    the target version, TargetBeforeSubobjectExistedException is raised. If there
is a need to backport, backport_func is called and the subobject version
to backport to is passed in.
:param tgt_version: The version we are converting the parent to
:param relationships: A list of (parent, subobject) version tuples
:param backport_func: A backport function that takes in the subobject
version
    :returns: None; the selected subobject version is passed to backport_func
"""
tgt = vutils.convert_version_to_tuple(tgt_version)
for index, versions in enumerate(relationships):
parent, child = versions
parent = vutils.convert_version_to_tuple(parent)
if tgt < parent:
if index == 0:
# We're backporting to a version of the parent that did
# not contain this subobject
raise exception.TargetBeforeSubobjectExistedException(
target_version=tgt_version)
else:
# We're in a gap between index-1 and index, so set the desired
# version to the previous index's version
child = relationships[index - 1][1]
backport_func(child)
return
elif tgt == parent:
# We found the version we want, so backport to it
backport_func(child)
return
def _do_subobject_backport(to_version, parent, field, primitive):
obj = getattr(parent, field)
manifest = (hasattr(parent, '_obj_version_manifest') and
parent._obj_version_manifest or None)
if isinstance(obj, VersionedObject):
obj.obj_make_compatible_from_manifest(
obj._obj_primitive_field(primitive[field], 'data'),
to_version, version_manifest=manifest)
ver_key = obj._obj_primitive_key('version')
primitive[field][ver_key] = to_version
elif isinstance(obj, list):
for i, element in enumerate(obj):
element.obj_make_compatible_from_manifest(
element._obj_primitive_field(primitive[field][i], 'data'),
to_version, version_manifest=manifest)
ver_key = element._obj_primitive_key('version')
primitive[field][i][ver_key] = to_version
| apache-2.0 | -5,300,163,210,659,548,000 | 41.287954 | 79 | 0.612491 | false | 4.523255 | false | false | false |
henkhaus/wow | testing/plotter.py | 1 | 1278 | from pymongo import MongoClient
from matplotlib import pyplot as plt
import os
from datetime import datetime, date, time, timedelta
client = MongoClient()
# using wowtest.auctiondata
db = client.wowtest
posts = db.auctiondata
auctions = posts.find().limit(10)
#time.time() into datetime --->
#datetime.datetime.fromtimestamp('xxxx').strftime('%c')
def dt_to_timestamp(dt):
    #timestamp = (dt - datetime(1970, 1, 1)).total_seconds()
    return int(dt.strftime('%s'))
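# Portability note: '%s' is a platform-specific strftime directive (not
# available everywhere). A rough alternative, assuming dt is a naive UTC
# datetime:
#
#     def dt_to_timestamp(dt):
#         return int((dt - datetime(1970, 1, 1)).total_seconds())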
def getdata(num, quantum):
valid = []
today = datetime.combine(date.today(), time())
    for i in range(num):  # one bucket per hour; keeps the bar count aligned with the n tick labels below
day = today - i*quantum
gte = dt_to_timestamp(day)
lt = dt_to_timestamp(day+quantum)
time_query = {'$gte':gte, '$lt':lt}
valid.insert(0, posts.find({'viewtime':time_query}).count())
return valid
def format_date(x, n):
today = datetime.combine(date.today(), time())
day = today - timedelta(hours=n-x-1)
return day.strftime('%m%d%H')
def plotbar(data, color):
plt.bar(range(len(data)), data, align='center', color=color)
# run
n = 48
val = getdata(n, timedelta(hours=1))
plotbar(val, '#4788d2')
plt.xticks(range(n), [format_date(i, n) for i in range(n)], size='small', rotation=90)
plt.grid(axis='y')
plt.show()
| apache-2.0 | 1,368,956,586,611,997,000 | 22.666667 | 86 | 0.649452 | false | 3.079518 | false | false | false |
davy39/eric | Graphics/UMLDialog.py | 1 | 14316 | # -*- coding: utf-8 -*-
# Copyright (c) 2007 - 2014 Detlev Offenbach <[email protected]>
#
"""
Module implementing a dialog showing UML like diagrams.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSlot, Qt, QFileInfo
from PyQt5.QtWidgets import QAction, QToolBar, QGraphicsScene
from E5Gui import E5MessageBox, E5FileDialog
from E5Gui.E5MainWindow import E5MainWindow
import UI.Config
import UI.PixmapCache
class UMLDialog(E5MainWindow):
"""
Class implementing a dialog showing UML like diagrams.
"""
NoDiagram = 255
ClassDiagram = 0
PackageDiagram = 1
ImportsDiagram = 2
ApplicationDiagram = 3
FileVersions = ["1.0"]
def __init__(self, diagramType, project, path="", parent=None,
initBuilder=True, **kwargs):
"""
Constructor
@param diagramType type of the diagram (one of ApplicationDiagram,
ClassDiagram, ImportsDiagram, NoDiagram, PackageDiagram)
@param project reference to the project object (Project)
@param path file or directory path to build the diagram from (string)
@param parent parent widget of the dialog (QWidget)
@keyparam initBuilder flag indicating to initialize the diagram
builder (boolean)
@keyparam kwargs diagram specific data
"""
super(UMLDialog, self).__init__(parent)
self.setObjectName("UMLDialog")
self.__diagramType = diagramType
self.__project = project
from .UMLGraphicsView import UMLGraphicsView
self.scene = QGraphicsScene(0.0, 0.0, 800.0, 600.0)
self.umlView = UMLGraphicsView(self.scene, parent=self)
self.builder = self.__diagramBuilder(
self.__diagramType, path, **kwargs)
if self.builder and initBuilder:
self.builder.initialize()
self.__fileName = ""
self.__initActions()
self.__initToolBars()
self.setCentralWidget(self.umlView)
self.umlView.relayout.connect(self.__relayout)
self.setWindowTitle(self.__diagramTypeString())
def __initActions(self):
"""
Private slot to initialize the actions.
"""
self.closeAct = \
QAction(UI.PixmapCache.getIcon("close.png"),
self.tr("Close"), self)
self.closeAct.triggered.connect(self.close)
self.openAct = \
QAction(UI.PixmapCache.getIcon("open.png"),
self.tr("Load"), self)
self.openAct.triggered.connect(self.load)
self.saveAct = \
QAction(UI.PixmapCache.getIcon("fileSave.png"),
self.tr("Save"), self)
self.saveAct.triggered.connect(self.__save)
self.saveAsAct = \
QAction(UI.PixmapCache.getIcon("fileSaveAs.png"),
self.tr("Save As..."), self)
self.saveAsAct.triggered.connect(self.__saveAs)
self.saveImageAct = \
QAction(UI.PixmapCache.getIcon("fileSavePixmap.png"),
self.tr("Save as Image"), self)
self.saveImageAct.triggered.connect(self.umlView.saveImage)
self.printAct = \
QAction(UI.PixmapCache.getIcon("print.png"),
self.tr("Print"), self)
self.printAct.triggered.connect(self.umlView.printDiagram)
self.printPreviewAct = \
QAction(UI.PixmapCache.getIcon("printPreview.png"),
self.tr("Print Preview"), self)
self.printPreviewAct.triggered.connect(
self.umlView.printPreviewDiagram)
def __initToolBars(self):
"""
Private slot to initialize the toolbars.
"""
self.windowToolBar = QToolBar(self.tr("Window"), self)
self.windowToolBar.setIconSize(UI.Config.ToolBarIconSize)
self.windowToolBar.addAction(self.closeAct)
self.fileToolBar = QToolBar(self.tr("File"), self)
self.fileToolBar.setIconSize(UI.Config.ToolBarIconSize)
self.fileToolBar.addAction(self.openAct)
self.fileToolBar.addSeparator()
self.fileToolBar.addAction(self.saveAct)
self.fileToolBar.addAction(self.saveAsAct)
self.fileToolBar.addAction(self.saveImageAct)
self.fileToolBar.addSeparator()
self.fileToolBar.addAction(self.printPreviewAct)
self.fileToolBar.addAction(self.printAct)
self.umlToolBar = self.umlView.initToolBar()
self.addToolBar(Qt.TopToolBarArea, self.fileToolBar)
self.addToolBar(Qt.TopToolBarArea, self.windowToolBar)
self.addToolBar(Qt.TopToolBarArea, self.umlToolBar)
def show(self, fromFile=False):
"""
Public method to show the dialog.
        @keyparam fromFile flag indicating that the diagram was loaded
from file (boolean)
"""
if not fromFile and self.builder:
self.builder.buildDiagram()
super(UMLDialog, self).show()
def __relayout(self):
"""
Private method to relayout the diagram.
"""
if self.builder:
self.builder.buildDiagram()
def __diagramBuilder(self, diagramType, path, **kwargs):
"""
Private method to instantiate a diagram builder object.
@param diagramType type of the diagram
(one of ApplicationDiagram, ClassDiagram, ImportsDiagram,
PackageDiagram)
@param path file or directory path to build the diagram from (string)
@keyparam kwargs diagram specific data
@return reference to the instantiated diagram builder
@exception ValueError raised to indicate an illegal diagram type
"""
if diagramType == UMLDialog.ClassDiagram:
from .UMLClassDiagramBuilder import UMLClassDiagramBuilder
return UMLClassDiagramBuilder(
self, self.umlView, self.__project, path, **kwargs)
elif diagramType == UMLDialog.PackageDiagram:
from .PackageDiagramBuilder import PackageDiagramBuilder
return PackageDiagramBuilder(
self, self.umlView, self.__project, path, **kwargs)
elif diagramType == UMLDialog.ImportsDiagram:
from .ImportsDiagramBuilder import ImportsDiagramBuilder
return ImportsDiagramBuilder(
self, self.umlView, self.__project, path, **kwargs)
elif diagramType == UMLDialog.ApplicationDiagram:
from .ApplicationDiagramBuilder import ApplicationDiagramBuilder
return ApplicationDiagramBuilder(
self, self.umlView, self.__project, **kwargs)
elif diagramType == UMLDialog.NoDiagram:
return None
else:
raise ValueError(self.tr(
"Illegal diagram type '{0}' given.").format(diagramType))
def __diagramTypeString(self):
"""
Private method to generate a readable string for the diagram type.
@return readable type string (string)
"""
if self.__diagramType == UMLDialog.ClassDiagram:
return "Class Diagram"
elif self.__diagramType == UMLDialog.PackageDiagram:
return "Package Diagram"
elif self.__diagramType == UMLDialog.ImportsDiagram:
return "Imports Diagram"
elif self.__diagramType == UMLDialog.ApplicationDiagram:
return "Application Diagram"
else:
return "Illegal Diagram Type"
def __save(self):
"""
Private slot to save the diagram with the current name.
"""
self.__saveAs(self.__fileName)
@pyqtSlot()
def __saveAs(self, filename=""):
"""
Private slot to save the diagram.
@param filename name of the file to write to (string)
"""
if not filename:
fname, selectedFilter = E5FileDialog.getSaveFileNameAndFilter(
self,
self.tr("Save Diagram"),
"",
self.tr("Eric Graphics File (*.e5g);;All Files (*)"),
"",
E5FileDialog.Options(E5FileDialog.DontConfirmOverwrite))
if not fname:
return
ext = QFileInfo(fname).suffix()
if not ext:
ex = selectedFilter.split("(*")[1].split(")")[0]
if ex:
fname += ex
if QFileInfo(fname).exists():
res = E5MessageBox.yesNo(
self,
self.tr("Save Diagram"),
self.tr("<p>The file <b>{0}</b> already exists."
" Overwrite it?</p>").format(fname),
icon=E5MessageBox.Warning)
if not res:
return
filename = fname
lines = [
"version: 1.0",
"diagram_type: {0} ({1})".format(
self.__diagramType, self.__diagramTypeString()),
"scene_size: {0};{1}".format(self.scene.width(),
self.scene.height()),
]
persistenceData = self.builder.getPersistenceData()
if persistenceData:
lines.append("builder_data: {0}".format(persistenceData))
lines.extend(self.umlView.getPersistenceData())
try:
f = open(filename, "w", encoding="utf-8")
f.write("\n".join(lines))
f.close()
except (IOError, OSError) as err:
E5MessageBox.critical(
self,
self.tr("Save Diagram"),
self.tr(
"""<p>The file <b>{0}</b> could not be saved.</p>"""
"""<p>Reason: {1}</p>""").format(filename, str(err)))
return
self.__fileName = filename
def load(self):
"""
Public method to load a diagram from a file.
@return flag indicating success (boolean)
"""
filename = E5FileDialog.getOpenFileName(
self,
self.tr("Load Diagram"),
"",
self.tr("Eric Graphics File (*.e5g);;All Files (*)"))
if not filename:
# Cancelled by user
return False
try:
f = open(filename, "r", encoding="utf-8")
data = f.read()
f.close()
except (IOError, OSError) as err:
E5MessageBox.critical(
self,
self.tr("Load Diagram"),
self.tr(
"""<p>The file <b>{0}</b> could not be read.</p>"""
"""<p>Reason: {1}</p>""").format(filename, str(err)))
return False
lines = data.splitlines()
if len(lines) < 3:
self.__showInvalidDataMessage(filename)
return False
try:
# step 1: check version
linenum = 0
key, value = lines[linenum].split(": ", 1)
if key.strip() != "version" or \
value.strip() not in UMLDialog.FileVersions:
self.__showInvalidDataMessage(filename, linenum)
return False
else:
version = value
# step 2: extract diagram type
linenum += 1
key, value = lines[linenum].split(": ", 1)
if key.strip() != "diagram_type":
self.__showInvalidDataMessage(filename, linenum)
return False
try:
self.__diagramType = int(value.strip().split(None, 1)[0])
except ValueError:
self.__showInvalidDataMessage(filename, linenum)
return False
self.scene.clear()
self.builder = self.__diagramBuilder(self.__diagramType, "")
# step 3: extract scene size
linenum += 1
key, value = lines[linenum].split(": ", 1)
if key.strip() != "scene_size":
self.__showInvalidDataMessage(filename, linenum)
return False
try:
width, height = [float(v.strip()) for v in value.split(";")]
except ValueError:
self.__showInvalidDataMessage(filename, linenum)
return False
self.umlView.setSceneSize(width, height)
# step 4: extract builder data if available
linenum += 1
key, value = lines[linenum].split(": ", 1)
if key.strip() == "builder_data":
ok = self.builder.parsePersistenceData(version, value)
if not ok:
self.__showInvalidDataMessage(filename, linenum)
return False
linenum += 1
# step 5: extract the graphics items
ok, vlinenum = self.umlView.parsePersistenceData(
version, lines[linenum:])
if not ok:
self.__showInvalidDataMessage(filename, linenum + vlinenum)
return False
except IndexError:
self.__showInvalidDataMessage(filename)
return False
# everything worked fine, so remember the file name
self.__fileName = filename
return True
def __showInvalidDataMessage(self, filename, linenum=-1):
"""
Private slot to show a message dialog indicating an invalid data file.
@param filename name of the file containing the invalid data (string)
@param linenum number of the invalid line (integer)
"""
if linenum < 0:
msg = self.tr("""<p>The file <b>{0}</b> does not contain"""
""" valid data.</p>""").format(filename)
else:
msg = self.tr("""<p>The file <b>{0}</b> does not contain"""
""" valid data.</p><p>Invalid line: {1}</p>"""
).format(filename, linenum + 1)
E5MessageBox.critical(self, self.tr("Load Diagram"), msg)
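# Illustrative layout of a persisted .e5g file as produced by __saveAs()
# above (values are examples only):
#
#   version: 1.0
#   diagram_type: 0 (Class Diagram)
#   scene_size: 800.0;600.0
#   builder_data: <optional, diagram builder specific>
#   <graphics item lines produced by umlView.getPersistenceData()>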
| gpl-3.0 | 6,361,713,874,534,443,000 | 36.37859 | 78 | 0.551132 | false | 4.452877 | false | false | false |
kylefrost/budget | main.py | 1 | 1783 | from flask import Flask, render_template, request, redirect
from sql import select
# Create Flask app
app = Flask(__name__)
# API Blueprint
from api import api
app.register_blueprint(api, url_prefix="/api")
# Load Index page
@app.route("/")
def index():
return render_template("index.html")
# --------------- BILLS --------------- #
# Bills page
@app.route("/bills")
def bills():
bills = select("bills")
return render_template("bills.html", bills=bills)
# Add Bill page
@app.route("/bills/add")
def bills_add():
return render_template("bills_add.html")
# Edit Bill page
@app.route("/bills/edit")
def bills_edit():
return render_template("bills_edit.html")
# --------------- SPENDING --------------- #
# Spending page
@app.route("/spending")
def spending():
spending = select("spending")
return render_template("spending.html", spending=spending)
# Add Spending page
@app.route("/spending/add")
def spending_add():
accounts = select("accounts")
return render_template("spending_add.html", accounts=accounts)
# Edit Spending page
@app.route("/spending/edit")
def spending_edit():
return render_template("spending_edit.html")
# --------------- ACCOUNTS --------------- #
# Accounts page
@app.route("/accounts")
def accounts():
accounts = select("accounts")
return render_template("accounts.html", accounts=accounts)
# Add Account page
@app.route("/accounts/add")
def accounts_add():
return render_template("accounts_add.html")
# Edit Account page
@app.route("/accounts/edit")
def accounts_edit():
return render_template("accounts_edit.html")
# Run Flask app on load
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0")
| gpl-3.0 | -3,901,554,615,492,134,400 | 21.155844 | 66 | 0.621985 | false | 3.301852 | false | false | false |
ApptuitAI/xcollector | collectors/etc/metric_naming.py | 1 | 1357 | #!/usr/bin/env python
def print_if_apptuit_standard_metric(metric, mapping, timestamp, value, tags=None, tags_str=None):
    if metric not in mapping["metrics"]:
return
new_metric_name = mapping["metrics"][metric]["standard_name"]
new_metric_tags_str = ""
if tags is not None or tags_str is not None or "tags" in mapping or "tags" in mapping["metrics"][metric]:
new_metric_tags = {}
if tags is not None:
for tag in tags:
new_metric_tags[tag] = tags[tag]
if "tags" in mapping:
for tag in mapping["tags"]:
new_metric_tags[tag] = mapping["tags"][tag]
if "tags" in mapping["metrics"][metric]:
for tag in mapping["metrics"][metric]["tags"]:
new_metric_tags[tag] = mapping["metrics"][metric]["tags"][tag]
for i, tag in enumerate(new_metric_tags):
            if i != len(new_metric_tags) - 1:  # all but the last tag get a trailing space
new_metric_tags_str += tag + "=" + new_metric_tags[tag] + " "
else:
new_metric_tags_str += tag + "=" + new_metric_tags[tag]
if tags_str is not None:
new_metric_tags_str = new_metric_tags_str.strip()
new_metric_tags_str += " " + tags_str.strip()
print("%s %d %s %s"
% (new_metric_name, timestamp, value, new_metric_tags_str))
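# Minimal sketch of the mapping structure this function expects, inferred
# from the lookups above (metric and tag values are hypothetical):
#
#   mapping = {
#       "tags": {"service": "mysql"},                  # optional, global tags
#       "metrics": {
#           "qcache.hits": {
#               "standard_name": "mysql.qcache.hits",
#               "tags": {"unit": "count"},             # optional, per metric
#           },
#       },
#   }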
| lgpl-3.0 | -2,912,226,242,471,547,400 | 47.464286 | 109 | 0.559322 | false | 3.609043 | false | false | false |
danielfrg/ec2hosts | ec2hosts/cli.py | 1 | 1208 | from __future__ import print_function, absolute_import, division
import sys
import click
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
import ec2hosts
def main():
try:
cli(obj={})
except Exception as e:
import traceback
click.echo(traceback.format_exc(), err=True)
sys.exit(1)
@click.group(invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
@click.version_option(prog_name='ec2hosts', version=ec2hosts.__version__)
@click.pass_context
def cli(ctx):
ctx.obj = {}
if ctx.invoked_subcommand is None:
ctx.invoke(run)
@cli.command(short_help='Run')
@click.pass_context
def run(ctx):
click.echo("New /etc/hosts file:")
content = ec2hosts.gen_file()
click.echo(content)
if click.confirm('Do you want to continue?'):
ec2hosts.write(content)
ec2hosts.move()
@cli.command(short_help='Clean')
@click.pass_context
def clean(ctx):
click.echo("New /etc/hosts file:")
content = ec2hosts.read_file()
content = ec2hosts.clean(ec2hosts.read_file())
click.echo(content)
if click.confirm('Do you want to continue?'):
ec2hosts.write(content)
ec2hosts.move()
| apache-2.0 | 8,346,817,316,003,575,000 | 23.16 | 81 | 0.663907 | false | 3.309589 | false | false | false |
brupoon/mustachedNinja | any_lowercase_test.py | 1 | 1080 | #Chapter 9, Exercise 11...any_lowercase tests
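# Only version 4 is a correct "any lowercase" test: 1 returns after the
# first character; 2 tests the literal 'c' and returns the *strings*
# 'True'/'False' (so the == True checks below always print "false");
# 3 reports only the last character; 5 is really an "all lowercase" check.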
def any_lowercase1(s):
for c in s:
if c.islower():
return True
else:
return False
def any_lowercase2(s):
for c in s:
if 'c'.islower():
return 'True'
else:
return 'False'
def any_lowercase3(s):
for c in s:
flag = c.islower()
return flag
def any_lowercase4(s):
flag = False
for c in s:
flag = flag or c.islower()
return flag
def any_lowercase5(s):
for c in s:
if not c.islower():
return False
return True
if __name__ == '__main__':
if any_lowercase2("test") == True: print("all lower: true")
else: print("all lower: false")
if any_lowercase2("Test") == True: print("firstupper: true")
else: print("firstupper: false")
if any_lowercase2("tEst") == True: print("middleupper: true")
else: print("middleupper: false")
if any_lowercase2("TEST") == True: print("all upper: true")
else: print("all upper: false") | mit | 566,311,769,260,755,700 | 23.761905 | 65 | 0.542593 | false | 3.624161 | false | false | false |
mpunkenhofer/irc-telegram-bot | telepot/telepot/aio/__init__.py | 1 | 26958 | import io
import json
import time
import asyncio
import traceback
import collections
from concurrent.futures._base import CancelledError
from . import helper, api
from .. import _BotBase, flavor, _find_first_key, _isstring, _dismantle_message_identifier, _strip, _rectify
# Patch aiohttp for sending unicode filename
from . import hack
from .. import exception
def flavor_router(routing_table):
router = helper.Router(flavor, routing_table)
return router.route
class Bot(_BotBase):
class Scheduler(object):
def __init__(self, loop):
self._loop = loop
self._callback = None
def event_at(self, when, data):
delay = when - time.time()
return self._loop.call_later(delay, self._callback, data)
# call_at() uses event loop time, not unix time.
# May as well use call_later here.
def event_later(self, delay, data):
return self._loop.call_later(delay, self._callback, data)
def event_now(self, data):
return self._loop.call_soon(self._callback, data)
def cancel(self, event):
return event.cancel()
def __init__(self, token, loop=None):
super(Bot, self).__init__(token)
self._loop = loop if loop is not None else asyncio.get_event_loop()
self._scheduler = self.Scheduler(self._loop)
self._router = helper.Router(flavor, {'chat': helper._delay_yell(self, 'on_chat_message'),
'callback_query': helper._delay_yell(self, 'on_callback_query'),
'inline_query': helper._delay_yell(self, 'on_inline_query'),
'chosen_inline_result': helper._delay_yell(self, 'on_chosen_inline_result')})
@property
def loop(self):
return self._loop
@property
def scheduler(self):
return self._scheduler
@property
def router(self):
return self._router
async def handle(self, msg):
await self._router.route(msg)
async def _api_request(self, method, params=None, files=None, **kwargs):
return await api.request((self._token, method, params, files), **kwargs)
async def getMe(self):
""" See: https://core.telegram.org/bots/api#getme """
return await self._api_request('getMe')
async def sendMessage(self, chat_id, text,
parse_mode=None, disable_web_page_preview=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendmessage """
p = _strip(locals())
return await self._api_request('sendMessage', _rectify(p))
async def forwardMessage(self, chat_id, from_chat_id, message_id, disable_notification=None):
""" See: https://core.telegram.org/bots/api#forwardmessage """
p = _strip(locals())
return await self._api_request('forwardMessage', _rectify(p))
async def _sendfile(self, inputfile, filetype, params):
method = {'photo': 'sendPhoto',
'audio': 'sendAudio',
'document': 'sendDocument',
'sticker': 'sendSticker',
'video': 'sendVideo',
'voice': 'sendVoice',}[filetype]
if _isstring(inputfile):
params[filetype] = inputfile
return await self._api_request(method, _rectify(params))
else:
files = {filetype: inputfile}
return await self._api_request(method, _rectify(params), files)
async def sendPhoto(self, chat_id, photo,
caption=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendphoto
:param photo:
a string indicating a ``file_id`` on server,
a file-like object as obtained by ``open()`` or ``urlopen()``,
or a (filename, file-like object) tuple.
If the file-like object is obtained by ``urlopen()``, you most likely
have to supply a filename because Telegram servers require to know
the file extension.
If the filename contains non-ASCII characters and you are using Python 2.7,
make sure the filename is a unicode string.
"""
p = _strip(locals(), more=['photo'])
return await self._sendfile(photo, 'photo', p)
async def sendAudio(self, chat_id, audio,
caption=None, duration=None, performer=None, title=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendaudio
:param audio: Same as ``photo`` in :meth:`telepot.aio.Bot.sendPhoto`
"""
p = _strip(locals(), more=['audio'])
return await self._sendfile(audio, 'audio', p)
async def sendDocument(self, chat_id, document,
caption=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#senddocument
:param document: Same as ``photo`` in :meth:`telepot.aio.Bot.sendPhoto`
"""
p = _strip(locals(), more=['document'])
return await self._sendfile(document, 'document', p)
async def sendSticker(self, chat_id, sticker,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendsticker
:param sticker: Same as ``photo`` in :meth:`telepot.aio.Bot.sendPhoto`
"""
p = _strip(locals(), more=['sticker'])
return await self._sendfile(sticker, 'sticker', p)
async def sendVideo(self, chat_id, video,
duration=None, width=None, height=None, caption=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendvideo
:param video: Same as ``photo`` in :meth:`telepot.aio.Bot.sendPhoto`
"""
p = _strip(locals(), more=['video'])
return await self._sendfile(video, 'video', p)
async def sendVoice(self, chat_id, voice,
caption=None, duration=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendvoice
:param voice: Same as ``photo`` in :meth:`telepot.aio.Bot.sendPhoto`
"""
p = _strip(locals(), more=['voice'])
return await self._sendfile(voice, 'voice', p)
async def sendLocation(self, chat_id, latitude, longitude,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendlocation """
p = _strip(locals())
return await self._api_request('sendLocation', _rectify(p))
async def sendVenue(self, chat_id, latitude, longitude, title, address,
foursquare_id=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendvenue """
p = _strip(locals())
return await self._api_request('sendVenue', _rectify(p))
async def sendContact(self, chat_id, phone_number, first_name,
last_name=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendcontact """
p = _strip(locals())
return await self._api_request('sendContact', _rectify(p))
async def sendGame(self, chat_id, game_short_name,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendgame """
p = _strip(locals())
return await self._api_request('sendGame', _rectify(p))
async def sendChatAction(self, chat_id, action):
""" See: https://core.telegram.org/bots/api#sendchataction """
p = _strip(locals())
return await self._api_request('sendChatAction', _rectify(p))
async def getUserProfilePhotos(self, user_id, offset=None, limit=None):
""" See: https://core.telegram.org/bots/api#getuserprofilephotos """
p = _strip(locals())
return await self._api_request('getUserProfilePhotos', _rectify(p))
async def getFile(self, file_id):
""" See: https://core.telegram.org/bots/api#getfile """
p = _strip(locals())
return await self._api_request('getFile', _rectify(p))
async def kickChatMember(self, chat_id, user_id):
""" See: https://core.telegram.org/bots/api#kickchatmember """
p = _strip(locals())
return await self._api_request('kickChatMember', _rectify(p))
async def leaveChat(self, chat_id):
""" See: https://core.telegram.org/bots/api#leavechat """
p = _strip(locals())
return await self._api_request('leaveChat', _rectify(p))
async def unbanChatMember(self, chat_id, user_id):
""" See: https://core.telegram.org/bots/api#unbanchatmember """
p = _strip(locals())
return await self._api_request('unbanChatMember', _rectify(p))
async def getChat(self, chat_id):
""" See: https://core.telegram.org/bots/api#getchat """
p = _strip(locals())
return await self._api_request('getChat', _rectify(p))
async def getChatAdministrators(self, chat_id):
""" See: https://core.telegram.org/bots/api#getchatadministrators """
p = _strip(locals())
return await self._api_request('getChatAdministrators', _rectify(p))
async def getChatMembersCount(self, chat_id):
""" See: https://core.telegram.org/bots/api#getchatmemberscount """
p = _strip(locals())
return await self._api_request('getChatMembersCount', _rectify(p))
async def getChatMember(self, chat_id, user_id):
""" See: https://core.telegram.org/bots/api#getchatmember """
p = _strip(locals())
return await self._api_request('getChatMember', _rectify(p))
async def answerCallbackQuery(self, callback_query_id,
text=None, show_alert=None, url=None, cache_time=None):
""" See: https://core.telegram.org/bots/api#answercallbackquery """
p = _strip(locals())
return await self._api_request('answerCallbackQuery', _rectify(p))
async def editMessageText(self, msg_identifier, text,
parse_mode=None, disable_web_page_preview=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagetext
:param msg_identifier:
a 2-tuple (``chat_id``, ``message_id``),
a 1-tuple (``inline_message_id``),
or simply ``inline_message_id``.
You may extract this value easily with :meth:`telepot.message_identifier`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return await self._api_request('editMessageText', _rectify(p))
async def editMessageCaption(self, msg_identifier, caption=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagecaption
:param msg_identifier: Same as ``msg_identifier`` in :meth:`telepot.aio.Bot.editMessageText`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return await self._api_request('editMessageCaption', _rectify(p))
async def editMessageReplyMarkup(self, msg_identifier, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagereplymarkup
:param msg_identifier: Same as ``msg_identifier`` in :meth:`telepot.aio.Bot.editMessageText`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return await self._api_request('editMessageReplyMarkup', _rectify(p))
async def answerInlineQuery(self, inline_query_id, results,
cache_time=None, is_personal=None, next_offset=None,
switch_pm_text=None, switch_pm_parameter=None):
""" See: https://core.telegram.org/bots/api#answerinlinequery """
p = _strip(locals())
return await self._api_request('answerInlineQuery', _rectify(p))
async def getUpdates(self, offset=None, limit=None, timeout=None, allowed_updates=None):
""" See: https://core.telegram.org/bots/api#getupdates """
p = _strip(locals())
return await self._api_request('getUpdates', _rectify(p))
async def setWebhook(self, url=None, certificate=None, max_connections=None, allowed_updates=None):
""" See: https://core.telegram.org/bots/api#setwebhook """
p = _strip(locals(), more=['certificate'])
if certificate:
files = {'certificate': certificate}
return await self._api_request('setWebhook', _rectify(p), files)
else:
return await self._api_request('setWebhook', _rectify(p))
async def deleteWebhook(self):
""" See: https://core.telegram.org/bots/api#deletewebhook """
return await self._api_request('deleteWebhook')
async def getWebhookInfo(self):
""" See: https://core.telegram.org/bots/api#getwebhookinfo """
return await self._api_request('getWebhookInfo')
async def setGameScore(self, user_id, score, game_message_identifier,
force=None, disable_edit_message=None):
""" See: https://core.telegram.org/bots/api#setgamescore """
p = _strip(locals(), more=['game_message_identifier'])
p.update(_dismantle_message_identifier(game_message_identifier))
return await self._api_request('setGameScore', _rectify(p))
async def getGameHighScores(self, user_id, game_message_identifier):
""" See: https://core.telegram.org/bots/api#getgamehighscores """
p = _strip(locals(), more=['game_message_identifier'])
p.update(_dismantle_message_identifier(game_message_identifier))
return await self._api_request('getGameHighScores', _rectify(p))
async def download_file(self, file_id, dest):
"""
Download a file to local disk.
:param dest: a path or a ``file`` object
"""
f = await self.getFile(file_id)
try:
d = dest if isinstance(dest, io.IOBase) else open(dest, 'wb')
async with api.download((self._token, f['file_path'])) as r:
while 1:
chunk = await r.content.read(self._file_chunk_size)
if not chunk:
break
d.write(chunk)
d.flush()
finally:
if not isinstance(dest, io.IOBase) and 'd' in locals():
d.close()
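    # Illustrative usage (a sketch; file_id would come from a received
    # message, e.g. msg['document']['file_id']):
    #
    #     await bot.download_file(file_id, 'saved.bin')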
async def message_loop(self, handler=None, relax=0.1,
timeout=20, allowed_updates=None,
source=None, ordered=True, maxhold=3):
"""
Return a task to constantly ``getUpdates`` or pull updates from a queue.
Apply ``handler`` to every message received.
:param handler:
a function that takes one argument (the message), or a routing table.
If ``None``, the bot's ``handle`` method is used.
A *routing table* is a dictionary of ``{flavor: function}``, mapping messages to appropriate
handler functions according to their flavors. It allows you to define functions specifically
to handle one flavor of messages. It usually looks like this: ``{'chat': fn1,
'callback_query': fn2, 'inline_query': fn3, ...}``. Each handler function should take
one argument (the message).
:param source:
Source of updates.
If ``None``, ``getUpdates`` is used to obtain new messages from Telegram servers.
If it is a ``asyncio.Queue``, new messages are pulled from the queue.
A web application implementing a webhook can dump updates into the queue,
while the bot pulls from it. This is how telepot can be integrated with webhooks.
Acceptable contents in queue:
- ``str`` or ``bytes`` (decoded using UTF-8)
representing a JSON-serialized `Update <https://core.telegram.org/bots/api#update>`_ object.
- a ``dict`` representing an Update object.
When ``source`` is a queue, these parameters are meaningful:
:type ordered: bool
:param ordered:
If ``True``, ensure in-order delivery of messages to ``handler``
(i.e. updates with a smaller ``update_id`` always come before those with
a larger ``update_id``).
If ``False``, no re-ordering is done. ``handler`` is applied to messages
as soon as they are pulled from queue.
:type maxhold: float
:param maxhold:
Applied only when ``ordered`` is ``True``. The maximum number of seconds
an update is held waiting for a not-yet-arrived smaller ``update_id``.
When this number of seconds is up, the update is delivered to ``handler``
even if some smaller ``update_id``\s have not yet arrived. If those smaller
``update_id``\s arrive at some later time, they are discarded.
:type timeout: int
:param timeout:
``timeout`` parameter supplied to :meth:`telepot.aio.Bot.getUpdates`,
controlling how long to poll in seconds.
:type allowed_updates: array of string
:param allowed_updates:
``allowed_updates`` parameter supplied to :meth:`telepot.aio.Bot.getUpdates`,
controlling which types of updates to receive.
"""
if handler is None:
handler = self.handle
elif isinstance(handler, dict):
handler = flavor_router(handler)
def create_task_for(msg):
self.loop.create_task(handler(msg))
if asyncio.iscoroutinefunction(handler):
callback = create_task_for
else:
callback = handler
def handle(update):
try:
key = _find_first_key(update, ['message',
'edited_message',
'channel_post',
'edited_channel_post',
'callback_query',
'inline_query',
'chosen_inline_result'])
callback(update[key])
except:
# Localize the error so message thread can keep going.
traceback.print_exc()
finally:
return update['update_id']
async def get_from_telegram_server():
offset = None # running offset
allowed_upd = allowed_updates
while 1:
try:
result = await self.getUpdates(offset=offset,
timeout=timeout,
allowed_updates=allowed_upd)
# Once passed, this parameter is no longer needed.
allowed_upd = None
if len(result) > 0:
# No sort. Trust server to give messages in correct order.
# Update offset to max(update_id) + 1
offset = max([handle(update) for update in result]) + 1
except CancelledError:
raise
except exception.BadHTTPResponse as e:
traceback.print_exc()
# Servers probably down. Wait longer.
if e.status == 502:
await asyncio.sleep(30)
except:
traceback.print_exc()
await asyncio.sleep(relax)
else:
await asyncio.sleep(relax)
def dictify(data):
if type(data) is bytes:
return json.loads(data.decode('utf-8'))
elif type(data) is str:
return json.loads(data)
elif type(data) is dict:
return data
else:
raise ValueError()
async def get_from_queue_unordered(qu):
while 1:
try:
data = await qu.get()
update = dictify(data)
handle(update)
except:
traceback.print_exc()
async def get_from_queue(qu):
# Here is the re-ordering mechanism, ensuring in-order delivery of updates.
max_id = None # max update_id passed to callback
buffer = collections.deque() # keep those updates which skip some update_id
qwait = None # how long to wait for updates,
# because buffer's content has to be returned in time.
while 1:
try:
data = await asyncio.wait_for(qu.get(), qwait)
update = dictify(data)
if max_id is None:
# First message received, handle regardless.
max_id = handle(update)
elif update['update_id'] == max_id + 1:
# No update_id skipped, handle naturally.
max_id = handle(update)
                        # clear contiguous updates in buffer
if len(buffer) > 0:
buffer.popleft() # first element belongs to update just received, useless now.
while 1:
try:
if type(buffer[0]) is dict:
max_id = handle(buffer.popleft()) # updates that arrived earlier, handle them.
else:
                                        break  # gap, no more contiguous updates
except IndexError:
break # buffer empty
elif update['update_id'] > max_id + 1:
# Update arrives pre-maturely, insert to buffer.
nbuf = len(buffer)
if update['update_id'] <= max_id + nbuf:
# buffer long enough, put update at position
buffer[update['update_id'] - max_id - 1] = update
else:
# buffer too short, lengthen it
expire = time.time() + maxhold
for a in range(nbuf, update['update_id']-max_id-1):
buffer.append(expire) # put expiry time in gaps
buffer.append(update)
else:
pass # discard
except asyncio.TimeoutError:
# debug message
# print('Timeout')
# some buffer contents have to be handled
# flush buffer until a non-expired time is encountered
while 1:
try:
if type(buffer[0]) is dict:
max_id = handle(buffer.popleft())
else:
expire = buffer[0]
if expire <= time.time():
max_id += 1
buffer.popleft()
else:
break # non-expired
except IndexError:
break # buffer empty
except:
traceback.print_exc()
finally:
try:
# don't wait longer than next expiry time
qwait = buffer[0] - time.time()
if qwait < 0:
qwait = 0
except IndexError:
# buffer empty, can wait forever
qwait = None
# debug message
# print ('Buffer:', str(buffer), ', To Wait:', qwait, ', Max ID:', max_id)
self._scheduler._callback = callback
if source is None:
await get_from_telegram_server()
elif isinstance(source, asyncio.Queue):
if ordered:
await get_from_queue(source)
else:
await get_from_queue_unordered(source)
else:
raise ValueError('Invalid source')
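    # Illustrative wiring (a sketch; TOKEN and the handler functions are
    # placeholders):
    #
    #     bot = Bot(TOKEN)
    #     loop = asyncio.get_event_loop()
    #     loop.create_task(bot.message_loop({'chat': on_chat_message,
    #                                        'callback_query': on_callback_query}))
    #     loop.run_forever()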
class SpeakerBot(Bot):
def __init__(self, token, loop=None):
super(SpeakerBot, self).__init__(token, loop)
self._mic = helper.Microphone()
@property
def mic(self):
return self._mic
def create_listener(self):
q = asyncio.Queue()
self._mic.add(q)
ln = helper.Listener(self._mic, q)
return ln
class DelegatorBot(SpeakerBot):
def __init__(self, token, delegation_patterns, loop=None):
"""
:param delegation_patterns: a list of (seeder, delegator) tuples.
"""
super(DelegatorBot, self).__init__(token, loop)
self._delegate_records = [p+({},) for p in delegation_patterns]
def handle(self, msg):
self._mic.send(msg)
for calculate_seed, make_coroutine_obj, dict in self._delegate_records:
id = calculate_seed(msg)
if id is None:
continue
elif isinstance(id, collections.Hashable):
if id not in dict or dict[id].done():
c = make_coroutine_obj((self, msg, id))
if not asyncio.iscoroutine(c):
raise RuntimeError('You must produce a coroutine *object* as delegate.')
dict[id] = self._loop.create_task(c)
else:
c = make_coroutine_obj((self, msg, id))
self._loop.create_task(c)
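# Illustrative delegation setup (a sketch based on telepot's delegate
# helpers; MessageCounter stands in for a user-defined handler class):
#
#     from telepot.aio.delegate import pave_event_space, per_chat_id, create_open
#
#     bot = DelegatorBot(TOKEN, [
#         pave_event_space()(
#             per_chat_id(), create_open, MessageCounter, timeout=10),
#     ])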
| mit | 7,597,339,703,148,460,000 | 41.655063 | 123 | 0.545664 | false | 4.327127 | false | false | false |
amcat/amcat | api/rest/viewsets/coding/codingschemafield.py | 1 | 2536 | ###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
from rest_framework.viewsets import ReadOnlyModelViewSet
from amcat.models import CodingSchemaField
from api.rest.mixins import DatatablesMixin
from api.rest.serializer import AmCATModelSerializer
from api.rest.viewset import AmCATViewSetMixin
from api.rest.viewsets.project import ProjectViewSetMixin
__all__ = ("CodingSchemaFieldViewSetMixin", "CodingSchemaFieldSerializer", "CodingSchemaFieldViewSet")
class CodingSchemaFieldSerializer(AmCATModelSerializer):
class Meta:
model = CodingSchemaField
fields = '__all__'
class CodingSchemaFieldViewSetMixin(AmCATViewSetMixin):
model_key = "codingschemafield"
model = CodingSchemaField
class CodingSchemaFieldViewSet(ProjectViewSetMixin, CodingSchemaFieldViewSetMixin, DatatablesMixin, ReadOnlyModelViewSet):
model = CodingSchemaField
queryset = CodingSchemaField.objects.all()
serializer_class = CodingSchemaFieldSerializer
ordering_fields = ("id", "fieldnr", "name")
def filter_queryset(self, fields):
fields = super(CodingSchemaFieldViewSet, self).filter_queryset(fields)
return fields.filter(codingschema__in=self.project.get_codingschemas(True))
| agpl-3.0 | 930,094,134,368,315,800 | 54.130435 | 122 | 0.594637 | false | 4.793951 | false | false | false |
GETLIMS/LIMS-Backend | lims/shared/migrations/0005_auto_20180301_0958.py | 1 | 1248 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-03-01 09:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shared', '0004_trigger_fire_on_create'),
]
operations = [
migrations.AlterModelOptions(
name='organism',
options={'ordering': ['-id']},
),
migrations.AlterModelOptions(
name='trigger',
options={'ordering': ['-id']},
),
migrations.AlterModelOptions(
name='triggeralert',
options={'ordering': ['-id']},
),
migrations.AlterModelOptions(
name='triggeralertstatus',
options={'ordering': ['-id']},
),
migrations.AlterModelOptions(
name='triggerset',
options={'ordering': ['-id']},
),
migrations.AlterModelOptions(
name='triggersubscription',
options={'ordering': ['-id']},
),
migrations.AlterField(
model_name='triggerset',
name='email_title',
field=models.CharField(default='Alert from Leaf LIMS', max_length=255),
),
]
| mit | 1,684,550,303,918,089,200 | 27.363636 | 83 | 0.532853 | false | 4.691729 | false | false | false |
qualitio/qualitio | qualitio/core/views.py | 1 | 9056 | import operator
from mptt.models import MPTTModel
from reversion.models import Version
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.db.models.loading import get_model
from django.views.generic.simple import direct_to_template
from qualitio.core.utils import json_response
def to_tree_element(object, type):
tree_element = {'attr': {'id': "%s_%s" % (object.pk, type),
'rel': type},
'data': object.name}
if isinstance(object, MPTTModel):
try:
subchildren = getattr(getattr(object, "subchildren", None), "all", None)()
except TypeError: # Not really good idea, slow typecheck?
subchildren = None
if object.get_children() or subchildren:
tree_element['state'] = "closed"
return tree_element
@json_response
def get_children(request, directory, *args, **kwargs):
data = []
try:
node_id = int(request.GET.get('id', 0))
node = directory.objects.get(pk=node_id)
directories = node.children.order_by('name')
data = map(lambda x: to_tree_element(x, x._meta.module_name), directories)
try:
subchildren = getattr(node, "subchildren", None)
subchildren = getattr(subchildren, "order_by", lambda *a, **k: None)('name')
            data.extend(map(lambda x: to_tree_element(x, x._meta.module_name), subchildren))
except TypeError: # Not really good idea, slow typecheck?
pass
except (ObjectDoesNotExist, ValueError):
# TODO: maybe the better way is to override method 'root_nodes' on manager
directories = directory.tree.root_nodes().order_by('name')
data = map(lambda x: to_tree_element(x, x._meta.module_name),
directories)
return data
@json_response
def get_ancestors(request, app, *args, **kwargs):
Model = get_model(app, request.GET['type'])
object = Model.objects.get(pk=request.GET['id'])
ancestors = []
if isinstance(object, MPTTModel): # directory?
ancestors = object.get_ancestors()
else:
if object.parent:
ancestors = list(object.parent.get_ancestors())
ancestors.extend([object.parent])
return {"nodes": map(lambda x: '%s_%s' % (x.pk, x._meta.module_name), ancestors),
"target": "%s_%s" % (object.pk, object._meta.module_name)}
def history(request, object_id, Model, *args, **kwargs):
object = Model.objects.get(pk=object_id)
versions = Version.objects.get_for_object(object)
return direct_to_template(request, 'core/history.html',
{'object': object,
'name' : object._meta.object_name,
'versions' : versions})
def permission_required(request, *args, **kwargs):
return direct_to_template(request, 'core/permission_required.html')
registry = {}
def menu_view(_object, view_name, role="", index=-1, *args, **kwargs):
if index < 0:
index = len(registry)-index+1 # this is only append
if registry.has_key(_object):
registry[_object].insert(index, dict(name=view_name, role=role))
else:
registry[_object] = [dict(name=view_name, role=role)]
def _menu_view(function):
def __menu_view(*args, **kw):
return function(*args, **kw)
return __menu_view
return _menu_view
# jQuery DataTable's ajax params helper #################
class DataTableColumn(object):
"""
Column that represents jQuery DataTable column.
The main responsibility is to create criteria (django ``Q`` objects)
for each client-side table column.
Params:
- ``name``
       name of the model's *attribute* (not the column label or name); if the
       column shows information from a ForeignKey field then use normal
       django-orm-like query strings, e.g. for the store.TestCase.requirement
       attribute the column name should be: ``requirement__name``.
     - ``is_searchable``
       think it's self-describing
     - ``search``
       the search query; should contain the query specified by a user; if the
       query is NOT defined for a specific column, the search query from the
       DataTables search field is used.
     - ``search_is_regex``
       tells the column object to treat the search query as a regex pattern
Used internally by DataTable class.
"""
def __init__(self, name=None, is_searchable=False, search=None, search_is_regex=None):
self.name = name
self.is_searchable = is_searchable
self.search = search
self.search_is_regex = search_is_regex
def search_key(self):
if self.search_is_regex:
return '%s__regex' % self.name
return '%s__icontains' % self.name
def construct_Q(self):
if not self.is_searchable:
return Q()
if not self.search:
return Q()
return Q(**{self.search_key():self.search})
class DataTableOptions(object):
"""
Represents jQuery DataTable options send by the plugin
via ajax request.
Usage:
def myview(request, ...):
options = DataTableOptions(request.GET)
# do something with options
"""
def getitem(self, itemable, key, *args):
"""
        Works pretty much the same as the ``getattr`` function,
        but for objects that have a ``__getitem__`` method.
"""
try:
return itemable[key]
except (IndexError, KeyError):
if args:
return args[0]
raise
def _get_name(self, column_names, column_index):
return self.getitem(column_names, column_index, None) # default name is None
def _get_searchable(self, opts_dict, column_index):
return opts_dict.get('bSearchable_%s' % column_index, 'false') == 'true'
def _get_search_query(self, opts_dict, column_index):
return opts_dict.get('sSearch_%s' % column_index, self.search) or self.search
def _get_search_is_regex(self, opts_dict, column_index):
return opts_dict.get('bRegex_%s' % column_index, 'false') == 'true' or self.search_is_regex
def _get_columns(self, columns_names, params):
columns = []
for i in xrange(len(columns_names)):
columns.append(DataTableColumn(**{
'name': self._get_name(columns_names, i),
'is_searchable': self._get_searchable(params, i),
'search': self._get_search_query(params, i),
'search_is_regex': self._get_search_is_regex(params, i),
}))
return columns
def _get_ordering(self, columns, params):
ordering = None
        sorting_column_index = int(params.get('iSortingCols', 0))
sorting_column_dir = params.get('sSortDir_0', 'asc')
if sorting_column_index:
ordering = columns[sorting_column_index].name
if sorting_column_dir == 'desc':
ordering = '-%s' % ordering
return ordering
def __init__(self, model, column_names, params):
self.search = params.get('sSearch', '')
self.search_is_regex = params.get('bRegex', 'false') == 'true'
self.columns = self._get_columns(column_names, params)
self.model = model
self.limit = int(params.get('iDisplayLength', 100))
self.start_record = int(params.get('iDisplayStart', 0))
self.end_record = self.start_record + self.limit
self.echo = int(params.get('sEcho', 1))
self.ordering = self._get_ordering(self.columns, params)
class DataTable(object):
model = None
def __init__(self, columns=None, params=None, model=None, queryset=None):
self._count = None # cache count field
self._meta = DataTableOptions(model or self.__class__.model or queryset.model, columns, params)
if queryset is not None:
self._queryset = queryset
else:
self._queryset = self._meta.model.objects
def construct_Q(self):
return reduce(operator.or_, [col.construct_Q() for col in self._meta.columns])
def queryset(self):
qs = self._queryset.filter(self.construct_Q())
if self._meta.ordering:
return qs.order_by(self._meta.ordering)
return qs
def count(self):
if self._count is None:
self._count = self.queryset().count()
return self._count
def slice_queryset(self):
return self.queryset()[self._meta.start_record:self._meta.end_record]
def map(self, function):
return map(function, self.slice_queryset())
def response_dict(self, mapitem=lambda x: x):
return {
'iTotalRecords': self.count(),
'iTotalDisplayRecords': self.count(),
'sEcho': self._meta.echo,
'aaData': self.map(mapitem),
}
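# Illustrative view wiring (a sketch; the model and the mapped columns are
# hypothetical):
#
#     @json_response
#     def testcase_table(request):
#         table = DataTable(columns=['name', 'requirement__name'],
#                           params=request.GET,
#                           queryset=TestCase.objects.all())
#         return table.response_dict(
#             mapitem=lambda tc: [tc.name, tc.requirement.name])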
#########################################################
| gpl-3.0 | -4,864,936,522,159,101,000 | 33.830769 | 103 | 0.599823 | false | 3.932262 | false | false | false |
hhorak/rebase-helper | test/test_application.py | 1 | 4369 | # -*- coding: utf-8 -*-
#
# This tool helps you to rebase a package to the latest version
# Copyright (C) 2013-2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors: Petr Hracek <[email protected]>
# Tomas Hozza <[email protected]>
import os
from .base_test import BaseTest
from rebasehelper.cli import CLI
from rebasehelper.application import Application
from rebasehelper import settings
class TestApplication(BaseTest):
""" Application tests """
OLD_SOURCES = 'test-1.0.2.tar.xz'
NEW_SOURCES = 'test-1.0.3.tar.xz'
SPEC_FILE = 'test.spec'
PATCH_1 = 'test-testing.patch'
PATCH_2 = 'test-testing2.patch'
PATCH_3 = 'test-testing3.patch'
SOURCE_1 = 'file.txt.bz2'
TEST_FILES = [
OLD_SOURCES,
NEW_SOURCES,
SPEC_FILE,
PATCH_1,
PATCH_2,
PATCH_3,
SOURCE_1
]
cmd_line_args = ['--not-download-sources', '1.0.3']
def test_application_sources(self):
expected_dict = {
'new': {
'sources': [os.path.join(self.WORKING_DIR, 'test-source.sh'),
os.path.join(self.WORKING_DIR, 'source-tests.sh'),
os.path.join(self.WORKING_DIR, self.NEW_SOURCES)],
'version': '1.0.3',
'name': 'test',
'tarball': self.NEW_SOURCES,
'spec': os.path.join(self.WORKING_DIR, settings.REBASE_HELPER_RESULTS_DIR, self.SPEC_FILE),
'patches_full': {1: [os.path.join(self.WORKING_DIR, self.PATCH_1),
'',
0,
False],
2: [os.path.join(self.WORKING_DIR, self.PATCH_2),
'-p1',
1,
False],
3: [os.path.join(self.WORKING_DIR, self.PATCH_3),
'-p1',
2,
False]}},
'workspace_dir': os.path.join(self.WORKING_DIR, settings.REBASE_HELPER_WORKSPACE_DIR),
'old': {
'sources': [os.path.join(self.WORKING_DIR, 'test-source.sh'),
os.path.join(self.WORKING_DIR, 'source-tests.sh'),
os.path.join(self.WORKING_DIR, self.OLD_SOURCES)],
'version': '1.0.2',
'name': 'test',
'tarball': self.OLD_SOURCES,
'spec': os.path.join(self.WORKING_DIR, self.SPEC_FILE),
'patches_full': {1: [os.path.join(self.WORKING_DIR, self.PATCH_1),
'',
0,
False],
2: [os.path.join(self.WORKING_DIR, self.PATCH_2),
'-p1',
1,
False],
3: [os.path.join(self.WORKING_DIR, self.PATCH_3),
'-p1',
2,
False]}},
'results_dir': os.path.join(self.WORKING_DIR, settings.REBASE_HELPER_RESULTS_DIR)}
try:
cli = CLI(self.cmd_line_args)
app = Application(cli)
app.prepare_sources()
for key, val in app.kwargs.items():
if key in expected_dict:
assert val == expected_dict[key]
except OSError as oer:
pass
| gpl-2.0 | 800,306,180,075,415,300 | 38.718182 | 107 | 0.487984 | false | 4.04537 | true | false | false |
indautgrp/erpnext | erpnext/accounts/report/trial_balance_for_party/trial_balance_for_party.py | 1 | 5770 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt, cint
from erpnext.accounts.report.trial_balance.trial_balance import validate_filters
def execute(filters=None):
validate_filters(filters)
show_party_name = is_party_name_visible(filters)
columns = get_columns(filters, show_party_name)
data = get_data(filters, show_party_name)
return columns, data
def get_data(filters, show_party_name):
party_name_field = "customer_name" if filters.get("party_type")=="Customer" else "supplier_name"
party_filters = {"name": filters.get("party")} if filters.get("party") else {}
parties = frappe.get_all(filters.get("party_type"), fields = ["name", party_name_field],
filters = party_filters, order_by="name")
company_currency = frappe.db.get_value("Company", filters.company, "default_currency")
opening_balances = get_opening_balances(filters)
balances_within_period = get_balances_within_period(filters)
data = []
# total_debit, total_credit = 0, 0
total_row = frappe._dict({
"opening_debit": 0,
"opening_credit": 0,
"debit": 0,
"credit": 0,
"closing_debit": 0,
"closing_credit": 0
})
for party in parties:
row = { "party": party.name }
if show_party_name:
row["party_name"] = party.get(party_name_field)
# opening
opening_debit, opening_credit = opening_balances.get(party.name, [0, 0])
row.update({
"opening_debit": opening_debit,
"opening_credit": opening_credit
})
# within period
debit, credit = balances_within_period.get(party.name, [0, 0])
row.update({
"debit": debit,
"credit": credit
})
# closing
closing_debit, closing_credit = toggle_debit_credit(opening_debit + debit, opening_credit + credit)
row.update({
"closing_debit": closing_debit,
"closing_credit": closing_credit
})
# totals
for col in total_row:
total_row[col] += row.get(col)
row.update({
"currency": company_currency
})
has_value = False
if (opening_debit or opening_credit or debit or credit or closing_debit or closing_credit):
has_value =True
if cint(filters.show_zero_values) or has_value:
data.append(row)
# Add total row
total_row.update({
"party": "'" + _("Totals") + "'",
"currency": company_currency
})
data.append(total_row)
return data
def get_opening_balances(filters):
gle = frappe.db.sql("""
select party, sum(debit) as opening_debit, sum(credit) as opening_credit
from `tabGL Entry`
where company=%(company)s
and ifnull(party_type, '') = %(party_type)s and ifnull(party, '') != ''
and (posting_date < %(from_date)s or ifnull(is_opening, 'No') = 'Yes')
group by party""", {
"company": filters.company,
"from_date": filters.from_date,
"party_type": filters.party_type
}, as_dict=True)
opening = frappe._dict()
for d in gle:
opening_debit, opening_credit = toggle_debit_credit(d.opening_debit, d.opening_credit)
opening.setdefault(d.party, [opening_debit, opening_credit])
return opening
def get_balances_within_period(filters):
gle = frappe.db.sql("""
select party, sum(debit) as debit, sum(credit) as credit
from `tabGL Entry`
where company=%(company)s
and ifnull(party_type, '') = %(party_type)s and ifnull(party, '') != ''
and posting_date >= %(from_date)s and posting_date <= %(to_date)s
and ifnull(is_opening, 'No') = 'No'
group by party""", {
"company": filters.company,
"from_date": filters.from_date,
"to_date": filters.to_date,
"party_type": filters.party_type
}, as_dict=True)
balances_within_period = frappe._dict()
for d in gle:
balances_within_period.setdefault(d.party, [d.debit, d.credit])
return balances_within_period
def toggle_debit_credit(debit, credit):
if flt(debit) > flt(credit):
debit = flt(debit) - flt(credit)
credit = 0.0
else:
credit = flt(credit) - flt(debit)
debit = 0.0
return debit, credit
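# Worked example (illustrative only, not used by the report): the helper nets
# the two sides so that at most one of them stays non-zero, e.g.
#   toggle_debit_credit(100.0, 30.0) -> (70.0, 0.0)
#   toggle_debit_credit(30.0, 100.0) -> (0.0, 70.0)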
def get_columns(filters, show_party_name):
columns = [
{
"fieldname": "party",
"label": _(filters.party_type),
"fieldtype": "Link",
"options": filters.party_type,
"width": 120
},
{
"fieldname": "opening_debit",
"label": _("Opening (Dr)"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "opening_credit",
"label": _("Opening (Cr)"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "debit",
"label": _("Debit"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "credit",
"label": _("Credit"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "closing_debit",
"label": _("Closing (Dr)"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "closing_credit",
"label": _("Closing (Cr)"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "currency",
"label": _("Currency"),
"fieldtype": "Link",
"options": "Currency",
"hidden": 1
}
]
if show_party_name:
columns.insert(1, {
"fieldname": "party_name",
"label": _(filters.party_type) + " Name",
"fieldtype": "Data",
"width": 200
})
return columns
def is_party_name_visible(filters):
show_party_name = False
if filters.get("party_type") == "Customer":
party_naming_by = frappe.db.get_single_value("Selling Settings", "cust_master_name")
else:
party_naming_by = frappe.db.get_single_value("Buying Settings", "supp_master_name")
if party_naming_by == "Naming Series":
show_party_name = True
return show_party_name | gpl-3.0 | 8,733,647,437,386,378,000 | 24.995495 | 101 | 0.644541 | false | 2.963534 | false | false | false |
lycying/seeking | sklib/ui/wysiwyg/syntax.py | 1 | 2930 | # coding:utf-8
#
# Copyright (c) 2010, guo.li <[email protected]>
# Site < http://code.google.com/p/seeking/ >
# All rights reserved.
# vim: set ft=python sw=2 ts=2 et:
#
from PyQt5.QtGui import QFont
from PyQt5.QtGui import QTextCharFormat
from PyQt5.QtGui import QSyntaxHighlighter
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QRegExp
class Highlighter(QSyntaxHighlighter):
def __init__(self, parent=None):
super(Highlighter, self).__init__(parent)
keywordFormat = QTextCharFormat()
keywordFormat.setForeground(Qt.darkBlue)
keywordFormat.setFontWeight(QFont.Bold)
keywordPatterns = ["""</?\w+\s+[^>]*>""","<[/]?(html|body|head|title|div|a|br|form|input|b|p|i|center|span|font|table|tr|td|h[1-6])[/]?>"]
self.highlightingRules = [(QRegExp(pattern), keywordFormat)
for pattern in keywordPatterns]
self.multiLineCommentFormat = QTextCharFormat()
self.multiLineCommentFormat.setForeground(Qt.red)
quotationFormat = QTextCharFormat()
quotationFormat.setForeground(Qt.darkGreen)
self.highlightingRules.append((QRegExp("\".*\""),
quotationFormat))
functionFormat = QTextCharFormat()
functionFormat.setFontItalic(True)
functionFormat.setForeground(Qt.blue)
self.highlightingRules.append((QRegExp("\\b[A-Za-z0-9_]+(?=\\()"),
functionFormat))
moreKeyWords = QTextCharFormat()
moreKeyWords.setForeground(Qt.darkMagenta)
moreKeyWords.setFontWeight(QFont.Bold)
self.highlightingRules.append((QRegExp("(id|class|src|border|width|height|style|name|type|value)="),moreKeyWords))
self.commentStartExpression = QRegExp("<!--")
self.commentEndExpression = QRegExp("-->")
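    # Minimal usage sketch (assumes an existing QTextDocument named `doc`):
    #     highlighter = Highlighter(doc)
    # Qt then re-runs highlightBlock() automatically as the document changes.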
def highlightBlock(self, text):
for pattern, formats in self.highlightingRules:
expression = QRegExp(pattern)
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length, formats)
index = expression.indexIn(text, index + length)
self.setCurrentBlockState(0)
startIndex = 0
if self.previousBlockState() != 1:
startIndex = self.commentStartExpression.indexIn(text)
while startIndex >= 0:
endIndex = self.commentEndExpression.indexIn(text, startIndex)
if endIndex == -1:
self.setCurrentBlockState(1)
commentLength = len(text) - startIndex
else:
commentLength = endIndex - startIndex + self.commentEndExpression.matchedLength()
self.setFormat(startIndex, commentLength,
self.multiLineCommentFormat)
startIndex = self.commentStartExpression.indexIn(text,
startIndex + commentLength);
| gpl-2.0 | -3,336,502,950,754,958,000 | 36.564103 | 146 | 0.639249 | false | 3.885942 | false | false | false |
nheijmans/random_scripts | mock_http_server/http_https_server.py | 1 | 1185 | # given a pem file ... openssl req -new -x509 -keyout yourpemfile.pem -out yourpemfile.pem -days 365 -nodes
import sys
import ssl
import time
import signal
import threading
import BaseHTTPServer, SimpleHTTPServer
def http_worker():
httpd = BaseHTTPServer.HTTPServer(('localhost', 8080), SimpleHTTPServer.SimpleHTTPRequestHandler)
httpd.serve_forever()
return
def https_worker():
httpd = BaseHTTPServer.HTTPServer(('localhost', 4443), SimpleHTTPServer.SimpleHTTPRequestHandler)
    httpd.socket = ssl.wrap_socket(httpd.socket, server_side=True,
                                   certfile='yourpemfile.pem')
httpd.serve_forever()
return
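# Quick smoke test (assumed invocations) once both servers are running:
#     curl http://localhost:8080/
#     curl -k https://localhost:4443/   # -k because the cert is self-signed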
def signal_handler(sig, frame):
print('You pressed Ctrl+C! Now exiting')
sys.exit(0)
if __name__ == "__main__":
# http server
http = threading.Thread(name='httpserver', target=http_worker)
http.setDaemon(True)
# https server
https = threading.Thread(name='httpsserver',target=https_worker)
https.setDaemon(True)
http.start()
https.start()
# catch ctrl+c to exit the script
signal.signal(signal.SIGINT, signal_handler)
print("Press CTRL+C to stop the script")
signal.pause()
| gpl-3.0 | 8,117,941,957,505,710,000 | 27.214286 | 107 | 0.699578 | false | 3.822581 | false | false | false |
boazjohn/pyspark-job-server | lib/receiver.py | 1 | 8167 | #!/usr/bin/python
# Standard Library
import json
import time
import logging
import socket
from threading import Thread, Lock
# Third Party
# Local
# set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class EventBroadcastReceiver:
def __init__(self, host='localhost', port=0):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.bind((host,port))
self.socket.listen(5)
if port == 0:
port = self.socket.getsockname()[1]
self.port = port
self.statusLock = Lock()
self.status = {
"jobs": {}
}
self.stageMap = {}
self.idMap = {}
def run(self):
self.listener = Thread(target=self._run)
self.listener.start()
def _run(self):
conn, address = self.socket.accept()
data = ""
while True:
            chunk = conn.recv(1024)
            if not chunk:
                break  # peer closed the connection; stop reading
            data += chunk
while True:
newline = data.find('\n')
if newline != -1:
self.statusLock.acquire()
try:
jsonData = data[:newline]
                    self.processEvent(json.loads(jsonData))
except Exception as e:
print "ERROR: %s" % e
finally:
data = data[newline+1:]
self.statusLock.release()
else:
break
def processEvent(self, event):
eventType = event['Event']
# TODO DEAL WITH FAILED TASKS!!!
if eventType == "SparkListenerApplicationStart":
self.status['appName'] = event["App Name"]
self.status['started'] = event["Timestamp"]
elif eventType == "SparkListenerJobStart":
jobId = event['Job ID']
stages = event["Stage IDs"]
properties = event["Properties"]
jobInfo = {
"id": jobId,
"numStages" : len(stages),
"stagesWaiting": stages,
"stagesInProgress" : [],
"stagesComplete" : [],
"stages": {
stage : {
"id": stage,
"numTasks" : 0,
"tasksInProgress" : [],
"tasksComplete" : [],
"tasks": {},
"complete": False
}
for stage in stages},
"complete": False,
"failed": False,
"properties": properties
}
for stage in stages:
self.stageMap[stage] = jobId
if "spark.jobGroup.id" in properties:
self.idMap[properties["spark.jobGroup.id"]] = jobId
jobInfo['handle'] = properties["spark.jobGroup.id"]
self.status['jobs'][jobId] = jobInfo
# Clean up old, complete jobs
i = 0
keys = self.status['jobs'].keys()
for key in keys:
if len(self.status['jobs']) <= 100:
break
if self.status['jobs'][key]['complete']:
del self.status['jobs'][key]
elif eventType == "SparkListenerStageSubmitted":
info = event["Stage Info"]
stageId = info["Stage ID"]
jobId = self.stageMap[stageId]
job = self.status['jobs'][jobId]
job['stagesWaiting'].remove(stageId)
job['stagesInProgress'].append(stageId)
stage = job['stages'][stageId]
stage['numTasks'] = info["Number of Tasks"]
elif eventType == "SparkListenerTaskStart":
info = event["Task Info"]
taskId = info["Task ID"]
stageId = event["Stage ID"]
jobId = self.stageMap[stageId]
stage = self.status['jobs'][jobId]['stages'][stageId]
stage["tasksInProgress"].append(taskId)
stage["tasks"][taskId] = {
"id": taskId,
"started": info["Launch Time"]
}
elif eventType == "SparkListenerTaskEnd":
info = event["Task Info"]
taskId = info["Task ID"]
stageId = event["Stage ID"]
jobId = self.stageMap[stageId]
stage = self.status['jobs'][jobId]['stages'][stageId]
stage["tasksInProgress"].remove(taskId)
stage["tasksComplete"].append(taskId)
stage["tasks"][taskId]['finished'] = info["Finish Time"]
# TODO Handle event where task ends in failure
elif eventType == "SparkListenerStageCompleted":
info = event["Stage Info"]
stageId = info["Stage ID"]
jobId = self.stageMap[stageId]
job = self.status['jobs'][jobId]
job['stagesInProgress'].remove(stageId)
job['stagesComplete'].append(stageId)
stage = job['stages'][stageId]
stage["complete"] = True
elif eventType == "SparkListenerJobEnd":
jobId = event['Job ID']
job = self.status['jobs'][jobId]
job["complete"] = True
result = event['Job Result']
if result['Result'] == 'JobFailed':
job["failed"] = True
def getStatus(self):
status = {}
self.statusLock.acquire()
try:
status = dict(self.status.items())
except Exception as e:
print e
finally:
self.statusLock.release()
return status
def getProgress(self, jobType=None):
status = self.getStatus()
if jobType:
status['jobs'] = {
key: value for key, value in status['jobs'].items() if value['properties']['spark.job.type'] == jobType
}
status['jobs'] = {
jobId: self._processJobStatusToProgress(info)
for jobId, info in status['jobs'].items()
}
return status
def getJobStatus(self, jobName):
jobId = self.idMap.get(jobName, None)
status = self.getStatus()
jobStatus = status['jobs'].get(jobId, {})
return jobStatus
def _processJobStatusToProgress(self, status):
if len(status) == 0:
return {}
stages = status['stages']
properties = status['properties']
totalStages = len(stages)
completeStages = len([stage for stage in stages.values() if stage['complete']])
if totalStages == 0:
completeStages = 1
totalStages = 1
progress = {
"name": properties.get("spark.job.name", ""),
"type": properties.get("spark.job.type", ""),
"complete": status['complete'],
"failed": status['failed'],
"totalStages": totalStages,
"completeStages": completeStages,
"stageProgress": float(completeStages)/float(totalStages),
}
if "handle" in status:
progress['handle'] = status['handle']
if len(status["stagesInProgress"]) > 0:
currentStage = stages[status["stagesInProgress"][0]]
totalTasks = currentStage['numTasks']
completeTasks = len(currentStage['tasksComplete'])
if totalTasks == 0:
completeTasks = 1
totalTasks = 1
progress["currentStage"] = {
"totalTasks": totalTasks,
"completeTasks": completeTasks,
"taskProgress": float(completeTasks)/float(totalTasks)
}
return progress
def getJobProgress(self, jobName):
status = self.getJobStatus(jobName)
return self._processJobStatusToProgress(status)
def getRunningCount(self):
return len([job for job in self.getStatus()['jobs'].values() if not job['complete']])
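    # Illustrative wiring sketch (the event-forwarding side is an assumption):
    #     receiver = EventBroadcastReceiver(port=0)  # bind an ephemeral port
    #     receiver.run()                             # accept one event stream
    #     # ...stream Spark listener events, one JSON object per line, to
    #     # localhost:<receiver.port>...
    #     print receiver.getProgress()               # aggregated job progress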
    def close(self):
        # TODO: close the listening socket and join the listener thread.
pass
| bsd-3-clause | 7,201,294,214,269,019,000 | 28.806569 | 119 | 0.501408 | false | 4.585626 | false | false | false |
AvadootNachankar/gstudio | gnowsys-ndf/gnowsys_ndf/ndf/views/data_review.py | 2 | 15718 | ''' -- Imports from python libraries -- '''
# import os, re
import json
''' -- imports from installed packages -- '''
from django.http import HttpResponse
from django.shortcuts import render_to_response # , render #uncomment when to use
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from mongokit import paginator
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
# from django.http import Http404
''' -- imports from application folders/files -- '''
from gnowsys_ndf.ndf.models import Node # , GRelation, Triple
from gnowsys_ndf.ndf.models import node_collection, triple_collection
# from gnowsys_ndf.ndf.models import GSystemType#, GSystem uncomment when to use
# from gnowsys_ndf.ndf.models import File
from gnowsys_ndf.ndf.models import STATUS_CHOICES
from gnowsys_ndf.ndf.views.methods import get_node_common_fields,get_execution_time # , create_grelation_list ,set_all_urls
from gnowsys_ndf.ndf.views.methods import create_grelation
# from gnowsys_ndf.ndf.views.methods import create_gattribute
from gnowsys_ndf.ndf.views.methods import get_node_metadata, get_page, get_group_name_id
# from gnowsys_ndf.ndf.org2any import org2html
from gnowsys_ndf.ndf.views.search_views import results_search
# from gnowsys_ndf.settings import GSTUDIO_SITE_VIDEO
# from gnowsys_ndf.settings import EXTRA_LANG_INFO
from gnowsys_ndf.settings import GSTUDIO_RESOURCES_EDUCATIONAL_SUBJECT
from gnowsys_ndf.settings import GSTUDIO_RESOURCES_EDUCATIONAL_USE
from gnowsys_ndf.settings import GSTUDIO_RESOURCES_INTERACTIVITY_TYPE
from gnowsys_ndf.settings import GSTUDIO_RESOURCES_EDUCATIONAL_ALIGNMENT
from gnowsys_ndf.settings import GSTUDIO_RESOURCES_EDUCATIONAL_LEVEL
from gnowsys_ndf.settings import GSTUDIO_RESOURCES_CURRICULAR
from gnowsys_ndf.settings import GSTUDIO_RESOURCES_AUDIENCE
from gnowsys_ndf.settings import GSTUDIO_RESOURCES_TEXT_COMPLEXITY
from gnowsys_ndf.settings import GSTUDIO_RESOURCES_LANGUAGES
GST_FILE = node_collection.one({'_type': 'GSystemType', 'name': u'File'})
pandora_video_st = node_collection.one({'$and': [{'_type': 'GSystemType'}, {'name': 'Pandora_video'}]})
file_id = node_collection.find_one({'_type': "GSystemType", "name": "File"}, {"_id": 1})
page_id = node_collection.find_one({'_type': "GSystemType", "name": "Page"}, {"_id": 1})
theme_gst_id = node_collection.find_one({'_type': "GSystemType", "name": "Theme"}, {"_id": 1})
# data review in File app
@login_required
@get_execution_time
def data_review(request, group_id, page_no=1, **kwargs):
'''
To get all the information related to every resource object in the group.
To get processed context_variables into another variable,
pass <get_paged_resources=True> as last arg.
e.g:
context_variables = data_review(request, group_id, page_no, get_paged_resources=True)
'''
try:
group_id = ObjectId(group_id)
except:
group_name, group_id = get_group_name_id(group_id)
files_obj = node_collection.find({
'member_of': {'$in': [
ObjectId(file_id._id),
ObjectId(page_id._id),
ObjectId(theme_gst_id._id)
]},
# '_type': 'File', 'fs_file_ids': {'$ne': []},
'group_set': {'$in': [ObjectId(group_id)]},
'$or': [
{'access_policy': u"PUBLIC"},
{'$and': [
{'access_policy': u"PRIVATE"},
{'created_by': request.user.id}
]
}
]
# {'member_of': {'$all': [pandora_video_st._id]}}
}).sort("created_at", -1)
# implementing pagination: paginator.Paginator(cursor_obj, <int: page no>, <int: no of obj in each page>)
# (ref: https://github.com/namlook/mongokit/blob/master/mongokit/paginator.py)
paged_resources = paginator.Paginator(files_obj, page_no, 10)
# list to hold resources instances with it's attributes and relations
files_list = []
for each_resource in paged_resources.items:
# each_resource, ver = get_page(request, each_resource)
each_resource.get_neighbourhood(each_resource.member_of)
files_list.append(node_collection.collection.GSystem(each_resource))
# print "==============", each_resource.name, " : ", each_resource.group_set
# print "\n\n\n========", each_resource.keys()
# for each, val in each_resource.iteritems():
# print each, "--", val,"\n"
# print "files_obj.count: ", files_obj.count()
files_obj.close()
context_variables = {
"group_id": group_id, "groupid": group_id,
"files": files_list, "page_info": paged_resources,
"urlname": "data_review_page", "second_arg": "",
"static_educationalsubject": GSTUDIO_RESOURCES_EDUCATIONAL_SUBJECT,
# "static_language": EXTRA_LANG_INFO,
"static_language": GSTUDIO_RESOURCES_LANGUAGES,
"static_educationaluse": GSTUDIO_RESOURCES_EDUCATIONAL_USE,
"static_interactivitytype": GSTUDIO_RESOURCES_INTERACTIVITY_TYPE,
"static_educationalalignment": GSTUDIO_RESOURCES_EDUCATIONAL_ALIGNMENT,
"static_educationallevel": GSTUDIO_RESOURCES_EDUCATIONAL_LEVEL,
"static_curricular": GSTUDIO_RESOURCES_CURRICULAR,
"static_audience": GSTUDIO_RESOURCES_AUDIENCE,
"static_status": list(STATUS_CHOICES),
"static_textcomplexity": GSTUDIO_RESOURCES_TEXT_COMPLEXITY
}
if kwargs.get('get_paged_resources', False):
return context_variables
template_name = "ndf/data_review.html"
return render_to_response(
template_name,
context_variables,
context_instance=RequestContext(request)
)
# ---END of data review in File app
@get_execution_time
def get_dr_search_result_dict(request, group_id, search_text=None, page_no=1):
try:
group_id = ObjectId(group_id)
except:
group_name, group_id = get_group_name_id(group_id)
# check if request is from form or from next page
if request.GET.has_key("search_text"):
search_text = request.GET.get("search_text", "")
else:
search_text = search_text.replace("+", " ")
get_req = request.GET.copy()
# adding values to GET req
get_req.update({"search_text": search_text})
# overwriting request.GET with newly created QueryDict instance get_req
request.GET = get_req
search_reply = json.loads(results_search(request, group_id, return_only_dict = True))
exact_search_res = search_reply["exact"]["name"]
result_ids_list = [ ObjectId(each_dict["_id"]) for each_dict in exact_search_res ]
result_cur = node_collection.find({
"_id": {"$in": result_ids_list},
'member_of': {'$in': [ObjectId(file_id._id), ObjectId(page_id._id)]}
})
paged_resources = paginator.Paginator(result_cur, page_no, 10)
# list to hold resources instances with it's attributes and relations
files_list = []
for each_resource in paged_resources.items:
each_resource, ver = get_page(request, each_resource)
each_resource.get_neighbourhood(each_resource.member_of)
files_list.append(node_collection.collection.GSystem(each_resource))
return render_to_response("ndf/data_review.html",
{
"group_id": group_id, "groupid": group_id,
"files": files_list, "page_info": paged_resources,
"urlname": "data_review_search_page",
"second_arg": search_text, "search_text": search_text,
"static_educationalsubject": GSTUDIO_RESOURCES_EDUCATIONAL_SUBJECT,
# "static_language": EXTRA_LANG_INFO,
"static_language": GSTUDIO_RESOURCES_LANGUAGES,
"static_educationaluse": GSTUDIO_RESOURCES_EDUCATIONAL_USE,
"static_interactivitytype": GSTUDIO_RESOURCES_INTERACTIVITY_TYPE,
"static_educationalalignment": GSTUDIO_RESOURCES_EDUCATIONAL_ALIGNMENT,
"static_educationallevel": GSTUDIO_RESOURCES_EDUCATIONAL_LEVEL,
"static_curricular": GSTUDIO_RESOURCES_CURRICULAR,
"static_audience": GSTUDIO_RESOURCES_AUDIENCE,
"static_status": list(STATUS_CHOICES),
"static_textcomplexity": GSTUDIO_RESOURCES_TEXT_COMPLEXITY
}, context_instance=RequestContext(request))
# saving resource object of data review
@login_required
@get_execution_time
def data_review_save(request, group_id):
'''
Method to save each and every data-row edit of data review app
'''
userid = request.user.pk
try:
group_id = ObjectId(group_id)
except:
group_name, group_id = get_group_name_id(group_id)
group_obj = node_collection.one({"_id": ObjectId(group_id)})
node_oid = request.POST.get("node_oid", "")
node_details = request.POST.get("node_details", "")
node_details = json.loads(node_details)
# print "node_details : ", node_details
# updating some key names of dictionary as per get_node_common_fields.
node_details["lan"] = node_details.pop("language")
node_details["prior_node_list"] = node_details.pop("prior_node")
node_details["login-mode"] = node_details.pop("access_policy")
status = node_details.pop("status")
# node_details["collection_list"] = node_details.pop("collection") for future use
# Making copy of POST QueryDict instance.
# To make it mutable and fill in node_details value/s.
post_req = request.POST.copy()
# removing node_details dict from req
post_req.pop('node_details')
# adding values to post req
post_req.update(node_details)
# overwriting request.POST with newly created QueryDict instance post_req
request.POST = post_req
# print "\n---\n", request.POST, "\n---\n"
license = request.POST.get('license', '')
file_node = node_collection.one({"_id": ObjectId(node_oid)})
if request.method == "POST":
edit_summary = []
file_node_before = file_node.copy() # copying before it is getting modified
is_changed = get_node_common_fields(request, file_node, group_id, GST_FILE)
for key, val in file_node_before.iteritems():
if file_node_before[key] != file_node[key]:
temp_edit_summ = {}
temp_edit_summ["name"] = "Field: " + key
temp_edit_summ["before"] = file_node_before[key]
temp_edit_summ["after"] = file_node[key]
edit_summary.append(temp_edit_summ)
# to fill/update attributes of the node and get updated attrs as return
ga_nodes = get_node_metadata(request, file_node, is_changed=True)
if len(ga_nodes):
is_changed = True
# adding the edit attribute name in summary
for each_ga in ga_nodes:
temp_edit_summ = {}
temp_edit_summ["name"] = "Attribute: " + each_ga["node"]["attribute_type"]["name"]
temp_edit_summ["before"] = each_ga["before_obj_value"]
temp_edit_summ["after"] = each_ga["node"]["object_value"]
edit_summary.append(temp_edit_summ)
teaches_list = request.POST.get('teaches', '') # get the teaches list
prev_teaches_list = request.POST.get("teaches_prev", "") # get the before-edit teaches list
# check if teaches list exist means nodes added/removed for teaches relation_type
# also check for if previous teaches list made empty with prev_teaches_list
if (teaches_list != '') or prev_teaches_list:
teaches_list = teaches_list.split(",") if teaches_list else []
teaches_list = [ObjectId(each_oid) for each_oid in teaches_list]
relation_type_node = node_collection.one({'_type': "RelationType", 'name':'teaches'})
gr_nodes = create_grelation(file_node._id, relation_type_node, teaches_list)
gr_nodes_oid_list = [ObjectId(each_oid["right_subject"]) for each_oid in gr_nodes] if gr_nodes else []
prev_teaches_list = prev_teaches_list.split(",") if prev_teaches_list else []
prev_teaches_list = [ObjectId(each_oid) for each_oid in prev_teaches_list]
if len(gr_nodes_oid_list) == len(prev_teaches_list) and set(gr_nodes_oid_list) == set(prev_teaches_list):
pass
else:
rel_nodes = triple_collection.find({'_type': "GRelation",
'subject': file_node._id,
'relation_type.$id': relation_type_node._id
})
rel_oid_name = {}
for each in rel_nodes:
temp = {}
temp[each.right_subject] = each.name
rel_oid_name.update(temp)
is_changed = True
temp_edit_summ = {}
temp_edit_summ["name"] = "Relation: Teaches"
temp_edit_summ["before"] = [rel_oid_name[each_oid].split(" -- ")[2] for each_oid in prev_teaches_list]
temp_edit_summ["after"] = [rel_oid_name[each_oid].split(" -- ")[2] for each_oid in gr_nodes_oid_list]
edit_summary.append(temp_edit_summ)
assesses_list = request.POST.get('assesses_list','')
if assesses_list != '':
assesses_list = assesses_list.split(",")
assesses_list = [ObjectId(each_oid) for each_oid in assesses_list]
relation_type_node = node_collection.one({'_type': "RelationType", 'name':'assesses'})
            gr_nodes = create_grelation(file_node._id, relation_type_node, assesses_list)
            gr_nodes_oid_list = [ObjectId(each_oid["right_subject"]) for each_oid in gr_nodes]
            if len(gr_nodes_oid_list) == len(assesses_list) and set(gr_nodes_oid_list) == set(assesses_list):
pass
else:
is_changed = True
# changing status to draft even if attributes/relations are changed
if is_changed:
file_node.status = unicode("DRAFT")
file_node.modified_by = userid
if userid not in file_node.contributors:
file_node.contributors.append(userid)
# checking if user is authenticated to change the status of node
if status and ((group_obj.is_gstaff(request.user)) or (userid in group_obj.author_set)):
if file_node.status != status:
file_node.status = unicode(status)
file_node.modified_by = userid
if userid not in file_node.contributors:
file_node.contributors.append(userid)
is_changed = True
if is_changed:
file_node.save(groupid=group_id)
# print edit_summary
return HttpResponse(file_node.status)
# ---END of data review saving.
| agpl-3.0 | 8,873,485,038,332,246,000 | 42.905028 | 124 | 0.599313 | false | 3.63254 | false | false | false |
RakanNimer/hackathon | main.py | 1 | 1342 | from flask import Flask
import flask
from reddit import Submissions
from reddit import Submission
import redis
import json
try:
from flask.ext.cors import CORS # The typical way to import flask-cors
except ImportError:
# Path hack allows examples to be run without installation.
import os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parentdir)
from flask.ext.cors import CORS
r = redis.StrictRedis(host='localhost', port=6379, db=0)
app = Flask(__name__)
cors = CORS(app)
@app.route("/")
def hello():
    if r.get('worldnews') is None:
        submissions = Submissions()
        submissions.getFromReddit('worldnews', 10)
        urlsInTweets = submissions.getFromTwitter()
        r.set('worldnews', json.dumps(urlsInTweets))
        return flask.jsonify(result=urlsInTweets)
    else:
        urlsInTweets = r.get('worldnews')
        submissions = json.loads(urlsInTweets)
        return flask.jsonify(result=submissions)
# submissionsInfo = {'result' : result}
# return flask.jsonify(result=submissionsInfo)
#submissions = Submissions().getFromReddit('worldnews',10)
#a = submissions.getFromTwitter()
#submission.getTweetLinksFromHashtags()
if __name__ == "__main__":
app.debug = True
app.run(threaded=True) | mit | 1,764,334,778,868,504,600 | 28.195652 | 75 | 0.681073 | false | 3.646739 | false | false | false |
xhochy/g-octave | g_octave/config.py | 1 | 2924 | # -*- coding: utf-8 -*-
"""
g_octave.config
~~~~~~~~~~~~~~~
This module implements a Python object to handle the configuration
of g-octave.
:copyright: (c) 2009-2010 by Rafael Goncalves Martins
:license: GPL-2, see LICENSE for more details.
"""
from __future__ import absolute_import
import os
from .exception import GOctaveError
# py3k compatibility
from .compat import py3k
if py3k:
import configparser
else:
import ConfigParser as configparser
__all__ = ['Config']
class Config(object):
_defaults = {
'db': '/var/cache/g-octave',
'overlay': '/var/lib/g-octave',
'categories': 'main,extra,language',
'db_mirror': 'github://rafaelmartins/g-octave-db',
'trac_user': '',
'trac_passwd': '',
'log_level': '',
'log_file': '/var/log/g-octave.log',
'package_manager': 'portage',
'use_scm': 'false',
}
_section_name = 'main'
_environ_namespace = 'GOCTAVE_'
def __init__(self, config_file=None):
# config Parser
self._config = configparser.ConfigParser(self._defaults)
# current directory
cwd = os.path.dirname(os.path.realpath(__file__))
# no configuration file provided as parameter
if config_file is None:
# we just want one of the following configuration files:
# '/etc/g-octave.cfg', '../etc/g-octave.cfg'
available_files = [
os.path.join('/etc', 'g-octave.cfg'),
os.path.join(cwd, '..', 'etc', 'g-octave.cfg'),
]
# get the first one available
for my_file in available_files:
if os.path.exists(my_file):
config_file = my_file
break
# parse the wanted file using ConfigParser
parsed_files = self._config.read(config_file)
        # no file could be parsed
if len(parsed_files) == 0:
raise GOctaveError('File not found: %r' % config_file)
def _evaluate_from_file(self, attr):
# return the value from the configuration file
try:
return self._config.get(self._section_name, attr)
except (configparser.NoSectionError, configparser.NoOptionError):
return None
def _evaluate_from_environ(self, attr):
# return the value from the environment variables namespace
return os.environ.get(self._environ_namespace + attr.upper(), None)
def __getattr__(self, attr):
# valid attribute?
if attr in self._defaults:
# try the environment variable first
from_env = self._evaluate_from_environ(attr)
if from_env is not None:
return from_env
# default to the configuration file
return self._evaluate_from_file(attr)
else:
raise GOctaveError('Invalid option: %r' % attr)
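# Illustrative usage sketch (the file path is an assumption):
#     config = Config('/etc/g-octave.cfg')
#     print(config.db)   # value from the [main] section, or the default
# Environment variables in the GOCTAVE_ namespace win over the file, e.g.
# setting GOCTAVE_DB=/tmp/db makes config.db return '/tmp/db'.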
| gpl-2.0 | 2,911,297,229,804,912,600 | 28.836735 | 75 | 0.576949 | false | 3.967436 | true | false | false |
banesullivan/ParaViewGeophysics | PVGeo/ubc/tensor.py | 1 | 21910 | __all__ = [
'TensorMeshReader',
'TensorMeshAppender',
'TopoMeshAppender',
]
__displayname__ = 'Tensor Mesh'
import os
import sys
import numpy as np
import pandas as pd
import vtk
from .. import _helpers, interface
from ..base import AlgorithmBase
from .two_file_base import ModelAppenderBase, ubcMeshReaderBase
if sys.version_info < (3,):
from StringIO import StringIO
else:
from io import StringIO
class TensorMeshReader(ubcMeshReaderBase):
"""UBC Mesh 2D/3D models are defined using a 2-file format. The "mesh" file
describes how the data is discretized. The "model" file lists the physical
property values for all cells in a mesh. A model file is meaningless without
an associated mesh file. The reader will automatically detect if the mesh is
2D or 3D and read the remainder of the data with that dimensionality
assumption. If the mesh file is 2D, then then model file must also be in the
2D format (same for 3D).
Note:
Model File is optional. Reader will still construct
``vtkRectilinearGrid`` safely.
"""
__displayname__ = 'UBC Tensor Mesh Reader'
__category__ = 'reader'
description = 'PVGeo: UBC Mesh 2D/3D Two-File Format'
def __init__(self, nOutputPorts=1, outputType='vtkRectilinearGrid', **kwargs):
ubcMeshReaderBase.__init__(
self, nOutputPorts=nOutputPorts, outputType=outputType, **kwargs
)
self.__mesh = vtk.vtkRectilinearGrid()
self.__models = []
@staticmethod
def place_model_on_mesh(mesh, model, data_name='Data'):
"""Places model data onto a mesh. This is for the UBC Grid data reaers
to associate model data with the mesh grid.
Args:
mesh (vtkRectilinearGrid): The ``vtkRectilinearGrid`` that is the
mesh to place the model data upon.
model (np.array): A NumPy float array that holds all of the data to
place inside of the mesh's cells.
data_name (str) : The name of the model data array once placed on the
``vtkRectilinearGrid``.
Return:
vtkRectilinearGrid :
Returns the input ``vtkRectilinearGrid`` with model data appended.
"""
if isinstance(model, dict):
for key in model.keys():
TensorMeshReader.place_model_on_mesh(mesh, model[key], data_name=key)
return mesh
# model.GetNumberOfValues() if model is vtkDataArray
# Make sure this model file fits the dimensions of the mesh
ext = mesh.GetExtent()
n1, n2, n3 = ext[1], ext[3], ext[5]
if n1 * n2 * n3 < len(model):
raise _helpers.PVGeoError(
'Model `%s` has more data than the given mesh has cells to hold.'
% data_name
)
elif n1 * n2 * n3 > len(model):
raise _helpers.PVGeoError(
'Model `%s` does not have enough data to fill the given mesh\'s cells.'
% data_name
)
# Swap axes because VTK structures the coordinates a bit differently
# - This is absolutely crucial!
# - Do not play with unless you know what you are doing!
if model.ndim > 1 and model.ndim < 3:
ncomp = model.shape[1]
model = np.reshape(model, (n1, n2, n3, ncomp))
model = np.swapaxes(model, 0, 1)
model = np.swapaxes(model, 0, 2)
# Now reverse Z axis
model = model[::-1, :, :, :] # Note it is in Fortran ordering
model = np.reshape(model, (n1 * n2 * n3, ncomp))
else:
model = np.reshape(model, (n1, n2, n3))
model = np.swapaxes(model, 0, 1)
model = np.swapaxes(model, 0, 2)
# Now reverse Z axis
model = model[::-1, :, :] # Note it is in Fortran ordering
model = model.flatten()
# Convert data to VTK data structure and append to output
c = interface.convert_array(model, name=data_name, deep=True)
# THIS IS CELL DATA! Add the model data to CELL data:
mesh.GetCellData().AddArray(c)
return mesh
# ------------------------------------------------------------------#
# ---------------------- UBC MESH 2D ------------------------#
# ------------------------------------------------------------------#
@staticmethod
def ubc_mesh_2d(FileName, output):
"""This method reads a UBC 2D Mesh file and builds an empty
``vtkRectilinearGrid`` for data to be inserted into. `Format Specs`_.
.. _Format Specs: http://giftoolscookbook.readthedocs.io/en/latest/content/fileFormats/mesh2Dfile.html
Args:
FileName (str) : The mesh filename as an absolute path for the input
mesh file in UBC 3D Mesh Format.
output (vtkRectilinearGrid) : The output data object
Return:
vtkRectilinearGrid :
a ``vtkRectilinearGrid`` generated from the UBC 3D Mesh grid.
Mesh is defined by the input mesh file.
No data attributes here, simply an empty mesh. Use the
``place_model_on_mesh()`` method to associate with model data.
"""
# Read in data from file
xpts, xdisc, zpts, zdisc = ubcMeshReaderBase._ubc_mesh_2d_part(FileName)
nx = np.sum(np.array(xdisc, dtype=int)) + 1
nz = np.sum(np.array(zdisc, dtype=int)) + 1
# Now generate the vtkRectilinear Grid
def _genCoords(pts, disc, z=False):
c = [float(pts[0])]
for i in range(len(pts) - 1):
start = float(pts[i])
stop = float(pts[i + 1])
num = int(disc[i])
w = (stop - start) / num
for j in range(1, num):
c.append(start + (j) * w)
c.append(stop)
c = np.array(c, dtype=float)
if z:
c = -c[::-1]
return interface.convert_array(c, deep=True)
xcoords = _genCoords(xpts, xdisc)
zcoords = _genCoords(zpts, zdisc, z=True)
ycoords = interface.convert_array(np.zeros(1), deep=True)
output.SetDimensions(nx, 2, nz) # note this subtracts 1
output.SetXCoordinates(xcoords)
output.SetYCoordinates(ycoords)
output.SetZCoordinates(zcoords)
return output
@staticmethod
def ubc_model_2d(FileName):
"""Reads a 2D model file and returns a 1D NumPy float array. Use the
``place_model_on_mesh()`` method to associate with a grid.
Note:
Only supports single component data
Args:
FileName (str) : The model filename as an absolute path for the
input model file in UBCMesh Model Format. Also accepts a list of
string file names.
Return:
np.array :
a NumPy float array that holds the model data read from
the file. Use the ``place_model_on_mesh()`` method to associate
with a grid. If a list of file names is given then it will
return a dictionary of NumPy float array with keys as the
basenames of the files.
"""
if isinstance(FileName, (list, tuple)):
out = {}
for f in FileName:
out[os.path.basename(f)] = TensorMeshReader.ubc_model_2d(f)
return out
dim = np.genfromtxt(
FileName, dtype=int, delimiter=None, comments='!', max_rows=1
)
names = ['col%d' % i for i in range(dim[0])]
df = pd.read_csv(
FileName, names=names, delim_whitespace=True, skiprows=1, comment='!'
)
data = df.values
        if np.shape(data)[0] != dim[1] or np.shape(data)[1] != dim[0]:
            raise _helpers.PVGeoError('Model file `%s` improperly formatted.' % FileName)
return data.flatten(order='F')
def __ubc_mesh_data_2d(self, filename_mesh, filename_models, output):
"""Helper method to read a 2D mesh"""
# Construct/read the mesh
if self.need_to_readMesh():
TensorMeshReader.ubc_mesh_2d(filename_mesh, self.__mesh)
self.need_to_readMesh(flag=False)
output.DeepCopy(self.__mesh)
if self.need_to_readModels() and self.this_has_models():
self.__models = []
for f in filename_models:
# Read the model data
self.__models.append(TensorMeshReader.ubc_model_2d(f))
self.need_to_readModels(flag=False)
return output
# ------------------------------------------------------------------#
# ---------------------- UBC MESH 3D ------------------------#
# ------------------------------------------------------------------#
@staticmethod
def ubc_mesh_3d(FileName, output):
"""This method reads a UBC 3D Mesh file and builds an empty
``vtkRectilinearGrid`` for data to be inserted into.
Args:
FileName (str) : The mesh filename as an absolute path for the input
mesh file in UBC 3D Mesh Format.
output (vtkRectilinearGrid) : The output data object
Return:
vtkRectilinearGrid :
a ``vtkRectilinearGrid`` generated from the UBC 3D Mesh grid.
Mesh is defined by the input mesh file.
No data attributes here, simply an empty mesh. Use the
``place_model_on_mesh()`` method to associate with model data.
"""
# --- Read in the mesh ---#
fileLines = np.genfromtxt(FileName, dtype=str, delimiter='\n', comments='!')
# Get mesh dimensions
dim = np.array(fileLines[0].split('!')[0].split(), dtype=int)
dim = (dim[0] + 1, dim[1] + 1, dim[2] + 1)
# The origin corner (Southwest-top)
# - Remember UBC format specifies down as the positive Z
# - Easting, Northing, Altitude
oo = np.array(fileLines[1].split('!')[0].split(), dtype=float)
ox, oy, oz = oo[0], oo[1], oo[2]
# Read cell sizes for each line in the UBC mesh files
def _readCellLine(line):
line_list = []
for seg in line.split():
if '*' in seg:
sp = seg.split('*')
seg_arr = np.ones((int(sp[0]),), dtype=float) * float(sp[1])
else:
seg_arr = np.array([float(seg)], dtype=float)
line_list.append(seg_arr)
return np.concatenate(line_list)
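        # Worked example of the repeat syntax handled above (illustrative):
        #     _readCellLine('2*25.0 50.0') -> array([25., 25., 50.])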
# Read the cell sizes
cx = _readCellLine(fileLines[2].split('!')[0])
cy = _readCellLine(fileLines[3].split('!')[0])
cz = _readCellLine(fileLines[4].split('!')[0])
# Invert the indexing of the vector to start from the bottom.
cz = cz[::-1]
# Adjust the reference point to the bottom south west corner
oz = oz - np.sum(cz)
# Now generate the coordinates for from cell width and origin
cox = ox + np.cumsum(cx)
cox = np.insert(cox, 0, ox)
coy = oy + np.cumsum(cy)
coy = np.insert(coy, 0, oy)
coz = oz + np.cumsum(cz)
coz = np.insert(coz, 0, oz)
# Set the dims and coordinates for the output
output.SetDimensions(dim[0], dim[1], dim[2])
# Convert to VTK array for setting coordinates
output.SetXCoordinates(interface.convert_array(cox, deep=True))
output.SetYCoordinates(interface.convert_array(coy, deep=True))
output.SetZCoordinates(interface.convert_array(coz, deep=True))
return output
def __ubc_mesh_data_3d(self, filename_mesh, filename_models, output):
"""Helper method to read a 3D mesh"""
# Construct/read the mesh
if self.need_to_readMesh():
TensorMeshReader.ubc_mesh_3d(filename_mesh, self.__mesh)
self.need_to_readMesh(flag=False)
output.DeepCopy(self.__mesh)
if self.need_to_readModels() and self.this_has_models():
self.__models = []
for f in filename_models:
# Read the model data
self.__models.append(TensorMeshReader.ubc_model_3d(f))
self.need_to_readModels(flag=False)
return output
def __ubc_tensor_mesh(self, filename_mesh, filename_models, output):
"""Wrapper to Read UBC GIF 2D and 3D meshes. UBC Mesh 2D/3D models are
defined using a 2-file format. The "mesh" file describes how the data is
        discretized. The "model" file lists the physical property values for all
        cells in a mesh. A model file is meaningless without an associated mesh
        file. If the mesh file is 2D, then the model file must also be in the
2D format (same for 3D).
Args:
filename_mesh (str) : The mesh filename as an absolute path for the
input mesh file in UBC 2D/3D Mesh Format
filename_models (str or list(str)) : The model filename(s) as an
absolute path for the input model file in UBC 2D/3D Model Format.
output (vtkRectilinearGrid) : The output data object
Return:
vtkRectilinearGrid :
a ``vtkRectilinearGrid`` generated from the UBC 2D/3D Mesh grid.
Mesh is defined by the input mesh file.
Cell data is defined by the input model file.
"""
# Check if the mesh is a UBC 2D mesh
if self.is_2d():
self.__ubc_mesh_data_2d(filename_mesh, filename_models, output)
# Check if the mesh is a UBC 3D mesh
elif self.is_3d():
self.__ubc_mesh_data_3d(filename_mesh, filename_models, output)
else:
raise _helpers.PVGeoError('File format not recognized')
return output
def RequestData(self, request, inInfo, outInfo):
"""Handles data request by the pipeline."""
# Get output:
output = self.GetOutputData(outInfo, 0)
# Get requested time index
i = _helpers.get_requested_time(self, outInfo)
self.__ubc_tensor_mesh(
self.get_mesh_filename(), self.get_model_filenames(), output
)
# Place the model data for given timestep onto the mesh
if len(self.__models) > i:
TensorMeshReader.place_model_on_mesh(
output, self.__models[i], self.get_data_name()
)
return 1
def RequestInformation(self, request, inInfo, outInfo):
"""Handles info request by pipeline about timesteps and grid extents."""
# Call parent to handle time stuff
ubcMeshReaderBase.RequestInformation(self, request, inInfo, outInfo)
# Now set whole output extent
if self.need_to_readMesh():
ext = self._read_extent()
info = outInfo.GetInformationObject(0)
# Set WHOLE_EXTENT: This is absolutely necessary
info.Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), ext, 6)
return 1
def clear_mesh(self):
"""Use to clean/rebuild the mesh"""
self.__mesh = vtk.vtkRectilinearGrid()
ubcMeshReaderBase.clear_models(self)
def clear_models(self):
"""Use to clean the models and reread"""
self.__models = []
ubcMeshReaderBase.clear_models(self)
###############################################################################
class TensorMeshAppender(ModelAppenderBase):
"""This filter reads a timeseries of models and appends it to an input
``vtkRectilinearGrid``
"""
__displayname__ = 'UBC Tensor Mesh Appender'
__category__ = 'filter'
def __init__(self, **kwargs):
ModelAppenderBase.__init__(
self,
inputType='vtkRectilinearGrid',
outputType='vtkRectilinearGrid',
**kwargs
)
def _read_up_front(self):
"""Internal helepr to read data at start"""
reader = ubcMeshReaderBase.ubc_model_3d
if not self._is_3D:
# Note how in UBC format, 2D grids are specified on an XZ plane (no Y component)
# This will only work prior to rotations to account for real spatial reference
reader = TensorMeshReader.ubc_model_2d
self._models = []
for f in self._model_filenames:
# Read the model data
self._models.append(reader(f))
self.need_to_read(flag=False)
return
def _place_on_mesh(self, output, idx=0):
"""Internal helepr to place a model on the mesh for a given index"""
TensorMeshReader.place_model_on_mesh(
output, self._models[idx], self.get_data_name()
)
return
###############################################################################
class TopoMeshAppender(AlgorithmBase):
"""This filter reads a single discrete topography file and appends it as a
boolean data array.
"""
__displayname__ = 'Append UBC Discrete Topography'
__category__ = 'filter'
def __init__(
self, inputType='vtkRectilinearGrid', outputType='vtkRectilinearGrid', **kwargs
):
AlgorithmBase.__init__(
self,
nInputPorts=1,
inputType=inputType,
nOutputPorts=1,
outputType=outputType,
)
self._topoFileName = kwargs.get('filename', None)
self.__indices = None
self.__need_to_read = True
self.__ne, self.__nn = None, None
def need_to_read(self, flag=None):
"""Ask self if the reader needs to read the files again
Args:
flag (bool): if the flag is set then this method will set the read
status
Return:
bool:
The status of the reader aspect of the filter.
"""
if flag is not None and isinstance(flag, (bool, int)):
self.__need_to_read = flag
return self.__need_to_read
def Modified(self, read_again=True):
"""Call modified if the files needs to be read again again."""
if read_again:
self.__need_to_read = read_again
AlgorithmBase.Modified(self)
def modified(self, read_again=True):
"""Call modified if the files needs to be read again again."""
return self.Modified(read_again=read_again)
def _read_up_front(self):
"""Internal helepr to read data at start"""
# Read the file
content = np.genfromtxt(
self._topoFileName, dtype=str, delimiter='\n', comments='!'
)
dim = content[0].split()
self.__ne, self.__nn = int(dim[0]), int(dim[1])
self.__indices = pd.read_csv(
StringIO("\n".join(content[1::])),
names=['i', 'j', 'k'],
delim_whitespace=True,
)
# NOTE: K indices are inverted
self.need_to_read(flag=False)
return
def _place_on_mesh(self, output):
"""Internal helepr to place an active cells model on the mesh"""
# Check mesh extents to math topography
nx, ny, nz = output.GetDimensions()
nx, ny, nz = nx - 1, ny - 1, nz - 1 # because GetDimensions counts the nodes
topz = np.max(self.__indices['k']) + 1
if nx != self.__nn or ny != self.__ne or topz > nz:
raise _helpers.PVGeoError(
'Dimension mismatch between input grid and topo file.'
)
        # # Adjust the k indices to be in cartesian system
# self.__indices['k'] = nz - self.__indices['k']
        # Fill out the topo and add it as a model, as it will be in UBC format
# Create a 3D array of 1s and zeros (1 means beneath topo or active)
topo = np.empty((ny, nx, nz), dtype=float)
topo[:] = np.nan
for row in self.__indices.values:
i, j, k = row
topo[i, j, k + 1 :] = 0
topo[i, j, : k + 1] = 1
# Add as model... ``place_model_on_mesh`` handles the rest
TensorMeshReader.place_model_on_mesh(
output, topo.flatten(), 'Active Topography'
)
return
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output"""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
output = self.GetOutputData(outInfo, 0)
output.DeepCopy(pdi) # ShallowCopy if you want changes to propagate upstream
# Perfrom task:
if self.__need_to_read:
self._read_up_front()
# Place the model data for given timestep onto the mesh
self._place_on_mesh(output)
return 1
#### Setters and Getters ####
def clear_topo_file(self):
"""Use to clear data file name."""
self._topoFileName = None
self.Modified(read_again=True)
def set_topo_filename(self, filename):
"""Use to set the file names for the reader. Handles single strings only"""
if filename is None:
return # do nothing if None is passed by a constructor on accident
elif isinstance(filename, str) and self._topoFileName != filename:
self._topoFileName = filename
self.Modified()
return 1
###############################################################################
#
# import numpy as np
# indices = np.array([[0,0,1],
# [0,1,1],
# [0,2,1],
# [1,0,1],
# [1,1,1],
# [1,2,1],
# [2,0,1],
# [2,1,1],
# [2,2,1],
# ])
#
# topo = np.empty((3,3,3), dtype=float)
# topo[:] = np.nan
#
# for row in indices:
# i, j, k = row
# topo[i, j, k:] = 0
# topo[i, j, :k] = 1
# topo
| bsd-3-clause | 8,269,175,232,248,584,000 | 37.371278 | 110 | 0.557599 | false | 3.942775 | false | false | false |
Nettacker/Nettacker | lib/transactions/maltego/nettacker_transforms/src/nettacker_transforms/transforms/wp_xmlrpc_dos_vuln.py | 1 | 1880 | import random
from canari.maltego.transform import Transform
from canari.maltego.entities import URL
from canari.framework import EnableDebugWindow
from common.entities import NettackerScan
from lib.scan.wp_xmlrpc.engine import start
from database.db import __logs_by_scan_id as find_log
__author__ = 'Shaddy Garg'
__copyright__ = 'Copyright 2018, nettacker_transforms Project'
__credits__ = []
__license__ = 'GPLv3'
__version__ = '0.1'
__maintainer__ = 'Shaddy Garg'
__email__ = '[email protected]'
__status__ = 'Development'
@EnableDebugWindow
class WordpressXMLPRCScan(Transform):
"""TODO: Your transform description."""
# The transform input entity type.
input_type = NettackerScan
def do_transform(self, request, response, config):
# TODO: write your code here.
scan_request = request.entity
scan_id = "".join(random.choice("0123456789abcdef") for x in range(32))
scan_request.ports = scan_request.ports.split(', ') if scan_request.ports is not None else None
start(scan_request.host, [], [], scan_request.ports, scan_request.timeout_sec, scan_request.thread_no,
1, 1, 'abcd', 0, "en", scan_request.verbose, scan_request.socks_proxy, scan_request.retries, [], scan_id,
"Through Maltego")
results = find_log(scan_id, "en")
for result in results:
url = result["HOST"] + ":" + result["PORT"]
response += URL(url=url, title=result["DESCRIPTION"],
short_title="Site is vulnerable to XMLPRC DOS attacks ",
link_label='wp_xmlrpc_dos_vuln')
return response
def on_terminate(self):
"""This method gets called when transform execution is prematurely terminated. It is only applicable for local
transforms. It can be excluded if you don't need it."""
pass
| gpl-3.0 | 8,579,453,768,622,444,000 | 37.367347 | 119 | 0.653723 | false | 3.74502 | false | false | false |
xmichael/tagger | src/main/lib/logtool.py | 1 | 1546 | ## Just some utility functions for logging messages. Most important is getLogger.
import sys, pprint, json, logging, configuration
### Constants ###
#################
## Can only log in stderr (or environ['wsgi.errors']) when using WSGI:
def dbg(msg):
print >> sys.stderr, msg
def pp(obj):
"""
shortcut for pretty printing a python object on the debug channel
"""
pprinter = pprint.PrettyPrinter(indent=4)
return pprinter.pformat(obj)
def jsonpp(obj):
"""
shortcut for pretty printing a json object on the debug channel
"""
pp(json.loads(obj))
def getLogger(name, parent=None):
""" Create a logger with some sane configuration
Args:
name (str): name of logger. Should be the name of the file.
parent (str): name of parent logger to inherit its properties
"""
if parent:
# create child logger that inherits properties from father
logger = logging.getLogger(parent + "." + name)
else:
#create parent logger with new properties
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(configuration.get_log_file())
fh.setLevel(logging.DEBUG)
# create formatter and add it to the handler
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
return logger
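# Illustrative usage sketch (logger names are assumptions):
#     log = getLogger('tagger')                  # parent logger
#     db_log = getLogger('db', parent='tagger')  # child inherits the hierarchy
#     db_log.debug(pp({'key': 'value'}))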
| bsd-3-clause | -8,895,426,044,850,906,000 | 31.893617 | 93 | 0.651358 | false | 4.35493 | false | false | false |
Labbiness/Pancake | setup.py | 1 | 1437 | # -*- encoding:utf-8 -*-
#
# Copyright (c) 2017-2018 Shota Shimazu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setuptools import setup, find_packages
import sys
sys.path.append('./pancake')
sys.path.append('./tests')
if __name__ == "__main__":
setup(
name = "Pancake",
version='0.0.1',
author = "Shota Shimazu",
author_email = "[email protected]",
packages = find_packages(),
install_requires=[
],
entry_points = {
'console_scripts':[
'pancake = Pancake.Pancake:main',
],
},
description = "Abstract layer for any package manager.",
long_description = "Abstract layer for any package manager.",
url = "https://github.com/shotastage/Pancake.git",
license = "Apache",
platforms = ["POSIX", "Windows", "Mac OS X"],
test_suite = "djconsole_test.suite",
)
| apache-2.0 | 5,198,664,308,909,516,000 | 31.659091 | 74 | 0.631872 | false | 3.811671 | false | false | false |
ciarams87/PyU4V | PyU4V/tools/openstack/migrate.py | 1 | 7265 | # Copyright (c) 2020 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenStack migrate script.
The script migrates volumes from the old SMI-S Masking view to the
new REST Masking view used from Pike onwards.
"""
from __future__ import print_function
from builtins import input
import sys
from PyU4V.tools.openstack import migrate_utils
from PyU4V import univmax_conn
sys.path.append('../../..')
sys.path.append('.')
conn = univmax_conn.U4VConn()
utils = migrate_utils.MigrateUtils(conn)
utils.smart_print(
'********************************************************************',
migrate_utils.DEBUG)
utils.smart_print(
'*** Welcome to the migration script for the VMAX/PowerMax driver ***',
migrate_utils.DEBUG)
utils.smart_print(
'*** to migrate from SMI-S masking view to REST masking view. ***',
migrate_utils.DEBUG)
utils.smart_print(
'*** This is recommended if you intend using live migration to ***',
migrate_utils.DEBUG)
utils.smart_print(
'*** move from one compute node to another. ***',
migrate_utils.DEBUG)
utils.smart_print(
'********************************************************************',
migrate_utils.DEBUG)
utils.smart_print('version is %s', migrate_utils.DEBUG, migrate_utils.VERSION)
masking_view_list = conn.provisioning.get_masking_view_list()
is_revert = False
no_action = True
if len(sys.argv) == 2:
if sys.argv[1] == 'revert':
is_revert = True
else:
utils.smart_print('%s is not a valid argument.',
migrate_utils.DEBUG, sys.argv[1])
sys.exit()
def revert_case(masking_view_name):
"""The revert case of the migrate process
:param masking_view_name: masking view name -- str
:returns: masking view details -- dict
element details -- dict
no action flag -- boolean
"""
if utils.check_masking_view_for_migration(
masking_view_name, True):
utils.smart_print(
'NEW MASKING VIEW IS %s',
migrate_utils.DEBUG, masking_view_name)
masking_view_details = (
utils.get_elements_from_masking_view(masking_view_name))
# The storage group is the parent SG
# Get the list of child SGs
child_storage_group_list = (
conn.provisioning.get_child_storage_groups_from_parent(
masking_view_details['storagegroup']))
element_details, masking_view_details['storagegroup'] = (
utils.choose_storage_group(
masking_view_name, child_storage_group_list,
masking_view_details['portgroup'],
masking_view_details['initiatorgroup'],
is_revert))
# Check if masking view exists and if it does validate it
if element_details:
utils.get_or_create_masking_view(
element_details,
masking_view_details['portgroup'],
masking_view_details['initiatorgroup'],
is_revert)
else:
utils.smart_print(
'NO MIGRATION', migrate_utils.WARNING)
sys.exit()
return masking_view_details, element_details, False
else:
return dict(), dict(), True
def migrate_case(masking_view_name):
"""The revert case of the migrate process
:param masking_view_name: masking view name -- str
:returns: masking view details -- dict
element details -- dict
no action flag -- boolean
"""
if utils.check_masking_view_for_migration(masking_view_name):
utils.smart_print(
'OLD MASKING VIEW IS %s',
migrate_utils.DEBUG, masking_view_name)
masking_view_details = (
utils.get_elements_from_masking_view(masking_view_name))
# Compile the new names of the SGs and MV
element_details = utils.compile_new_element_names(
masking_view_name, masking_view_details['portgroup'],
masking_view_details['initiatorgroup'],
masking_view_details['storagegroup'])
# Check if masking view exists and if it does validate it
utils.get_or_create_masking_view(
element_details, masking_view_details['portgroup'],
masking_view_details['initiatorgroup'])
return masking_view_details, element_details, False
else:
return dict(), dict(), True
def move_volumes(masking_view_details, element_details):
"""Move volumes from one masking view to another
:param masking_view_details: masking view details -- dict
:param element_details: element details -- dict
"""
# Check the qos setting of source and target storage group
utils.set_qos(
masking_view_details['storagegroup'],
element_details['new_sg_name'])
volume_list, create_volume_flag = utils.get_volume_list(
masking_view_details['storagegroup'])
if volume_list:
message = utils.move_volumes_from_source_to_target(
volume_list, masking_view_details['storagegroup'],
element_details['new_sg_name'], create_volume_flag)
print_str = '%s SOURCE STORAGE GROUP REMAINS'
utils.smart_print(
print_str, migrate_utils.DEBUG,
masking_view_details['storagegroup'])
utils.print_pretty_table(message)
new_storage_group = utils.get_storage_group(
element_details['new_sg_name'])
print_str = '%s TARGET STORAGE GROUP DETAILS:'
utils.smart_print(
print_str, migrate_utils.DEBUG,
element_details['new_sg_name'])
utils.print_pretty_table(new_storage_group)
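# Main interactive loop: for every masking view that validates as an
# OpenStack view, ask the user whether to process it, run the revert or
# migrate case chosen above, and finally move the volumes from the source
# storage group to the newly created one.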
for masking_view in masking_view_list:
if utils.validate_masking_view(masking_view, is_revert):
txt = 'Do you want to migrate %s. Y/N or X(exit): ' % masking_view
txt_out = input(txt)
if utils.check_input(txt_out, 'Y'):
if is_revert:
masking_view_components, element_dict, no_action = (
revert_case(masking_view))
else:
masking_view_components, element_dict, no_action = (
migrate_case(masking_view))
# Get the volumes in the storage group
if masking_view_components and (
'storagegroup' in masking_view_components):
move_volumes(masking_view_components, element_dict)
else:
utils.smart_print('NO MIGRATION', migrate_utils.WARNING)
elif utils.check_input(txt_out, 'X'):
sys.exit()
if no_action:
utils.smart_print(
'No OpenStack masking views eligible for migration.',
migrate_utils.DEBUG)
| mit | 42,564,993,807,781,930 | 36.838542 | 78 | 0.615416 | false | 3.939805 | false | false | false |
mailgun/talon | talon/signature/learning/helpers.py | 1 | 6879 | # -*- coding: utf-8 -*-
""" The module provides:
* functions used when evaluating signature's features
* regexp's constants used when evaluating signature's features
"""
from __future__ import absolute_import
import unicodedata
import regex as re
from talon.utils import to_unicode
from talon.signature.constants import SIGNATURE_MAX_LINES
rc = re.compile
RE_EMAIL = rc('\S@\S')
RE_RELAX_PHONE = rc('(\(? ?[\d]{2,3} ?\)?.{,3}?){2,}')
RE_URL = rc(r'''https?://|www\.[\S]+\.[\S]''')
# Taken from:
# http://www.cs.cmu.edu/~vitor/papers/sigFilePaper_finalversion.pdf
# Line matches the regular expression "^[\s]*---*[\s]*$".
RE_SEPARATOR = rc('^[\s]*---*[\s]*$')
# Taken from:
# http://www.cs.cmu.edu/~vitor/papers/sigFilePaper_finalversion.pdf
# Line has a sequence of 10 or more special characters.
RE_SPECIAL_CHARS = rc(('^[\s]*([\*]|#|[\+]|[\^]|-|[\~]|[\&]|[\$]|_|[\!]|'
'[\/]|[\%]|[\:]|[\=]){10,}[\s]*$'))
RE_SIGNATURE_WORDS = rc(('(T|t)hank.*,|(B|b)est|(R|r)egards|'
'^sent[ ]{1}from[ ]{1}my[\s,!\w]*$|BR|(S|s)incerely|'
'(C|c)orporation|Group'))
# Taken from:
# http://www.cs.cmu.edu/~vitor/papers/sigFilePaper_finalversion.pdf
# Line contains a pattern like Vitor R. Carvalho or William W. Cohen.
RE_NAME = rc('[A-Z][a-z]+\s\s?[A-Z][\.]?\s\s?[A-Z][a-z]+')
INVALID_WORD_START = rc('\(|\+|[\d]')
BAD_SENDER_NAMES = [
# known mail domains
'hotmail', 'gmail', 'yandex', 'mail', 'yahoo', 'mailgun', 'mailgunhq',
'example',
# first level domains
'com', 'org', 'net', 'ru',
# bad words
'mailto'
]
def binary_regex_search(prog):
'''Returns a function that returns 1 or 0 depending on regex search result.
If regular expression compiled into prog is present in a string
the result of calling the returned function with the string will be 1
and 0 otherwise.
>>> import regex as re
>>> binary_regex_search(re.compile("12"))("12")
1
>>> binary_regex_search(re.compile("12"))("34")
0
'''
return lambda s: 1 if prog.search(s) else 0
def binary_regex_match(prog):
'''Returns a function that returns 1 or 0 depending on regex match result.
If a string matches regular expression compiled into prog
the result of calling the returned function with the string will be 1
and 0 otherwise.
>>> import regex as re
>>> binary_regex_match(re.compile("12"))("12 3")
1
>>> binary_regex_match(re.compile("12"))("3 12")
0
'''
return lambda s: 1 if prog.match(s) else 0
def flatten_list(list_to_flatten):
"""Simple list comprehension to flatten list.
>>> flatten_list([[1, 2], [3, 4, 5]])
[1, 2, 3, 4, 5]
>>> flatten_list([[1], [[2]]])
[1, [2]]
>>> flatten_list([1, [2]])
Traceback (most recent call last):
...
TypeError: 'int' object is not iterable
"""
return [e for sublist in list_to_flatten for e in sublist]
def contains_sender_names(sender):
    '''Returns a function that searches for the sender's name or its parts.
>>> feature = contains_sender_names("Sergey N. Obukhov <[email protected]>")
>>> feature("Sergey Obukhov")
1
>>> feature("BR, Sergey N.")
1
>>> feature("Sergey")
1
>>> contains_sender_names("<[email protected]>")("Serobnic")
1
>>> contains_sender_names("<[email protected]>")("serobnic")
1
'''
names = '( |$)|'.join(flatten_list([[e, e.capitalize()]
for e in extract_names(sender)]))
names = names or sender
if names != '':
return binary_regex_search(re.compile(names))
return lambda s: 0
def extract_names(sender):
"""Tries to extract sender's names from `From:` header.
It could extract not only the actual names but e.g.
the name of the company, parts of email, etc.
>>> extract_names('Sergey N. Obukhov <[email protected]>')
['Sergey', 'Obukhov', 'serobnic']
>>> extract_names('')
[]
"""
sender = to_unicode(sender, precise=True)
# Remove non-alphabetical characters
sender = "".join([char if char.isalpha() else ' ' for char in sender])
# Remove too short words and words from "black" list i.e.
# words like `ru`, `gmail`, `com`, `org`, etc.
sender = [word for word in sender.split() if len(word) > 1 and
not word in BAD_SENDER_NAMES]
# Remove duplicates
names = list(set(sender))
return names
def categories_percent(s, categories):
'''Returns category characters percent.
>>> categories_percent("qqq ggg hhh", ["Po"])
0.0
>>> categories_percent("q,w.", ["Po"])
50.0
>>> categories_percent("qqq ggg hhh", ["Nd"])
0.0
>>> categories_percent("q5", ["Nd"])
50.0
>>> categories_percent("s.s,5s", ["Po", "Nd"])
50.0
'''
count = 0
s = to_unicode(s, precise=True)
for c in s:
if unicodedata.category(c) in categories:
count += 1
return 100 * float(count) / len(s) if len(s) else 0
def punctuation_percent(s):
'''Returns punctuation percent.
>>> punctuation_percent("qqq ggg hhh")
0.0
>>> punctuation_percent("q,w.")
50.0
'''
return categories_percent(s, ['Po'])
def capitalized_words_percent(s):
'''Returns capitalized words percent.'''
s = to_unicode(s, precise=True)
words = re.split('\s', s)
words = [w for w in words if w.strip()]
words = [w for w in words if len(w) > 2]
capitalized_words_counter = 0
valid_words_counter = 0
for word in words:
if not INVALID_WORD_START.match(word):
valid_words_counter += 1
if word[0].isupper() and not word[1].isupper():
capitalized_words_counter += 1
if valid_words_counter > 0 and len(words) > 1:
return 100 * float(capitalized_words_counter) / valid_words_counter
return 0
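# Illustrative example (values assumed, not from the original module):
# words shorter than three characters are ignored, so
#     capitalized_words_percent("Hello world From Mailgun")
# counts Hello, From and Mailgun as capitalized out of four valid words
# and returns 75.0.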
def many_capitalized_words(s):
"""Returns a function to check percentage of capitalized words.
The function returns 1 if percentage greater then 65% and 0 otherwise.
"""
return 1 if capitalized_words_percent(s) > 66 else 0
def has_signature(body, sender):
    '''Checks if the body has a signature. Returns True if one is found.'''
non_empty = [line for line in body.splitlines() if line.strip()]
candidate = non_empty[-SIGNATURE_MAX_LINES:]
upvotes = 0
for line in candidate:
# we check lines for sender's name, phone, email and url,
        # those signature lines don't take more than 27 characters
if len(line.strip()) > 27:
continue
elif contains_sender_names(sender)(line):
return True
elif (binary_regex_search(RE_RELAX_PHONE)(line) +
binary_regex_search(RE_EMAIL)(line) +
binary_regex_search(RE_URL)(line) == 1):
upvotes += 1
if upvotes > 1:
return True
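# Illustrative usage (assumed values, not part of the original module):
#     has_signature("Thanks\n--\nJohn Doe\n+1 555 123 4567",
#                   "John Doe <john@example.com>")
# returns True because the "John Doe" line matches the sender's name.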
| apache-2.0 | -8,025,110,656,605,266,000 | 29.303965 | 79 | 0.594709 | false | 3.321584 | false | false | false |
Farious/PersonTracker | Source/LoadImageTextPrint_sample.py | 1 | 4907 | # import numpy as np
import cv2
# import linecache
import LoadImagePrintText as f
# >>> import os
# >>> os.chdir("C:\Users\dario\Desktop\Dropbox\Work\DemoHDA\git\Source\\")
## Input
cam = 60
frame = 26
debugREID = 1
img = f.loadImagePrintText(cam, frame, debugREID=0, debugPD=1, PDthreshold=20)
cv2.namedWindow("1", cv2.WINDOW_NORMAL)
cv2.resizeWindow("1", 2560 / 4, 1600 / 4)
cv2.imshow("1", img)
cv2.waitKey(0)
## Pre-defined static variables
# CV_FILLED = -1
# red = (0, 0, 255)
# green = (0, 255, 0)
# black = (0, 0, 0)
# white = (255, 255, 255)
# thickness = 8
# if cam == 60:
# fontScale = 2 # 2 for Camera 60 (4MPixel), 1 for other cameras (1MPixel)
# else:
# fontScale = 1
#
# # user = "dario" # "Toshiba"
# # JPEGspath = "C:\Users\\" + user + "\Desktop\Dropbox\Work\HDA_Dataset\VIDeoSequences\JPEG\camera60\\"
# JPEGspath = "RESOURCES\JPEG\camera" + str(cam) + "\\"
# filename = "I000" + str(frame) + ".jpeg"
# image = cv2.imread(JPEGspath + filename)
#
# cv2.namedWindow("1", cv2.WINDOW_NORMAL)
# cv2.resizeWindow("1", 2560/2, 1600/2)
#
# # DetectionsPath = "C:\\Users\\" + user + "\Desktop\Dropbox\Work\DemoHDA\\7.0.SmallLimited\\"
# # detectionfile = "\set60\V000\I000" + str(frame) + ".txt"
# detectionsPath = "RESOURCES\Detections\set" + str(cam) + "\V000\\"
# detectionFile = "I000" + str(frame) + ".txt"
#
# # line = 'place-holder'
# ind = 1
#
# fileText = open(detectionsPath + detectionFile, 'r')
# lines = fileText.readlines()
# fileText.close()
#
# res1 = [line.rstrip('\n').split(',') for line in lines]
# for i, values in enumerate(res1):
# res1[i] = [int(float(value)) for value in values] # [:4]
# leftTop = np.array((res1[i][0], res1[i][1]))
# rightBottom = leftTop + np.array((res1[i][2], res1[i][3]))
# left = res1[i][0]
# top = res1[i][1]
# right = left+res1[i][2]
# bottom = top+res1[i][3]
# if len(res1[i]) > 5: # There is a re-IDentification for this detection
# correctID = res1[i][5]
# REIDs = res1[i][6:]
#
# imgR = image
# ## in thickness CV_FILLED is -1
# ## Coordinate frame is (x,y) starting at top-left corner
# ## cv2.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]])
# cv2.rectangle(imgR, (left, top), (right, bottom), red, thickness)
#
# ## Given a list of names, put one white box for each, on top of the image, and print the text on each respective
# # whitebox
#
# # Standard person names are PersonXXX
# texts = [str(k+1) + ".Person" + str(ID).zfill(3) for k, ID in enumerate(REIDs)]
# # But for a few select persons that we do know their first name, we can re-name the text to their names
# # It would probably be nicer if made in a single cycle
# for k, ID in enumerate(REIDs):
# if ID == 22:
# texts[k] = str(k+1) + ".Matteo"
# if ID == 32:
# texts[k] = str(k+1) + ".Dario"
# # print texts[k]
#
# # texts = ("1.Matteo","2.Dario")
# textHeight = 25*fontScale # 50 for cv2.FONT_HERSHEY_DUPLEX in cam60 image sizes
# letterWidth = 18*fontScale
# # for j, text in enumerate(texts):
# for k, ID in enumerate(REIDs):
# text = texts[k]
# j=k
# cv2.rectangle(imgR, (left, top-textHeight*j),
# (left + letterWidth*len(text), top-textHeight*(j+1)), white, CV_FILLED) # tuple(topleft + (textWIDth, 0))
# ## cv2.putText(img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]])
# if ID == correctID:
# color = green
# else:
# color = red
# if debugREID == 0:
# color = black
# cv2.putText(imgR, text, (left, top-textHeight*j), cv2.FONT_HERSHEY_DUPLEX, fontScale, color, thickness=thickness/2)
#
# cv2.imshow("1",imgR)
#
#
# cv2.waitKey(0)
# FONT_HERSHEY_SIMPLEX, FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX,
# FONT_HERSHEY_COMPLEX_SMALL, FONT_HERSHEY_SCRIPT_SIMPLEX, or FONT_HERSHEY_SCRIPT_COMPLEX
# cv2.putText(imgR, text, tuple(topleft), cv2.FONT_HERSHEY_SIMPLEX, 3, black, thickness/2)
# cv2.putText(imgR, text, tuple(topleft+(0, textHeight)), cv2.FONT_HERSHEY_PLAIN, 3, black, thickness/2)
# cv2.putText(imgR, text, tuple(topleft+(0, textHeight*2)), cv2.FONT_HERSHEY_DUPLEX, 3, black, thickness/2)
# cv2.putText(imgR, text, tuple(topleft+(0, textHeight*3)), cv2.FONT_HERSHEY_COMPLEX, 3, black, thickness/2)
# cv2.putText(imgR, text, tuple(topleft+(0, textHeight*4)), cv2.FONT_HERSHEY_TRIPLEX, 3, black, thickness/2)
# cv2.putText(imgR, text, tuple(topleft+(0, textHeight*5)), cv2.FONT_HERSHEY_COMPLEX_SMALL, 3, black, thickness/2)
# cv2.putText(imgR, text, tuple(topleft+(0, textHeight*6)), cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 3, black, thickness/2)
# cv2.putText(imgR, text, tuple(topleft+(0, textHeight*7)), cv2.FONT_HERSHEY_SCRIPT_COMPLEX, 3, black, thickness/2)
| apache-2.0 | -2,488,138,149,678,787,000 | 37.944444 | 130 | 0.627675 | false | 2.600424 | false | false | false |
michaelcontento/whirlwind | whirlwind/view/filters.py | 1 | 6750 | from datetime import datetime
import pytz, sys, re, locale
from dateutil import parser
try:
import simplejson
except ImportError:
import json as simplejson
class Filters():
    '''
    Checks whether the passed-in value is considered useful; otherwise
    returns None. The following values are treated as not useful:
    None
    ''
    'null'
    'undefined'
    {}
    '''
@staticmethod
def val(val):
if val == None :
return None
if val == 'null' :
return None
if val == 'undefined' :
return None
if val == 0 :
return val
if isinstance(val, basestring) and len(val) == 0 :
return None
if isinstance(val, dict) and len(val) == 0 :
return None
return val
@staticmethod
def str(val):
if not val:
return ''
#TODO: sensibly handle:
# dicts => json
# dates => pretty
# numbers => add commas
return str(val)
'''
Checks for various styles of true.
matches on True, 'true', 'on'
'''
@staticmethod
def is_true(val):
if not val :
return False
if isinstance(val, basestring) :
if val == 'True' or val == 'true' or val == 'on' :
return True
return False
if val == True :
return True
return False
@staticmethod
def strip_html(data):
if not data :
return
p = re.compile(r'<[^<]*?/?>')
return p.sub('', data)
@staticmethod
def long_timestamp(dt_str,tz="America/New_York"):
utc_dt = Filters._convert_utc_to_local(dt_str,tz)
if utc_dt:
return utc_dt.strftime("%A, %d. %B %Y %I:%M%p")
else:
return dt_str
@staticmethod
def short_timestamp(dt_str,tz="America/New_York"):
tz_dt = Filters._convert_utc_to_local(dt_str,tz)
return tz_dt.strftime("%m/%d/%Y %I:%M")
@staticmethod
def short_date(dt_str,tz="America/New_York"):
tz_dt = Filters._convert_utc_to_local(dt_str,tz)
return tz_dt.strftime("%m/%d/%Y")
@staticmethod
def ellipsis(data,limit,append='...'):
return (data[:limit] + append) if len(data) > limit else data
'''
filter to translate a dict to json
'''
@staticmethod
def to_json(dict):
return simplejson.dumps(dict, True)
@staticmethod
def idize(str):
return (re.sub(r'[^0-9a-zA-Z]', '_',str)).lower()
@staticmethod
    def _convert_utc_to_local(utc_dt, tz):
        try:
            # Attach the UTC zone to the naive datetime, then convert it
            # to the requested local timezone.
            local = pytz.timezone(tz)
            utc_dt = utc_dt.replace(tzinfo=pytz.utc)
            return utc_dt.astimezone(local)
        except Exception:
            print sys.exc_info()
            return None
@staticmethod
def url_pretty(str):
if not str :
return
url = re.sub(r'[^0-9a-zA-Z]', '_',Filters.str(str))
url = re.sub('_+', '_',url)
#max 32 chars.
if len(url) > 32 :
url = url[0:32]
return url
@staticmethod
def add_commas(val,as_data_type='int',the_locale=locale.LC_ALL):
locale.setlocale(the_locale, "")
if as_data_type == 'int':
return locale.format('%d', int(val), True)
elif as_data_type == 'float':
return locale.format('%f', float(val), True)
else:
return val
@staticmethod
def pluralize(str):
pl = Pluralizer()
return pl.plural(str)
'''
Does a get on the dict. will work with dot operator, and not throw an exception
returns default if the key doesn't work
will also work to reach into lists via integer keys.
example:
{
'key1' : {
'subkey' : [{'subsubkey1':9},{}]
}
}
Filters.dict_get('key1.subkey.0.subsubkey1') => 9
'''
@staticmethod
def dict_get(dict, key, default=None):
#Surround this with try in case key is None or not a string or something
try:
keys = key.split(".")
except:
return default
tmp = dict
for k in keys :
try:
tmp = tmp[k]
except TypeError:
#Issue may be that we have something like '0'. Try converting to a number
try:
tmp = tmp[int(k)]
except:
#Either couldn't convert or went out of bounds on list
return default
except:
#Exception other than TypeError probably missing key, so default
return default
return tmp
class Pluralizer():
#
# (pattern, search, replace) regex english plural rules tuple
#
rule_tuple = (
('[ml]ouse$', '([ml])ouse$', '\\1ice'),
('child$', 'child$', 'children'),
('booth$', 'booth$', 'booths'),
('foot$', 'foot$', 'feet'),
('ooth$', 'ooth$', 'eeth'),
('l[eo]af$', 'l([eo])af$', 'l\\1aves'),
('sis$', 'sis$', 'ses'),
('man$', 'man$', 'men'),
('ife$', 'ife$', 'ives'),
('eau$', 'eau$', 'eaux'),
('lf$', 'lf$', 'lves'),
('[sxz]$', '$', 'es'),
('[^aeioudgkprt]h$', '$', 'es'),
('(qu|[^aeiou])y$', 'y$', 'ies'),
('$', '$', 's')
)
    @staticmethod
    def regex_rules(rules=rule_tuple):
        for line in rules:
            pattern, search, replace = line
            yield lambda word: re.search(pattern, word) and re.sub(search, replace, word)
    @staticmethod
    def plural(noun):
        # Without @staticmethod, calling pl.plural(noun) on an instance would
        # pass the instance as the first argument and raise a TypeError, and
        # the bare regex_rules() name would not resolve from class scope.
        for rule in Pluralizer.regex_rules():
            result = rule(noun)
            if result:
                return result
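# Illustrative usage (not part of the original module):
#     Filters.pluralize('mouse')  -> 'mice'
#     Filters.pluralize('box')    -> 'boxes'
#     Filters.pluralize('query')  -> 'queries'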
class Cycler():
cycle_registry = {}
@staticmethod
def uuid():
import uuid
return uuid.uuid1()
@staticmethod
def cycle(values,name='default'):
if name in Cycler.cycle_registry:
try:
return Cycler.cycle_registry[name].next()
except StopIteration:
Cycler.cycle_registry[name] = iter(values)
return Cycler.cycle_registry[name].next()
else:
Cycler.cycle_registry[name] = iter(values)
return Cycler.cycle_registry[name].next()
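# Illustrative usage (not part of the original module): repeated calls walk
# through the given values and restart once the sequence is exhausted.
#     Cycler.cycle(['odd', 'even'], name='rows')  -> 'odd'
#     Cycler.cycle(['odd', 'even'], name='rows')  -> 'even'
#     Cycler.cycle(['odd', 'even'], name='rows')  -> 'odd'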
| mit | 1,393,791,378,986,615,300 | 27.246862 | 90 | 0.480296 | false | 4.187345 | false | false | false |
cuemacro/chartpy | chartpy_examples/subplot_example.py | 1 | 2359 | __author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
import pandas
# support Quandl 3.x.x
try:
import quandl as Quandl
except:
# if import fails use Quandl 2.x.x
import Quandl
from chartpy import Chart, Style
# get your own free Quandl API key from https://www.quandl.com/
try:
from chartpy.chartcred import ChartCred
cred = ChartCred()
quandl_api_key = cred.quandl_api_key
except:
quandl_api_key = "x"
# choose run_example = 0 for everything
# run_example = 1 - plot US GDP QoQ (real) and nominal with Plotly/Bokeh/Matplotlib with subplots for each line
# run_example = 2 - plot US GDP QoQ (real + nominal) in two double plots (passing an array of dataframes)
run_example = 0
if run_example == 1 or run_example == 0:
df = Quandl.get(["FRED/A191RL1Q225SBEA", "FRED/A191RP1Q027SBEA"], authtoken=quandl_api_key)
df.columns = ["Real QoQ", "Nominal QoQ"]
# set the style of the plot
style = Style(title="US GDP", source="Quandl/Fred", subplots=True)
# Chart object is initialised with the dataframe and our chart style
chart = Chart(df=df, chart_type='line', style=style)
chart.plot(engine='matplotlib')
chart.plot(engine='bokeh')
chart.plot(engine='plotly')
if run_example == 2 or run_example == 0:
df = Quandl.get(["FRED/A191RL1Q225SBEA", "FRED/A191RP1Q027SBEA"], authtoken=quandl_api_key)
df.columns = ["Real QoQ", "Nominal QoQ"]
df = [df, df]
# set the style of the plot
style = Style(title="US GDP double plot", source="Quandl/Fred", subplots=True)
# Chart object is initialised with the dataframe and our chart style
chart = Chart(df=df, chart_type='line', style=style)
chart.plot(engine='bokeh')
chart.plot(engine='matplotlib')
chart.plot(engine='plotly') # TODO fix legends though
| apache-2.0 | -2,607,866,110,523,156,500 | 32.225352 | 121 | 0.705384 | false | 3.170699 | false | false | false |
monokrome/django-drift | setup.py | 1 | 1462 | import os
import sys
try:
    from setuptools import setup
except ImportError:
    # setup.py runs as a top-level script, so a relative import would fail;
    # fall back to the bundled ez_setup bootstrapper instead.
    import ez_setup
    ez_setup.use_setuptools()
    from setuptools import setup
parent_directory = os.path.abspath(os.path.dirname(__file__))
metafiles = {
'README.md': None,
'CHANGES.md': None,
'CLASSIFIERS.txt': None,
}
# The following bit reads each key in metafiles and fills its value
# with the contents of that file, if the file can be read.
for filename in metafiles:
try:
current_file = open(os.path.join(parent_directory, filename))
metafiles[filename] = current_file.read()
current_file.close()
except IOError:
pass
# Runtime dependencies
dependencies = [
'celery>=3.1.6',
]
metadata = {
'name': 'django-drift',
'version': '0.1.2',
'description': 'Takes files and turns them into recrods in models. HOORAY!',
'long_description': metafiles['README.md'] + '\n\n' + metafiles['CHANGES.md'],
'classifiers': metafiles['CLASSIFIERS.txt'],
'author': 'Brandon R. Stoner',
'author_email': '[email protected]',
'url': 'http://github.com/monokrome/django-drift',
'keywords': '',
'packages': [
'drift',
'drift.management',
'drift.management.commands',
],
'package_data': {
'drift': ['templates/drift/*'],
},
'test_suite': 'drift.tests',
'install_requires': dependencies,
'tests_require': dependencies,
}
setup(**metadata)
| mit | 7,593,647,087,996,668,000 | 23.366667 | 82 | 0.629275 | false | 3.407925 | false | false | false |
davidhax0r/dojme | dojme/routes.py | 1 | 2223 | from dojme import app
from flask import render_template, request, redirect
from urlparse import urlparse
import httplib2
import re
def status_check(url):
"""
Get the headers of a web resource to check if it exists
"""
h = httplib2.Http()
try:
resp = h.request(url, 'HEAD')
if resp[0].status == 200:
return True
except (httplib2.RelativeURIError, httplib2.ServerNotFoundError):
return False
def check_protocol(url):
"""
Checks if http:// is present before Url
"""
parsed = urlparse(url)
if parsed.scheme == "":
return "http://" + url
else:
return url
def is_valid_url(url):
"""
Validates the URL input
"""
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?'
r'|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
if regex.search(url):
return True
return False
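# Illustrative behaviour of the helpers above (assumed values):
#     check_protocol("example.com")       -> "http://example.com"
#     is_valid_url("http://example.com")  -> True
#     is_valid_url("not a url")           -> False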
@app.route("/")
def main():
return render_template("index.html")
@app.route("/form", methods=["GET", "POST"])
def route_form():
if request.method == "GET":
return redirect('/')
else:
web_address = request.form['webaddress']
web_address = check_protocol(web_address)
valid_url = is_valid_url(web_address)
if not valid_url:
return render_template("index.html")
else:
check_website = status_check(web_address)
if check_website:
return render_template("submit.html",
up="It's Up",
url=web_address)
else:
return render_template("submit.html",
down="It's Down",
url=web_address)
@app.errorhandler(404)
def handle_error(error):
"""
404 error handler function
"""
return render_template("404.html"), 404
| mit | 4,382,458,863,198,709,000 | 26.109756 | 73 | 0.504723 | false | 3.638298 | false | false | false |
lhfei/spark-in-action | spark-2.x/src/main/python/ml/correlation_example.py | 1 | 1843 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An example for computing correlation matrix.
Run with:
bin/spark-submit examples/src/main/python/ml/correlation_example.py
"""
from __future__ import print_function
# $example on$
from pyspark.ml.linalg import Vectors
from pyspark.ml.stat import Correlation
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("CorrelationExample") \
.getOrCreate()
# $example on$
data = [(Vectors.sparse(4, [(0, 1.0), (3, -2.0)]),),
(Vectors.dense([4.0, 5.0, 0.0, 3.0]),),
(Vectors.dense([6.0, 7.0, 0.0, 8.0]),),
(Vectors.sparse(4, [(0, 9.0), (3, 1.0)]),)]
df = spark.createDataFrame(data, ["features"])
r1 = Correlation.corr(df, "features").head()
print("Pearson correlation matrix:\n" + str(r1[0]))
r2 = Correlation.corr(df, "features", "spearman").head()
print("Spearman correlation matrix:\n" + str(r2[0]))
# $example off$
spark.stop()
| apache-2.0 | 8,634,468,812,316,314,000 | 34.137255 | 74 | 0.658709 | false | 3.517176 | false | false | false |
kyleabeauchamp/mdtraj | mdtraj/tests/test_dtr.py | 1 | 11033 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors: Teng Lin
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
Test the cython dtr module
Note, this file cannot be located in the dtr subdirectory, because that
directory is not a python package (it has no __init__.py) and is thus tests
there are not discovered by nose
"""
import tempfile, os
import numpy as np
from mdtraj.formats import DTRTrajectoryFile, DCDTrajectoryFile
from nose.tools import assert_raises
from mdtraj.testing import get_fn, eq, DocStringFormatTester, raises
from shutil import rmtree
#TestDocstrings = DocStringFormatTester(dtr, error_on_none=True)
fn_dtr = get_fn('frame0.dtr')
fn_dcd = get_fn('frame0.dcd')
fn_pdb = get_fn('native.pdb')
temp = tempfile.mkdtemp(suffix='.dtr')
def teardown_module(module):
"""
Remove the temporary trajectory directory created by tests
in this file this gets automatically called by nose
"""
try:
rmtree(temp)
except OSError:
pass
def test_read():
"""
test the default read and compare against reference trajectory in dcd format
"""
dtr_traj = DTRTrajectoryFile(fn_dtr)
eq(len(dtr_traj), 501)
xyz, times, cell_lens, cell_angles = dtr_traj.read()
xyz2, cell_lens2, cell_angles2 = DCDTrajectoryFile(fn_dcd).read()
eq(xyz, xyz2)
eq(cell_lens, cell_lens2)
eq(cell_angles, cell_angles2)
def test_read_1():
""" test read with n_frame"""
xyz, times, cell_lens, cell_angles = DTRTrajectoryFile(fn_dtr).read()
xyz2, times2, cell_lens2, cell_angles2 = DTRTrajectoryFile(fn_dtr).read(n_frames=501)
eq(xyz, xyz2)
eq(times, times2)
eq(cell_lens, cell_lens2)
eq(cell_angles, cell_angles2)
def test_read_2():
""" test read with atom indices"""
indices = np.array([0, 3, 12, 4])
xyz, times, cell_lens, cell_angles = DTRTrajectoryFile(fn_dtr).read()
xyz2, times2, cell_lens2, cell_angles2 = DTRTrajectoryFile(fn_dtr).read(atom_indices=indices)
eq(xyz[:,indices,:], xyz2)
eq(times, times2)
eq(cell_lens, cell_lens2)
eq(cell_angles, cell_angles2)
def test_read_3():
"""test read with n_frames"""
dtr_traj = DTRTrajectoryFile(fn_dtr)
dtr_traj.seek(1)
xyz, times, cell_lens, cell_angles = dtr_traj.read(n_frames=900)
eq(len(xyz), 500)
def test_read_stride():
"Read dtr with stride"
with DTRTrajectoryFile(fn_dtr) as f:
xyz1, times1, box_lengths1, box_angles1 = f.read()
with DTRTrajectoryFile(fn_dtr) as f:
xyz2, times2, box_lengths2, box_angles2 = f.read(stride=2)
yield lambda: eq(xyz1[::2], xyz2)
yield lambda: eq(times1[::2], times2)
yield lambda: eq(box_lengths1[::2], box_lengths2)
yield lambda: eq(box_angles1[::2], box_angles2)
def test_read_4():
"""Read dtr with stride and n_frames"""
# dtr_traj = DTRTrajectoryFile(fn_dtr)
# dtr_traj.seek(1)
# xyz, times, cell_lens, cell_angles = dtr_traj.read(n_frames=300, stride=2)
# eq(len(xyz), 251)
with DTRTrajectoryFile(fn_dtr) as f:
xyz1, times1, box_lengths1, box_angles1 = f.read()
with DTRTrajectoryFile(fn_dtr) as f:
xyz2, times2, box_lengths2, box_angles2 = f.read(n_frames=300, stride=2)
yield lambda: eq(xyz1[::2], xyz2)
yield lambda: eq(times1[::2], times2)
yield lambda: eq(box_lengths1[::2], box_lengths2)
yield lambda: eq(box_angles1[::2], box_angles2)
def test_read_5():
"check streaming read of frames 1 at a time"
xyz_ref, times_ref, box_lengths_ref, box_angles_ref = DTRTrajectoryFile(fn_dtr).read()
reader = DTRTrajectoryFile(fn_dtr)
for i in range(len(xyz_ref)):
xyz, times, box_lenths, box_angles = reader.read(1)
eq(xyz_ref[np.newaxis, i], xyz)
eq(times_ref[np.newaxis, i], times)
eq(box_lengths_ref[np.newaxis, i], box_lenths)
eq(box_angles_ref[np.newaxis, i], box_angles)
def test_read_6():
"DTRReader: check streaming read followed by reading the 'rest'"
xyz_ref, times_ref, box_lengths_ref, box_angles_ref = DTRTrajectoryFile(fn_dtr).read()
reader = DTRTrajectoryFile(fn_dtr)
for i in range(int(len(xyz_ref)/2)):
xyz, times, box_lenths, box_angles = reader.read(1)
eq(xyz_ref[np.newaxis, i], xyz)
eq(times_ref[np.newaxis, i], times)
eq(box_lengths_ref[np.newaxis, i], box_lenths)
eq(box_angles_ref[np.newaxis, i], box_angles)
xyz_rest, times_rest, box_rest, angles_rest = reader.read()
yield lambda: eq(xyz_ref[i+1:], xyz_rest)
yield lambda: eq(times_ref[i+1:], times_rest)
yield lambda: eq(box_lengths_ref[i+1:], box_rest)
yield lambda: eq(box_angles_ref[i+1:], angles_rest)
yield lambda: len(xyz_ref) == i + len(xyz_rest)
def test_read_7():
    'test two full reads'
reader = DTRTrajectoryFile(fn_dtr)
xyz, times, cell_lens, cell_angles = reader.read()
xyz, times, cell_lens, cell_angles = reader.read()
eq(len(xyz), 0)
eq(len(times), 0)
eq(len(cell_lens), 0)
eq(len(cell_angles), 0)
def test_read_8():
with DTRTrajectoryFile(fn_dtr) as f:
xyz_ref, times_ref, box_lengths_ref, box_angles_ref = f.read()
with DTRTrajectoryFile(fn_dtr) as f:
xyz, times, box_lengths, box_angles = f.read(atom_indices=slice(None, None, 2))
yield lambda: eq(xyz_ref[:, ::2, :], xyz)
def test_write_1():
"test write"
xyz, times, cell_lens, cell_angles = DTRTrajectoryFile(fn_dtr).read()
xyz += 1
DTRTrajectoryFile(temp, 'w').write(xyz,cell_lengths=cell_lens,
cell_angles=cell_angles, times=times)
xyz2, times2, cell_lens2, cell_angles2 = DTRTrajectoryFile(temp).read()
eq(xyz, xyz2)
eq(times, times2)
eq(cell_lens, cell_lens2)
eq(cell_angles, cell_angles2)
def test_write_2():
"""
test two separate write call
"""
xyz, times, cell_lens, cell_angles = DTRTrajectoryFile(fn_dtr).read()
writer = DTRTrajectoryFile(temp, 'w')
writer.write(xyz,cell_lengths=cell_lens,
cell_angles=cell_angles, times=times)
n_frames = len(xyz)
times += 50.0
writer.write(xyz,cell_lengths=cell_lens,
cell_angles=cell_angles, times=times)
# # try to write frames with different number of atoms
# assert_raises(ValueError, writer.write, xyz[:,10:,:],
# cell_lengths=cell_lens,
# cell_angles=cell_angles,
# times=times)
writer.close()
xyz2, times2, cell_lens2, cell_angles2 = DTRTrajectoryFile(temp).read()
eq(len(xyz2), n_frames*2)
eq(xyz, xyz2[n_frames:])
eq(times, times2[n_frames:])
eq(cell_lens, cell_lens2[n_frames:])
eq(cell_angles, cell_angles2[n_frames:])
def test_write_3():
"test a random write operation"
xyz = np.array(np.random.uniform(low=-50, high=-50, size=(3, 17, 3)), dtype=np.float32)
times = np.array([1, 23.0, 48.0], dtype=np.float64)
cell_lengths=np.array(np.random.uniform(low=100, high=200, size=(3, 3)), dtype=np.float32)
cell_angles=np.array([[90, 90, 90],
[80, 100, 120],
[120, 90, 80]],
dtype=np.float32)
with DTRTrajectoryFile(temp, 'w') as f:
f.write(xyz, cell_lengths=cell_lengths,
cell_angles=cell_angles, times=times)
with DTRTrajectoryFile(temp) as f:
xyz2, times2, cell_lengths2, cell_angles2 = f.read()
eq(xyz, xyz2)
def test_write_4():
"test write error"
xyz = np.array(np.random.uniform(low=-50, high=-50, size=(3, 17, 3)), dtype=np.float32)
times = np.array([1, 23.0, 48.0], dtype=np.float64)
cell_lengths=np.array(np.random.uniform(low=100, high=200, size=(3, 3)), dtype=np.float32)
cell_angles=np.array([[90, 90, 90],
[80, 100, 120],
[120, 90, 80]],
dtype=np.float32)
bad_times = np.array([21, 3.0, 48.0], dtype=np.float64)
f = DTRTrajectoryFile(temp, 'w')
assert_raises(ValueError, f.write, xyz, cell_lengths=cell_lengths)
assert_raises(ValueError, f.write, xyz, cell_angles=cell_angles)
assert_raises(ValueError, f.write, xyz, times=times)
assert_raises(ValueError, f.write, xyz,
cell_lengths=cell_lengths,
cell_angles=cell_angles,
times=bad_times)
f.close()
# assert_raises(IOError, f.write, xyz,
# cell_lengths=cell_lengths,
# cell_angles=cell_angles,
# times=times)
def test_seek():
reference = DTRTrajectoryFile(fn_dtr).read()[0]
with DTRTrajectoryFile(fn_dtr) as f:
eq(f.tell(), 0)
eq(f.read(1)[0][0], reference[0])
eq(f.tell(), 1)
xyz = f.read(1)[0][0]
eq(xyz, reference[1])
eq(f.tell(), 2)
f.seek(0)
eq(f.tell(), 0)
xyz = f.read(1)[0][0]
eq(f.tell(), 1)
eq(xyz, reference[0])
f.seek(5)
eq(f.read(1)[0][0], reference[5])
eq(f.tell(), 6)
f.seek(-5, 1)
eq(f.tell(), 1)
eq(f.read(1)[0][0], reference[1])
@raises(IOError)
def test_read_closed():
f = DTRTrajectoryFile(fn_dtr)
f.close()
f.read()
# @raises(IOError)
# def test_write_closed():
# f = DTRTrajectoryFile(fn_dtr, 'w')
# f.close()
# xyz = np.array(np.random.uniform(low=-50, high=-50, size=(3, 17, 3)), dtype=np.float32)
# times = np.array([1, 23.0, 48.0], dtype=np.float64)
# cell_lengths=np.array(np.random.uniform(low=100, high=200, size=(3, 3)), dtype=np.float32)
# cell_angles=np.array([[90, 90, 90],
# [80, 100, 120],
# [120, 90, 80]],
# dtype=np.float32)
#
# f.write(xyz, cell_lengths=cell_lengths,
# cell_angles=cell_angles,
# times=times)
def test_tell():
with DTRTrajectoryFile(fn_dtr) as f:
last = len(f)
eq(f.tell(), 0)
f.read(2)
eq(f.tell(), 2)
f.read(100)
eq(f.tell(), 102)
f.seek(600)
eq(f.tell(), last)
test_read_7() | lgpl-2.1 | -1,750,320,178,704,864,000 | 32.537994 | 97 | 0.608719 | false | 3.044426 | true | false | false |
f5devcentral/f5-cccl | f5_cccl/resource/ltm/internal_data_group.py | 1 | 3015 | """Provides a class for managing BIG-IP iRule resources."""
# coding=utf-8
#
# Copyright (c) 2017-2021 F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from copy import deepcopy
import logging
from f5_cccl.resource import Resource
LOGGER = logging.getLogger(__name__)
def get_record_key(record):
"""Allows data groups to be sorted by the 'name' member."""
return record.get('name', '')
class InternalDataGroup(Resource):
"""InternalDataGroup class."""
# The property names class attribute defines the names of the
# properties that we wish to compare.
properties = dict(
name=None,
partition=None,
type=None,
records=list()
)
def __init__(self, name, partition, **data):
"""Create the InternalDataGroup"""
super(InternalDataGroup, self).__init__(name, partition)
self._data['type'] = data.get('type', '')
records = data.get('records', list())
self._data['records'] = sorted(records, key=get_record_key)
def __eq__(self, other_dg):
"""Check the equality of the two objects.
Only compare the properties as defined in the
        properties class dictionary.
"""
if not isinstance(other_dg, InternalDataGroup):
return False
for key in self.properties:
if self._data[key] != other_dg.data.get(key, None):
return False
return True
def __hash__(self): # pylint: disable=useless-super-delegation
return super(InternalDataGroup, self).__hash__()
def _uri_path(self, bigip):
return bigip.tm.ltm.data_group.internals.internal
def __str__(self):
return str(self._data)
def update(self, bigip, data=None, modify=False):
"""Override of base class implemntation, required because data-groups
are picky about what data can exist in the object when modifying.
"""
tmp_copy = deepcopy(self)
tmp_copy.do_update(bigip, data, modify)
def do_update(self, bigip, data, modify):
"""Remove 'type' before doing the update."""
del self._data['type']
super(InternalDataGroup, self).update(
bigip, data=data, modify=modify)
class IcrInternalDataGroup(InternalDataGroup):
"""InternalDataGroup object created from the iControl REST object"""
pass
class ApiInternalDataGroup(InternalDataGroup):
"""InternalDataGroup object created from the API configuration object"""
pass
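# Illustrative usage (assumed record layout, not part of the original module):
#     dg1 = ApiInternalDataGroup('dg', 'Common', type='string',
#                                records=[{'name': 'a', 'data': '1'}])
#     dg2 = IcrInternalDataGroup('dg', 'Common', type='string',
#                                records=[{'name': 'a', 'data': '1'}])
#     dg1 == dg2  # True: name, partition, type and sorted records match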
| apache-2.0 | 8,203,400,656,434,464,000 | 30.736842 | 77 | 0.659701 | false | 4.118852 | false | false | false |
saketkc/ribo-seq-snakemake | configs/config_Shalgi_et_al_Cell_2013.mouse.py | 1 | 1460 | GENOMES_DIR = '/home/cmb-panasas2/skchoudh/genomes'
OUT_DIR = '/staging/as/skchoudh/rna/September_2017_Shalgi_et_al_Cell_2013'
SRC_DIR = '/home/cmb-panasas2/skchoudh/github_projects/clip_seq_pipeline/scripts'
RAWDATA_DIR = '/home/cmb-06/as/skchoudh/dna/September_2017_Shalgi_et_al_Cell_2013/sra_single_end_mouse'
GENOME_BUILD = 'mm10'
GENOME_FASTA = GENOMES_DIR + '/' + GENOME_BUILD + '/fasta/'+ GENOME_BUILD+ '.fa'
STAR_INDEX = GENOMES_DIR + '/' + GENOME_BUILD + '/star_annotated'
GTF = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + 'gencode.vM11.annotation.without_rRNA_tRNA.gtf'
GENE_NAMES = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + GENOME_BUILD+'_gene_names_stripped.tsv'
GTF_UTR = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + 'gencode.vM11.gffutils.modifiedUTRs.gtf'
GENE_LENGTHS = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + 'gencode.vM11.coding_lengths.tsv' #+ GENOME_BUILD+'_gene_lengths.tsv'
HTSEQ_STRANDED = 'yes'
FEATURECOUNTS_S = '-s 1'
GENE_BED = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + 'mm10.vM11.genes.fromUCSC.bed' #+ GENOME_BUILD+'_gene_lengths.tsv'
START_CODON_BED = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + 'gencode.vM11.gffutils.start_codon.bed' #+ GENOME_BUILD+'_gene_lengths.tsv'
STOP_CODON_BED = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + 'gencode.vM11.gffutils.stop_codon.bed' #+ GENOME_BUILD+'_gene_lengths.tsv'
FEATURECOUNTS_T='CDS'
HTSEQ_MODE='intersection-strict'
| bsd-3-clause | -6,291,594,674,228,733,000 | 80.111111 | 146 | 0.7 | false | 2.409241 | false | true | false |
kanishkarj/Rave | Qt_Designer_files/playlist_design.py | 1 | 2791 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'playlist.ui'
#
# Created by: PyQt4 UI code generator 4.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_playlist(object):
def setupUi(self, playlist):
playlist.setObjectName(_fromUtf8("playlist"))
playlist.resize(451, 300)
self.gridLayout = QtGui.QGridLayout(playlist)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.mediaList = QtGui.QListWidget(playlist)
self.mediaList.setObjectName(_fromUtf8("mediaList"))
self.gridLayout.addWidget(self.mediaList, 1, 0, 1, 5)
self.listRearrange = QtGui.QPushButton(playlist)
self.listRearrange.setMinimumSize(QtCore.QSize(35, 35))
self.listRearrange.setMaximumSize(QtCore.QSize(35, 35))
self.listRearrange.setStyleSheet(_fromUtf8("border-radius:5em;"))
self.listRearrange.setText(_fromUtf8(""))
self.listRearrange.setObjectName(_fromUtf8("listRearrange"))
self.gridLayout.addWidget(self.listRearrange, 0, 3, 1, 1)
self.listRemove = QtGui.QPushButton(playlist)
self.listRemove.setMinimumSize(QtCore.QSize(35, 35))
self.listRemove.setMaximumSize(QtCore.QSize(35, 35))
self.listRemove.setStyleSheet(_fromUtf8("border-radius:5em;"))
self.listRemove.setText(_fromUtf8(""))
self.listRemove.setObjectName(_fromUtf8("listRemove"))
self.gridLayout.addWidget(self.listRemove, 0, 2, 1, 1)
self.listAdd = QtGui.QPushButton(playlist)
self.listAdd.setMinimumSize(QtCore.QSize(35, 35))
self.listAdd.setMaximumSize(QtCore.QSize(35, 35))
self.listAdd.setStyleSheet(_fromUtf8("border-radius:5em;"))
self.listAdd.setText(_fromUtf8(""))
self.listAdd.setObjectName(_fromUtf8("listAdd"))
self.gridLayout.addWidget(self.listAdd, 0, 1, 1, 1)
self.retranslateUi(playlist)
QtCore.QMetaObject.connectSlotsByName(playlist)
def retranslateUi(self, playlist):
playlist.setWindowTitle(_translate("playlist", "Playlist", None))
self.listRearrange.setToolTip(_translate("playlist", "Reorder", None))
self.listRemove.setToolTip(_translate("playlist", "Remove", None))
self.listAdd.setToolTip(_translate("playlist", "Add File", None))
| gpl-3.0 | 8,714,846,205,213,983,000 | 42.609375 | 79 | 0.694375 | false | 3.797279 | false | false | false |
belese/luciphone | Luciphone/modules/NFCmonitor.py | 1 | 1642 | import time
from py532lib.i2c import *
from py532lib.frame import *
from py532lib.constants import *
class NFCmonitor :
def __init__(self) :
self.cardIn = False
self.UUID = []
self.stopped = False
self.cbcardin = None
self.cbcardout = None
#Initialise NFC_reader
self.pn532 = Pn532_i2c()
self.pn532.SAMconfigure()
def registerCB(self,cbcardin = None,cbcardout = None):
self.cbcardin = cbcardin
self.cbcardout = cbcardout
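    # The UID is read twice and both reads must match the candidate value;
    # this debounces spurious reads from the PN532 while a tag is moving
    # in or out of the field.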
def _trust_uid(self,uid) :
return uid == self.pn532.get_uid() and uid == self.pn532.get_uid()
def stop(self) :
self.stopped = True
def start(self) :
print ("NFC Monitor started")
while not self.stopped :
uid = self.pn532.get_uid()
if uid == self.UUID :
time.sleep(0.2)
elif uid and self._trust_uid(uid) :
print ("New Card Detected",uid)
self.UUID = uid
if not self.cardIn :
self.cardIn = True
if self.cbcardin : self.cbcardin(self.UUID)
elif not uid and self.cardIn and self._trust_uid(uid):
print ("Card Removed 2",self.UUID)
uuid = self.UUID
self.UUID = None
self.cardIn = False
if self.cbcardout : self.cbcardout(uuid)
NFC = NFCmonitor()
NFC.start()
| gpl-2.0 | 6,317,143,941,664,688,000 | 31.84 | 95 | 0.477467 | false | 3.956627 | false | false | false |
XBMC-Addons/plugin.library.node.editor | resources/lib/orderby.py | 1 | 8285 | # coding=utf-8
import os, sys
import xbmc, xbmcaddon, xbmcplugin, xbmcgui, xbmcvfs
import xml.etree.ElementTree as xmltree
from traceback import print_exc
from urllib.parse import unquote
from resources.lib.common import *
class OrderByFunctions():
def __init__(self, ltype):
self.ltype = ltype
def _load_rules( self ):
if self.ltype.startswith('video'):
overridepath = os.path.join( DEFAULTPATH , "videorules.xml" )
else:
overridepath = os.path.join( DEFAULTPATH , "musicrules.xml" )
try:
tree = xmltree.parse( overridepath )
return tree
except:
return None
def translateOrderBy( self, rule ):
# Load the rules
tree = self._load_rules()
hasValue = True
if rule[ 0 ] == "sorttitle":
rule[ 0 ] = "title"
if rule[ 0 ] != "random":
# Get the field we're ordering by
elems = tree.getroot().find( "matches" ).findall( "match" )
for elem in elems:
if elem.attrib.get( "name" ) == rule[ 0 ]:
match = xbmc.getLocalizedString( int( elem.find( "label" ).text ) )
else:
# We'll manually set for random
match = xbmc.getLocalizedString( 590 )
# Get localization of direction
direction = None
elems = tree.getroot().find( "orderby" ).findall( "type" )
for elem in elems:
if elem.text == rule[ 1 ]:
direction = xbmc.getLocalizedString( int( elem.attrib.get( "label" ) ) )
directionVal = rule[ 1 ]
if direction is None:
direction = xbmc.getLocalizedString( int( tree.getroot().find( "orderby" ).find( "type" ).attrib.get( "label" ) ) )
directionVal = tree.getroot().find( "orderby" ).find( "type" ).text
return [ [ match, rule[ 0 ] ], [ direction, directionVal ] ]
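    # Illustrative example (labels depend on the localized rule files):
    # translateOrderBy(["sorttitle", "ascending"]) maps "sorttitle" back to
    # "title", looks up its localized label and returns something like
    # [["Title", "title"], ["Ascending", "ascending"]].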
def displayOrderBy( self, actionPath):
try:
# Load the xml file
tree = xmltree.parse( unquote(actionPath) )
root = tree.getroot()
# Get the content type
content = root.find( "content" ).text
# Get the order node
orderby = root.find( "order" )
if orderby is None:
# There is no orderby element, so add one
self.newOrderBy( tree, actionPath )
orderby = root.find( "order" )
match = orderby.text
if "direction" in orderby.attrib:
direction = orderby.attrib.get( "direction" )
else:
direction = ""
translated = self.translateOrderBy( [match, direction ] )
listitem = xbmcgui.ListItem( label="%s" % ( translated[ 0 ][ 0 ] ) )
action = "plugin://plugin.library.node.editor?ltype=%s&type=editOrderBy&actionPath=" % self.ltype + actionPath + "&content=" + content + "&default=" + translated[0][1]
xbmcplugin.addDirectoryItem( int(sys.argv[ 1 ]), action, listitem, isFolder=False )
listitem = xbmcgui.ListItem( label="%s" % ( translated[ 1 ][ 0 ] ) )
action = "plugin://plugin.library.node.editor?ltype=%s&type=editOrderByDirection&actionPath=" % self.ltype + actionPath + "&default=" + translated[1][1]
xbmcplugin.addDirectoryItem( int(sys.argv[ 1 ]), action, listitem, isFolder=False )
xbmcplugin.setContent(int(sys.argv[1]), 'files')
xbmcplugin.endOfDirectory(int(sys.argv[1]))
except:
print_exc()
def editOrderBy( self, actionPath, content, default ):
# Load all operator groups
tree = self._load_rules().getroot()
elems = tree.find( "matches" ).findall( "match" )
selectName = []
selectValue = []
# Find the matches for the content we've been passed
for elem in elems:
contentMatch = elem.find( content )
if contentMatch is not None:
selectName.append( xbmc.getLocalizedString( int( elem.find( "label" ).text ) ) )
selectValue.append( elem.attrib.get( "name" ) )
# Add a random element
selectName.append( xbmc.getLocalizedString( 590 ) )
selectValue.append( "random" )
# Let the user select an operator
selectedOperator = xbmcgui.Dialog().select( LANGUAGE( 30314 ), selectName )
# If the user selected no operator...
if selectedOperator == -1:
return
returnVal = selectValue[ selectedOperator ]
if returnVal == "title":
returnVal = "sorttitle"
self.writeUpdatedOrderBy( actionPath, field = returnVal )
def editDirection( self, actionPath, direction ):
# Load all directions
tree = self._load_rules().getroot()
elems = tree.find( "orderby" ).findall( "type" )
selectName = []
selectValue = []
# Find the group we've been passed and load its operators
for elem in elems:
selectName.append( xbmc.getLocalizedString( int( elem.attrib.get( "label" ) ) ) )
selectValue.append( elem.text )
# Let the user select an operator
selectedOperator = xbmcgui.Dialog().select( LANGUAGE( 30315 ), selectName )
# If the user selected no operator...
if selectedOperator == -1:
return
self.writeUpdatedOrderBy( actionPath, direction = selectValue[ selectedOperator ] )
def writeUpdatedOrderBy( self, actionPath, field = None, direction = None ):
# This function writes an updated orderby rule
try:
# Load the xml file
tree = xmltree.parse( unquote(unquote(actionPath)) )
root = tree.getroot()
# Get all the rules
orderby = root.find( "order" )
if field is not None:
orderby.text = field
if direction is not None:
orderby.set( "direction", direction )
# Save the file
self.indent( root )
tree.write( unquote(actionPath), encoding="UTF-8" )
except:
print_exc()
def newOrderBy( self, tree, actionPath ):
# This function adds a new OrderBy, with default match and direction
try:
# Load the xml file
#tree = xmltree.parse( actionPath )
root = tree.getroot()
# Get the content type
content = root.find( "content" )
if content is None:
xbmcgui.Dialog().ok( ADDONNAME, LANGUAGE( 30406 ) )
return
else:
content = content.text
# Find the default match for this content type
ruleTree = self._load_rules().getroot()
elems = ruleTree.find( "matches" ).findall( "match" )
match = "title"
for elem in elems:
contentCheck = elem.find( content )
if contentCheck is not None:
# We've found the first match for this type
match = elem.attrib.get( "name" )
break
if match == "title":
match = "sorttitle"
# Find the default direction
elem = ruleTree.find( "orderby" ).find( "type" )
direction = elem.text
# Write the new rule
newRule = xmltree.SubElement( root, "order" )
newRule.text = match
newRule.set( "direction", direction )
# Save the file
self.indent( root )
tree.write( unquote( actionPath ), encoding="UTF-8" )
except:
print_exc()
# in-place prettyprint formatter
def indent( self, elem, level=0 ):
i = "\n" + level*"\t"
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + "\t"
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
| gpl-2.0 | -1,985,181,185,056,589,800 | 41.487179 | 179 | 0.551358 | false | 4.222732 | false | false | false |
ScottWales/rose | lib/python/rose/scheme_handler.py | 1 | 5051 | # -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# (C) British Crown Copyright 2012-5 Met Office.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
#-----------------------------------------------------------------------------
"""Load and select from a group of related functional classes."""
from glob import glob
import inspect
import os
import sys
class SchemeHandlersManager(object):
"""Load and select from a group of related functional classes."""
CAN_HANDLE = "can_handle"
def __init__(self, paths, ns=None, attrs=None, can_handle=None,
*args, **kwargs):
"""Load modules in paths and initialise any classes with a SCHEME.
If "ns" is not None, only modules under the specified name-space in
paths are searched and imported. ("ns" should be a str in the form
"a.b", which will be converted as "a/b" for path search.)
Initialise each handler, and save it in self.handlers, which is a dict
of {scheme: handler, ...}.
If attrs is specified, it should be a list of attributes the class
has that do not have None values.
args and kwargs are passed as *args, **kwargs to the constructor of
each class. This manager will be passed to the constructor using the
kwargs["manager"].
Each handler class may have a SCHEMES attribute (a list of str) or a
SCHEME attribute with a str value, which will be used as the keys to
self.handlers of this manager.
Optionally, a handler may have a h.can_handle(scheme, **kwargs) method
that returns a boolean value to indicate whether it can handle a given
value.
"""
self.handlers = {}
if can_handle is None:
can_handle = self.CAN_HANDLE
self.can_handle = can_handle
cwd = os.getcwd()
ns_path = ""
if ns:
ns_path = os.path.join(*(ns.split("."))) + os.sep
for path in paths:
os.chdir(path) # assuming that "" is at the front of sys.path
sys.path.insert(0, path)
try:
kwargs["manager"] = self
for file_name in glob(ns_path + "*.py"):
if file_name.startswith("__"):
continue
mod_path = file_name[0:-3]
mod_name = mod_path.replace(os.sep, ".")
mod = __import__(mod_name, fromlist=[""])
members = inspect.getmembers(mod, inspect.isclass)
scheme0_default = None
if len(members) == 1:
scheme0_default = os.path.basename(mod_path)
for key, c in members:
                        if any([getattr(c, a, None) is None for a in attrs or []]):
continue
handler = None
scheme0 = getattr(c, "SCHEME", scheme0_default)
schemes = []
if scheme0 is not None:
schemes = [scheme0]
for scheme in getattr(c, "SCHEMES", schemes):
if self.handlers.has_key(scheme):
raise ValueError(c) # scheme already used
kwargs["manager"] = self
if handler is None:
handler = c(*args, **kwargs)
self.handlers[scheme] = handler
finally:
os.chdir(cwd)
sys.path.pop(0)
def get_handler(self, scheme):
"""Return the handler with a matching scheme.
Return None if there is no handler with a matching scheme.
"""
try:
if self.handlers.has_key(scheme):
return self.handlers[scheme]
except TypeError:
pass
def guess_handler(self, item):
"""Return a handler that can handle item.
Return None if there is no handler with a matching scheme.
"""
handler = self.get_handler(item)
if handler:
return handler
for handler in self.handlers.values():
can_handle = getattr(handler, self.can_handle, None)
if (callable(can_handle) and can_handle(item)):
return handler
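# Illustrative usage (assumed paths and schemes, not part of the original
# module):
#     manager = SchemeHandlersManager(["/opt/handlers"], ns="my.handlers",
#                                     attrs=["SCHEME"])
#     handler = manager.get_handler("svn")                 # exact match
#     handler = manager.guess_handler("svn://host/repo")   # via can_handle()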
| gpl-3.0 | 441,689,296,259,777,660 | 38.771654 | 78 | 0.545239 | false | 4.625458 | false | false | false |
hocinebendou/bika.gsoc | bika/lims/utils/analysis.py | 1 | 12208 | # -*- coding: utf-8 -*-
import math
import zope.event
from bika.lims.utils import formatDecimalMark
from Products.Archetypes.event import ObjectInitializedEvent
from Products.CMFCore.WorkflowCore import WorkflowException
from Products.CMFPlone.utils import _createObjectByType
def create_analysis(context, service, keyword, interim_fields):
# Determine if the sampling workflow is enabled
workflow_enabled = context.bika_setup.getSamplingWorkflowEnabled()
# Create the analysis
analysis = _createObjectByType("Analysis", context, keyword)
analysis.setService(service)
analysis.setInterimFields(interim_fields)
analysis.setMaxTimeAllowed(service.getMaxTimeAllowed())
analysis.unmarkCreationFlag()
analysis.reindexObject()
# Trigger the intitialization event of the new object
zope.event.notify(ObjectInitializedEvent(analysis))
# Perform the appropriate workflow action
try:
workflow_action = 'sampling_workflow' if workflow_enabled \
else 'no_sampling_workflow'
context.portal_workflow.doActionFor(analysis, workflow_action)
except WorkflowException:
# The analysis may have been transitioned already!
# I am leaving this code here though, to prevent regression.
pass
# Return the newly created analysis
return analysis
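# Illustrative usage (assumed objects, not part of the original module):
#     analysis = create_analysis(ar, service, service.getKeyword(),
#                                service.getInterimFields())
# where `ar` is the Analysis Request the analysis is created in and
# `service` is the Analysis Service it is based on.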
def get_significant_digits(numeric_value):
"""
Returns the precision for a given floatable value.
If value is None or not floatable, returns None.
    Will return a positive precision if the absolute value is below 1 and
    will return 0 if it is 1 or above.
:param numeric_value: the value to get the precision from
:return: the numeric_value's precision
Examples:
numeric_value Returns
0 0
0.22 1
1.34 0
0.0021 3
0.013 2
2 0
22 0
"""
try:
numeric_value = float(numeric_value)
    except (TypeError, ValueError):
return None
if numeric_value == 0:
return 0
significant_digit = int(math.floor(math.log10(abs(numeric_value))))
return 0 if significant_digit > 0 else abs(significant_digit)
def format_uncertainty(analysis, result, decimalmark='.', sciformat=1):
"""
Returns the formatted uncertainty according to the analysis, result
and decimal mark specified following these rules:
If the "Calculate precision from uncertainties" is enabled in
the Analysis service, and
    a) If the non-decimal number of digits of the result is above
the service's ExponentialFormatPrecision, the uncertainty will
be formatted in scientific notation. The uncertainty exponential
value used will be the same as the one used for the result. The
uncertainty will be rounded according to the same precision as
the result.
Example:
Given an Analysis with an uncertainty of 37 for a range of
results between 30000 and 40000, with an
ExponentialFormatPrecision equal to 4 and a result of 32092,
this method will return 0.004E+04
b) If the number of digits of the integer part of the result is
below the ExponentialFormatPrecision, the uncertainty will be
formatted as decimal notation and the uncertainty will be
rounded one position after reaching the last 0 (precision
calculated according to the uncertainty value).
Example:
Given an Analysis with an uncertainty of 0.22 for a range of
results between 1 and 10 with an ExponentialFormatPrecision
equal to 4 and a result of 5.234, this method will return 0.2
If the "Calculate precision from Uncertainties" is disabled in the
analysis service, the same rules described above applies, but the
precision used for rounding the uncertainty is not calculated from
the uncertainty neither the result. The fixed length precision is
used instead.
For further details, visit
https://jira.bikalabs.com/browse/LIMS-1334
If the result is not floatable or no uncertainty defined, returns
an empty string.
The default decimal mark '.' will be replaced by the decimalmark
specified.
:param analysis: the analysis from which the uncertainty, precision
and other additional info have to be retrieved
:param result: result of the analysis. Used to retrieve and/or
calculate the precision and/or uncertainty
:param decimalmark: decimal mark to use. By default '.'
:param sciformat: 1. The sci notation has to be formatted as aE^+b
2. The sci notation has to be formatted as ax10^b
3. As 2, but with super html entity for exp
4. The sci notation has to be formatted as a·10^b
5. As 4, but with super html entity for exp
By default 1
:return: the formatted uncertainty
"""
try:
result = float(result)
    except (TypeError, ValueError):
return ""
objres = None
try:
objres = float(analysis.getResult())
    except (TypeError, ValueError):
pass
service = analysis.getService()
uncertainty = None
if result == objres:
# To avoid problems with DLs
uncertainty = analysis.getUncertainty()
else:
uncertainty = analysis.getUncertainty(result)
if uncertainty is None or uncertainty == 0:
return ""
# Scientific notation?
# Get the default precision for scientific notation
threshold = service.getExponentialFormatPrecision()
# Current result precision is above the threshold?
sig_digits = get_significant_digits(result)
negative = sig_digits < 0
sign = '-' if negative else ''
sig_digits = abs(sig_digits)
sci = sig_digits >= threshold and sig_digits > 0
formatted = ''
if sci:
# Scientific notation
# 3.2014E+4
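        # Scale the uncertainty by the same power of ten used for the result's
        # mantissa, so both values end up sharing one exponent (see docstring).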
        if negative:
res = float(uncertainty)*(10**sig_digits)
else:
res = float(uncertainty)/(10**sig_digits)
res = float(str("%%.%sf" % (sig_digits-1)) % res)
res = int(res) if res.is_integer() else res
if sciformat in [2,3,4,5]:
if sciformat == 2:
# ax10^b or ax10^-b
formatted = "%s%s%s%s" % (res,"x10^",sign,sig_digits)
elif sciformat == 3:
# ax10<super>b</super> or ax10<super>-b</super>
formatted = "%s%s%s%s%s" % (res,"x10<sup>",sign,sig_digits,"</sup>")
elif sciformat == 4:
# ax10^b or ax10^-b
formatted = "%s%s%s%s" % (res,"·10^",sign,sig_digits)
elif sciformat == 5:
# ax10<super>b</super> or ax10<super>-b</super>
formatted = "%s%s%s%s%s" % (res,"·10<sup>",sign,sig_digits,"</sup>")
else:
# Default format: aE^+b
sig_digits = "%02d" % sig_digits
formatted = "%s%s%s%s" % (res,"e",sign,sig_digits)
#formatted = str("%%.%se" % sig_digits) % uncertainty
else:
# Decimal notation
prec = analysis.getPrecision(result)
prec = prec if prec else ''
formatted = str("%%.%sf" % prec) % uncertainty
return formatDecimalMark(formatted, decimalmark)
def format_numeric_result(analysis, result, decimalmark='.', sciformat=1):
"""
    Returns the formatted number part of a result's value. This is
    responsible for deciding the precision and notation of numeric
    values in accordance with the uncertainty. If a non-numeric
result value is given, the value will be returned unchanged.
The following rules apply:
If the "Calculate precision from uncertainties" is enabled in
the Analysis service, and
a) If the non-decimal number of digits of the result is above
the service's ExponentialFormatPrecision, the result will
be formatted in scientific notation.
Example:
Given an Analysis with an uncertainty of 37 for a range of
results between 30000 and 40000, with an
ExponentialFormatPrecision equal to 4 and a result of 32092,
this method will return 3.2092E+04
b) If the number of digits of the integer part of the result is
below the ExponentialFormatPrecision, the result will be
formatted as decimal notation and the resulta will be rounded
in accordance to the precision (calculated from the uncertainty)
Example:
Given an Analysis with an uncertainty of 0.22 for a range of
results between 1 and 10 with an ExponentialFormatPrecision
equal to 4 and a result of 5.234, this method will return 5.2
If the "Calculate precision from Uncertainties" is disabled in the
analysis service, the same rules described above applies, but the
precision used for rounding the result is not calculated from
the uncertainty. The fixed length precision is used instead.
For further details, visit
https://jira.bikalabs.com/browse/LIMS-1334
The default decimal mark '.' will be replaced by the decimalmark
specified.
:param analysis: the analysis from which the uncertainty, precision
and other additional info have to be retrieved
:param result: result to be formatted.
:param decimalmark: decimal mark to use. By default '.'
:param sciformat: 1. The sci notation has to be formatted as aE^+b
2. The sci notation has to be formatted as ax10^b
3. As 2, but with super html entity for exp
4. The sci notation has to be formatted as a·10^b
5. As 4, but with super html entity for exp
By default 1
:return: the formatted result
"""
try:
result = float(result)
    except (TypeError, ValueError):
return result
# continuing with 'nan' result will cause formatting to fail.
if math.isnan(result):
return result
service = analysis.getService()
# Scientific notation?
# Get the default precision for scientific notation
threshold = service.getExponentialFormatPrecision()
# Current result precision is above the threshold?
sig_digits = get_significant_digits(result)
negative = sig_digits < 0
sign = '-' if negative else ''
sig_digits = abs(sig_digits)
sci = sig_digits >= threshold
formatted = ''
if sci:
# Scientific notation
if sciformat in [2,3,4,5]:
            if negative:
res = float(result)*(10**sig_digits)
else:
res = float(result)/(10**sig_digits)
res = float(str("%%.%sf" % (sig_digits-1)) % res)
            # Render integral mantissas without a trailing ".0" (e.g. 5, not 5.0);
            # res is a float here, so use its is_integer() method.
            res = int(res) if res.is_integer() else res
if sciformat == 2:
# ax10^b or ax10^-b
formatted = "%s%s%s%s" % (res,"x10^",sign,sig_digits)
elif sciformat == 3:
# ax10<super>b</super> or ax10<super>-b</super>
formatted = "%s%s%s%s%s" % (res,"x10<sup>",sign,sig_digits,"</sup>")
elif sciformat == 4:
# ax10^b or ax10^-b
formatted = "%s%s%s%s" % (res,"·10^",sign,sig_digits)
elif sciformat == 5:
# ax10<super>b</super> or ax10<super>-b</super>
formatted = "%s%s%s%s%s" % (res,"·10<sup>",sign,sig_digits,"</sup>")
else:
# Default format: aE^+b
formatted = str("%%.%se" % sig_digits) % result
else:
# Decimal notation
prec = analysis.getPrecision(result)
prec = prec if prec else ''
formatted = str("%%.%sf" % prec) % result
    # 'formatted' is a string here, so test for a decimal point rather than
    # relying on the float-only .is_integer() method.
formatted = str(int(float(formatted))) if '.' not in formatted else formatted
return formatDecimalMark(formatted, decimalmark)
| mit | 269,186,533,421,347,600 | 39.138158 | 85 | 0.632028 | false | 4.242698 | false | false | false |
VerstandInvictus/NachIOs | ifthentrack.py | 1 | 1181 | ## Hook.io Nach tracker updater.
## This takes a query with parameters of ?val=<number>&tr=<Nach tracker ID>&sec=<secret word>
## and adds a data point to the Nach tracker with ID corresponding to the "tr" param.
## It is intended to be used with IFTTT's Maker channel action but could be triggered from anywhere.
## It is not authenticated because IFTTT doesn't really support HTTP auth;
## as a workaround it uses a secret word stored in Hook and fails if that is not a param.
## Not highly secure, but good enough for this application.
import requests
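# 'Hook' is injected by the hook.io runtime: Hook['params'] exposes the query
# parameters and Hook['env'] the environment variables stored for this hook.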
# to avoid publicizing API key, store it in your Hook env vars (hook.io/env).
apikey = Hook['env']['nachkey']
value = Hook['params']['val']
secret = Hook['params']['sec']
tracker = Hook['params']['tr']
# ditto - store a secret word or phrase in Hook env vars. This prevents open access to this hook.
magicword = Hook['env']['magicword']
# send the request
if secret == magicword:
url = 'https://nachapp.com/api/trackers/' + str(tracker) + '/measures'
r= requests.post(url, auth=(apikey, ''), verify=False, data= {"value":value})
print r.text
# <nedry>
else:
print "Ah ah ah! You didn't say the magic word!"
# </nedry>
| mit | 6,501,709,542,593,637,000 | 42.740741 | 100 | 0.711262 | false | 3.423188 | false | false | false |
kansanmuisti/datavaalit | web/political/views.py | 1 | 6227 | import json
import time
from django.template import RequestContext
from django.shortcuts import render_to_response
from social.models import *
from political.models import *
from political.api import *
from geo.models import Municipality
from django.core.urlresolvers import reverse
from django.core.mail import mail_admins
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.db.models import Count
from django.core.cache import cache
from django.template.defaultfilters import slugify
def show_candidates_social_feeds(request):
tw = {}
tw['feed_count'] = CandidateFeed.objects.filter(type="TW").count()
tw['update_count'] = Update.objects.filter(feed__type="TW").count()
fb = {}
fb['feed_count'] = CandidateFeed.objects.filter(type="FB").count()
fb['update_count'] = Update.objects.filter(feed__type="FB").count()
last_update = CandidateFeed.objects.filter(last_update__isnull=False).order_by('-last_update')[0].last_update
args = dict(tw=tw, fb=fb, last_update=last_update)
return render_to_response('political/candidate_social_feeds.html', args,
context_instance=RequestContext(request))
def candidate_change_request(request):
muni_list = []
for muni in Municipality.objects.all():
muni_list.append((muni.id, muni.name))
args = dict(muni_json=json.dumps(muni_list, ensure_ascii=False))
return render_to_response('political/candidate_change_request.html', args,
context_instance=RequestContext(request))
def _calc_submission_history(election, muni=None):
cache_key = 'party_budget'
if muni:
cache_key += '_%d' % muni.pk
ret = cache.get(cache_key)
if ret:
return ret
budget_list_base = CampaignBudget.objects.filter(candidate__election=election)
if muni:
budget_list = budget_list_base.filter(candidate__municipality=muni)
else:
budget_list = budget_list_base
party_list = []
for p in Party.objects.all():
d = {'id': p.pk, 'name': p.name, 'code': p.code, 'disclosure_data': []}
cand_list = Candidate.objects.filter(election=election, party=p)
if muni:
cand_list = cand_list.filter(municipality=muni)
d['num_candidates'] = cand_list.count()
# Filter out parties with no candidates
if not d['num_candidates']:
continue
party_list.append(d)
# Get the timestamps from all munis
timestamps = budget_list_base.order_by('time_submitted').values_list('time_submitted', flat=True).distinct()
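    # Build a cumulative step series per party: for every submission timestamp,
    # count how many of the party's candidates had disclosed by then.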
for ts in timestamps:
ts_epoch = int(time.mktime(ts.timetuple()) * 1000)
for p in party_list:
nr_submitted = budget_list.filter(candidate__party=p['id'], time_submitted__lte=ts).count()
p['disclosure_data'].append((ts_epoch, nr_submitted))
ret = json.dumps(party_list, ensure_ascii=False)
cache.set(cache_key, ret, 3600)
return ret
def get_party_budget_data(request):
election = Election.objects.get(year=2012, type='muni')
muni = None
if 'municipality' in request.GET:
try:
muni = Municipality.objects.get(id=int(request.GET['municipality']))
        except (ValueError, Municipality.DoesNotExist):
raise Http404()
ret = _calc_submission_history(election, muni)
return HttpResponse(ret, mimetype="application/javascript")
def _calc_prebudget_stats():
args = {}
timestamp = CampaignBudget.objects.order_by('-time_submitted')[0].time_submitted
election = Election.objects.get(year=2012, type='muni')
# Find the list of candidates that have submitted the campaign prebudgets
submitted_list = CampaignBudget.objects.filter(advance=True, candidate__election=election)
muni_list = Municipality.objects.annotate(num_candidates=Count('candidate')).filter(num_candidates__gt=0).order_by('name')
muni_dict = {}
for muni in muni_list:
muni_dict[muni.pk] = muni
muni.num_submitted = 0
# Calculate how many candidates have submitted the budgets per muni.
# Also figure out when the candidate first submitted the advance disclosure.
for budget in submitted_list:
muni = muni_dict[budget.candidate.municipality_id]
muni.num_submitted += 1
muni_dict = {}
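    # Rebuilt as a JSON-serializable mapping; the first muni_dict above only
    # linked pk -> model instance while the submissions were being counted.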
total_cands = 0
total_submitted = 0
for muni in muni_list:
m = {'num_submitted': muni.num_submitted,
'num_candidates': muni.num_candidates,
'name': muni.name}
m['slug'] = slugify(muni.name)
muni_dict[muni.pk] = m
total_cands += muni.num_candidates
total_submitted += muni.num_submitted
args['num_candidates'] = total_cands
args['num_submitted'] = total_submitted
args['muni_json'] = json.dumps(muni_dict, indent=None, ensure_ascii=False)
args['timestamp'] = timestamp
return args
def show_prebudget_stats(request):
# The calculation takes a bit of time, so cache the results.
args = cache.get('muni_budget_stats')
if not args:
args = _calc_prebudget_stats()
cache.set('muni_budget_stats', args, 3600)
return render_to_response('political/candidate_budgets.html', args,
context_instance=RequestContext(request))
def candidate_change_request_form(request):
if request.method == 'GET':
return render_to_response('political/candidate_change_request_ok.html',
context_instance=RequestContext(request))
args = request.POST
try:
cand_id = int(args['candidate-id'])
request_str = args['request']
    except (KeyError, ValueError):
return HttpResponseRedirect(reverse('political.views.candidate_change_request'))
try:
cand = Candidate.objects.get(pk=cand_id)
except Candidate.DoesNotExist:
return HttpResponseRedirect(reverse('political.views.candidate_change_request'))
subject = "Change request: %s" % unicode(cand)
message = """
Info
----
"""
message += "Candidate: %s\n" % unicode(cand)
message += "Request:\n%s" % unicode(request_str)
mail_admins(subject, message, fail_silently=False)
return HttpResponseRedirect(reverse('political.views.candidate_change_request_form'))
| agpl-3.0 | -3,575,229,711,720,876,500 | 38.411392 | 126 | 0.666131 | false | 3.584917 | false | false | false |
santegoeds/bfair | bfair/_types.py | 1 | 7481 | #!/usr/bin/env python
#
# Copyright 2011 Tjerk Santegoeds
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import izip
def _mk_class(name, attrs):
"""Creates a class similar to a namedtuple. These classes are compatible
with SQLAlchemy, however.
"""
class_ = type(name, (object,), {attr: None for attr in attrs})
class_.__slots__ = attrs
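    # NB: assigning __slots__ after type() creation leaves it non-functional
    # (instances keep a __dict__), likely the SQLAlchemy compatibility the
    # docstring refers to; it still drives __init__/__len__/__getitem__ below.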
def __init__(self, *args, **kwargs):
for attr, val in izip(self.__slots__, args):
setattr(self, attr, val)
for k, v in kwargs.iteritems():
if k not in self.__slots__:
raise ValueError("%s : Invalid attribute" % k)
setattr(self, k, v)
def __repr__(self):
s = ", ".join("=".join((a, repr(getattr(self, a)))) for a in self.__slots__)
s = "".join(("<", type(self).__name__, "(", s, ")>"))
return s
def __str__(self):
return repr(self)
def __len__(self):
return len(self.__slots__)
def __getitem__(self, i):
return getattr(self, self.__slots__[i])
class_.__init__ = __init__
class_.__repr__ = __repr__
class_.__str__ = __str__
class_.__len__ = __len__
class_.__getitem__ = __getitem__
return class_
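# Usage sketch: P = _mk_class("P", ("x", "y")); p = P(1, y=2)
# -> p.x == 1, p[1] == 2, len(p) == 2, repr(p) == "<P(x=1, y=2)>"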
Market = _mk_class(
"Market", (
"marketId",
"marketName",
"marketType",
"marketStatus",
"marketTime",
"menuPath",
"eventHierarchy",
"betDelay",
"exchangeId",
"countryISO3",
"lastRefresh",
"numberOfRunners",
"numberOfWinners",
"matchedSize",
"bspMarket",
"turningInPlay",
)
)
EventInfo = _mk_class(
"EventInfo", (
"eventItems", # List of BFEvent
"eventParentId",
"marketItems", # List of MarketSummary
"couponLinks", # List of CouponLink
)
)
BFEvent = _mk_class(
"BFEvent", (
"eventId",
"eventName",
"eventTypeId",
"menuLevel",
"orderIndex",
"startTime",
"timezone",
)
)
MarketSummary = _mk_class(
"MarketSummary", (
"eventTypeId",
"marketId",
"marketName",
"marketType",
"marketTypeVariant",
"menuLevel",
"orderIndex",
"startTime",
"timezone",
"venue",
"betDelay",
"numberOfWinners",
"eventParentId",
"exchangeId",
)
)
Currency = _mk_class(
"Currency", (
"currencyCode",
"rateGBP",
"minimumStake",
"minimumStakeRange",
"minimumBSPLayLiability",
)
)
Currency.__str__ = lambda self: self.currencyCode
MarketPrices = _mk_class(
"MarketPrices", (
"marketId",
"currency",
"marketStatus",
"delay",
"numberOfWinners",
"marketInfo",
"discountAllowed",
"marketBaseRate",
"lastRefresh",
"removedRunners",
"bspMarket",
"runnerPrices",
)
)
EventType = _mk_class(
"EventType", (
"id",
"name",
"nextMarketId",
"exchangeId",
)
)
CouponLink = _mk_class(
"CouponLink", (
"couponId",
"couponName",
)
)
RunnerPrice = _mk_class(
"RunnerPrice", (
"selectionId",
"sortOrder",
"totalAmountMatched",
"lastPriceMatched",
"handicap",
"reductionFactor",
"vacant",
"farBSP",
"nearBSP",
"actualBSP",
"bestPricesToLay",
"bestPricesToBack",
"asianLineId",
)
)
Price = _mk_class(
"Price", (
"price",
"amountAvailable",
"betType",
"depth",
)
)
Runner = _mk_class(
"Runner", (
"asianLineId",
"handicap",
"name",
"selectionId",
)
)
RemovedRunner = _mk_class(
"RemovedRunner", (
"selection_name",
"removed_date",
"adjustment_factor"
)
)
BetInfo = _mk_class(
"BetInfo", (
"asianLineId",
"avgPrice",
"betCategoryType",
"betId",
"betPersistenceType",
"bspLiability",
"cancelledDate",
"executedBy",
"fullMarketName",
"handicap",
"lapsedDate",
"marketId",
"marketName",
"marketType",
"marketTypeVariant",
"matchedDate",
"matchedSize",
"matches",
"placedDate",
"price",
"profitAndLoss",
"remainingSize",
"requestedSize",
"selectionId",
"selectionName",
"settledDate",
"voidedDate",
)
)
PlaceBet = _mk_class(
"PlaceBet", (
"asianLineId",
"betCategoryType",
"betPersistenceType",
"betType",
"bspLiability",
"marketId",
"price",
"selectionId",
"size",
)
)
PlaceBetResult = _mk_class(
"PlaceBetResult", (
"averagePriceMatched",
"betId",
"resultCode",
"sizeMatched",
"success",
)
)
UpdateBet = _mk_class(
"UpdateBet", (
"betId",
"newBetPersistenceType",
"newPrice",
"newSize",
"oldBetPersistenceType",
"oldPrice",
"oldSize",
)
)
CancelBet = _mk_class(
"CancelBet", (
"betId",
)
)
Match = _mk_class(
"Match", (
"betStatus",
"matchedDate",
"priceMatched",
"profitLoss",
"settledDate",
"sizeMatched",
"transactionId",
"voidedDate",
)
)
MarketInfo = _mk_class(
"MarketInfo", (
'countryISO3',
'discountAllowed',
'eventTypeId',
'lastRefresh',
'marketBaseRate',
'marketDescription',
'marketDescriptionHasDate',
'marketDisplayTime',
'marketId',
'marketStatus',
'marketSuspendTime',
'marketTime',
'marketType',
'marketTypeVariant',
'menuPath',
'eventHierarchy',
'name',
'numberOfWinners',
'parentEventId',
'runners', # List of Runners
'unit',
'maxUnitValue',
'minUnitValue',
'interval',
'runnersMayBeAdded',
'timezone',
'licenceId',
'couponLinks', # List of CouponLink
'bspMarket',
)
)
MarketInfoLite = _mk_class(
"MarketInfoLite", (
"marketStatus",
"marketSuspendTime",
"marketTime",
"numberOfRunners",
"delay",
"reconciled",
"openForBspBetting",
)
)
VolumeInfo = _mk_class(
"VolumeInfo", (
"odds",
"totalMatchedAmount",
"totalBspBackMatchedAmount",
"totalBspMatchedAmount",
)
)
MarketTradedVolume = _mk_class(
"MarketTradedVolume", (
"priceItems",
"actualBSP",
)
)
MarketTradedVolume.reconciled = property(lambda self: self.actualBSP != 0.)
del _mk_class
| apache-2.0 | 4,085,828,044,796,257,000 | 19.439891 | 84 | 0.515573 | false | 3.714499 | false | false | false |
tuxskar/caluny | caluny/core/admin.py | 1 | 2473 | """Admin site registration models for Caluma"""
from django.contrib import admin
from .models import SemesterDate
from .models import Student, Course, Level, Exam, Timetable, CourseLabel, Degree
from .models import Subject, Teacher, TeachingSubject, School, University
@admin.register(Subject)
class SubjectAdmin(admin.ModelAdmin):
list_display = ('title', 'degree', 'level', 'description')
search_fields = ('code', 'title')
list_filter = ('degree',)
ordering = ('degree',)
@admin.register(Teacher)
class TeacherAdmin(admin.ModelAdmin):
pass
@admin.register(TeachingSubject)
class TeachingSubjectAdmin(admin.ModelAdmin):
list_display = ('subject', 'degree_info', 'course', 'start_date', 'end_date', 'address')
search_fields = ('subject__title',)
list_filter = ('course', 'subject__degree__title', 'address')
ordering = ('course',)
@admin.register(Student)
class StudentAdmin(admin.ModelAdmin):
pass
@admin.register(Course)
class CourseAdmin(admin.ModelAdmin):
search_fields = ('language', 'level', 'label')
list_filter = ('language', 'level')
ordering = ('level', 'label',)
@admin.register(Level)
class LevelAdmin(admin.ModelAdmin):
pass
@admin.register(Exam)
class ExamAdmin(admin.ModelAdmin):
list_display = ('title', 'degree_info', 'address', 'date', 'start_time', 'end_time', 'course_info')
search_fields = ('title',)
list_filter = ('date', 'address', 't_subject__subject__degree__title')
ordering = ('date',)
@admin.register(Timetable)
class TimetableAdmin(admin.ModelAdmin):
list_display = ('t_subject', 'degree_info', 'period', 'week_day', 'start_time', 'end_time')
search_fields = ('t_subject__subject__title',)
list_filter = ('week_day', 'period', 't_subject__subject__degree__title')
ordering = ('t_subject',)
@admin.register(School)
class SchoolAdmin(admin.ModelAdmin):
def get_queryset(self, request):
        qs = super(SchoolAdmin, self).get_queryset(request)
if request.user.is_superuser:
return qs
# return qs.first()filter(owner=request.user)
# return qs.first()
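        # Non-superusers are restricted to a single hard-coded school (pk 10),
        # apparently a placeholder until the owner filter above is enabled.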
return qs.filter(id=10)
@admin.register(University)
class UniversityAdmin(admin.ModelAdmin):
pass
@admin.register(CourseLabel)
class CourseLabelAdmin(admin.ModelAdmin):
pass
@admin.register(SemesterDate)
class SemesterDateAdmin(admin.ModelAdmin):
pass
@admin.register(Degree)
class DegreeAdmin(admin.ModelAdmin):
pass
| gpl-2.0 | 8,265,881,122,111,337,000 | 26.175824 | 103 | 0.68702 | false | 3.537911 | false | false | false |
frnhr/django-stdnumfield | testproject/testproject/settings.py | 1 | 3320 | """
Django settings for testproject project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "=t4f&0jd786fl_ri1$7z9)!iblzhv1r7f$9p&z4kol9zej*(q@"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"stdnumfield",
"testapp",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "testproject.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "testproject.wsgi.application"
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
# fmt: off
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": (
"django.contrib.auth.password_validation"
".UserAttributeSimilarityValidator"
),
},
{
"NAME": (
"django.contrib.auth.password_validation"
".MinimumLengthValidator"
),
},
{
"NAME": (
"django.contrib.auth.password_validation"
".CommonPasswordValidator"
),
},
{
"NAME": (
"django.contrib.auth.password_validation"
".NumericPasswordValidator"
),
},
]
# fmt: on
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = "/static/"
| unlicense | -7,300,196,891,010,927,000 | 23.592593 | 79 | 0.654217 | false | 3.566058 | false | false | false |
talipovm/terse | terse/ReportGenerator/Charges.py | 1 | 3037 | from ReportGenerator.Top_ReportGenerator import Top_ReportGenerator
from ReportGenerator.Geom import Geom
import logging
log = logging.getLogger(__name__)
class Charges(Top_ReportGenerator):
def __init__(self,we,parsed):
self.Q = list()
super().__init__(we,parsed)
def prepare_for_report(self):
geom = Geom(self.we,self.parsed).geom
q_Mulliken = self.parsed.last_value('P_charges_Mulliken')
q_Lowdin = self.parsed.last_value('P_charges_Lowdin')
Q = (
(q_Mulliken, 'Mulliken'),
(self.combineH(q_Mulliken, geom), 'no_H'),
(q_Lowdin, 'Lowdin'),
(self.combineH(q_Lowdin, geom), 'no_H'),
)
self.Q = list((q,name) for q,name in Q if q is not None)
self.available = (len(list(self.Q))>0)
def combineH(self, q, geom):
        if (geom is None) or (q is None) or (not [atom for atom in geom if atom[0] != 'H']):
return None
out = [float(x) for x in q]
at_pairs = self.assignH(geom)
for i,j in at_pairs:
out[j] += out[i]
out[i] = 0
return [str(x) for x in out]
def assignH(self, geom):
return [(i,self.find_closest(i,geom)) for i,atom in enumerate(geom) if atom[0]=='H']
def find_closest(self,i,geom):
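        # Nearest heavy (non-H) atom by squared distance; no sqrt is needed
        # since distances are only compared.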
x,y,z = [float(q) for q in geom[i][1:]]
min_r2 = 1e6
min_j = 0
for j,at2 in enumerate(geom):
if at2[0]=='H' or i==j:
continue
x2,y2,z2 = [float(q) for q in at2[1:]]
r = (x2-x)**2 + (y2-y)**2 + (z2-z)**2
if r < min_r2:
min_r2 = r
min_j = j
return min_j
def charges_button(self, load_command, charges, name):
color_min, color_max = -1.0, 1.0
h_1 = h_2 = ""
if 'no_H' in name:
h_1 = "color atoms cpk; label off ; select not Hydrogen"
h_2 = "select all"
script_on = "; ".join([
"x='%(a)s'",
"DATA '%(p)s @x'",
"%(h_1)s",
"label %%.%(precision)s[%(p)s]",
#"color atoms %(p)s 'rwb' absolute %(col_min)f %(col_max)f",
"%(h_2)s"
]) % {
'a': " ".join(charges),
'p': 'property_' + name,
'precision': str(2),
'col_min': color_min,
'col_max': color_max,
'h_1': h_1,
'h_2': h_2
}
script_on ="; ".join([load_command,script_on])
return self.we.html_button(script_on, name)
def charges_button_off(self):
return self.we.html_button('label off;color atoms cpk', 'Off')
def button_bar(self, load_command):
if not self.available:
return ''
self.add_right('Charges: ')
for q,name in self.Q:
s = self.charges_button(load_command, q, name)
self.add_right(s)
self.add_right(self.charges_button_off())
self.add_right(self.br_tag)
return self.get_cells() | mit | 8,360,541,386,781,269,000 | 30.978947 | 94 | 0.498848 | false | 3.127703 | false | false | false |
oldm/OldMan | tests/attr_entry_test.py | 1 | 1916 | import unittest
from oldman.model.attribute import Entry
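# As exercised below, Entry tracks a stored value versus a pending one:
# diff() returns (stored, pending), receive_storage_ack() promotes the pending
# value, and has_changed() reports whether the two still differ.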
class AttributeEntryTest(unittest.TestCase):
def test_1(self):
entry = Entry()
value1 = 1
self.assertNotEquals(entry.current_value, value1)
entry.current_value = value1
self.assertEquals(entry.current_value, value1)
self.assertTrue(entry.has_changed())
self.assertEquals(entry.diff(), (None, value1))
self.assertTrue(entry.has_changed())
entry.receive_storage_ack()
self.assertFalse(entry.has_changed())
self.assertEquals(entry.current_value, value1)
#TODO: use a more precise exception
with self.assertRaises(Exception):
entry.diff()
value2 = 2
entry.current_value = value2
self.assertEquals(entry.current_value, value2)
self.assertTrue(entry.has_changed())
self.assertEquals(entry.diff(), (value1, value2))
entry.receive_storage_ack()
self.assertFalse(entry.has_changed())
self.assertEquals(entry.current_value, value2)
def test_boolean(self):
entry = Entry()
entry.current_value = False
self.assertTrue(entry.has_changed())
self.assertEquals(entry.diff(), (None, False))
entry.receive_storage_ack()
self.assertFalse(entry.has_changed())
entry.current_value = None
self.assertTrue(entry.has_changed())
self.assertEquals(entry.diff(), (False, None))
def test_clone(self):
value1 = [1]
value2 = {2}
e1 = Entry(value1)
e1.current_value = value2
self.assertEquals(e1.diff(), (value1, value2))
e2 = e1.clone()
self.assertEquals(e1.diff(), e2.diff())
value3 = {"f": "3"}
e1.current_value = value3
self.assertEquals(e1.diff(), (value1, value3))
self.assertEquals(e2.diff(), (value1, value2))
| bsd-3-clause | -1,405,675,632,503,099,600 | 27.176471 | 57 | 0.613779 | false | 3.855131 | true | false | false |
mitar/django-pushserver | setup.py | 1 | 1615 | #!/usr/bin/env python
import os
from setuptools import setup, find_packages
try:
# Workaround for http://bugs.python.org/issue15881
import multiprocessing
except ImportError:
pass
VERSION = '0.3.4'
if __name__ == '__main__':
setup(
name = 'django-pushserver',
version = VERSION,
description = "Push server for Django based on Leo Ponomarev's Basic HTTP Push Relay Protocol.",
long_description = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
author = 'Mitar',
author_email = '[email protected]',
url = 'https://github.com/mitar/django-pushserver',
license = 'AGPLv3',
packages = find_packages(exclude=('*.tests', '*.tests.*', 'tests.*', 'tests')),
package_data = {},
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
include_package_data = True,
zip_safe = False,
dependency_links = [
'https://github.com/mitar/py-hbpush/tarball/0.1.4-mitar#egg=py_hbpush-0.1.4',
'http://github.com/clement/brukva/tarball/bff451511a3cc09cd52bebcf6372a59d36567827#egg=brukva-0.0.1',
],
install_requires = [
'Django>=1.2',
'py_hbpush==0.1.4',
'tornado<3',
],
)
| agpl-3.0 | -6,908,667,698,903,055,000 | 33.361702 | 113 | 0.570898 | false | 3.687215 | false | false | false |
JohanComparat/nbody-npt-functions | bin/bin_SMHMr/create_AGN_catalog_gawk.py | 1 | 2344 | # overall python packages
import glob
#import astropy.io.fits as fits
# 2397897 143.540054 0.032711 20.449619 119.370173 9.753314 33.197590 -1.000000 25.191960 40.977921 2 127
# ------ -------- -------- ra dec
import os
import time
import numpy as n
import sys
t0=time.time()
#from astropy.cosmology import FlatLambdaCDM
#import astropy.units as u
#cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115, Ob0=0.048206)
def get_AGN_catalog(env='MD10'):
# gets the file list to add the Xray luminosity
fileList = n.array(glob.glob(os.path.join(os.environ[env], "light-cone", "MDPL2_ROCKSTAR_FluxProj_*_000_AGN.dat" )))
fileList.sort()
#print fileList
#print fileList[0]
#data = n.loadtxt(fileList[0],unpack=True)
#print data, data.shape
#agn = (data[3] > 30 ) & (data[3] < 40 ) & (data[4] > 30 ) & (data[4] < 40 )
#data_all = data.T[agn]
#print data_all.shape
for path_2_input in fileList:
print path_2_input
path_2_output = os.path.join(os.environ[env], "light-cone", os.path.basename(path_2_input)[:-4]+".erosita-agn-window-100deg2.gawk.ascii")
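        # Columns 4-5 hold ra/dec (see the header comment at the top); keeping
        # both in [-5, 5] deg selects the 10x10 deg = 100 deg^2 window named in
        # the output file.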
gawk_command = """gawk ' {if ( $4 >= -5 && $4<=5 && $5 >= -5 && $5 <=5 ) print $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12}' """ + path_2_input +" > " + path_2_output
print(gawk_command)
os.system(gawk_command)
#data = n.loadtxt(fileName,unpack=True)
#print data.shape
## compute luminosity
#dL_cm = (cosmoMD.luminosity_distance(data[2]).to(u.cm)).value
#flux = 10**(data[9]-0.3) / (4.* n.pi * dL_cm * dL_cm)
#print dL_cm, flux
#agn = (flux > 1e-15 ) #& (data[2] < 2.44)
#print len(agn.nonzero()[0])
##data_all = n.vstack((data_all, data.T[agn]))
##print data_all.shape
#n.savetxt(, data.T[agn])
get_AGN_catalog(env='MD10')
print time.time()-t0, "seconds"
os.system("""cat header_agn.txt MDPL2_ROCKSTAR_FluxProj_*p_000_AGN.erosita-agn-window-100deg2.gawk.ascii > AGN.erosita-agn-window-100deg2.ascii""")
#os.system("""cat AGN.erosita-agn-window-100deg2.gawk.ascii > AGN.erosita-agn-window-100deg2-withHeader.ascii""")
| cc0-1.0 | 3,773,433,743,117,480,000 | 45.88 | 189 | 0.566126 | false | 2.712963 | false | false | false |
erdc/proteus | proteus/tests/surface_tension/rising_bubble_rans3p/vof_n.py | 1 | 1873 | from __future__ import absolute_import
from proteus import *
try:
from .risingBubble import *
from .vof_p import *
except:
from risingBubble import *
from vof_p import *
if timeDiscretization=='vbdf':
timeIntegration = VBDF
timeOrder=2
stepController = Min_dt_cfl_controller
elif timeDiscretization=='flcbdf':
timeIntegration = FLCBDF
#stepController = FLCBDF_controller
stepController = Min_dt_cfl_controller
time_tol = 10.0*vof_nl_atol_res
atol_u = {0:time_tol}
rtol_u = {0:time_tol}
else:
timeIntegration = BackwardEuler_cfl
stepController = Min_dt_cfl_controller
femSpaces = {0:pbasis}
massLumping = False
numericalFluxType = VOF3P.NumericalFlux
conservativeFlux = None
subgridError = VOF3P.SubgridError(coefficients=coefficients,nd=nd)
shockCapturing = VOF3P.ShockCapturing(coefficients,nd,shockCapturingFactor=vof_shockCapturingFactor,lag=vof_lag_shockCapturing)
if EXPLICIT_VOF:
fullNewtonFlag = False
timeIntegration = BackwardEuler_cfl
stepController = Min_dt_cfl_controller
else:
fullNewtonFlag = True
multilevelNonlinearSolver = Newton
levelNonlinearSolver = TwoStageNewton
nonlinearSmoother = None
linearSmoother = None
matrix = SparseMatrix
if useOldPETSc:
multilevelLinearSolver = PETSc
levelLinearSolver = PETSc
else:
multilevelLinearSolver = KSP_petsc4py
levelLinearSolver = KSP_petsc4py
if useSuperlu:
multilevelLinearSolver = LU
levelLinearSolver = LU
linear_solver_options_prefix = 'vof_'
nonlinearSolverConvergenceTest = 'rits'
levelNonlinearSolverConvergenceTest = 'rits'
linearSolverConvergenceTest = 'r-true'
tolFac = 0.0
nl_atol_res = vof_nl_atol_res
linTolFac = 0.0
l_atol_res = 0.1*vof_nl_atol_res
useEisenstatWalker = False
maxNonlinearIts = 50
maxLineSearches = 0
| mit | -7,382,594,697,588,394,000 | 24.657534 | 130 | 0.732515 | false | 2.977742 | false | true | false |
rwl/PyCIM | CIM14/IEC61970/Dynamics/BlockUsageOutputReference.py | 1 | 4616 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Core.IdentifiedObject import IdentifiedObject
class BlockUsageOutputReference(IdentifiedObject):
"""Used at instance level to tie the input of a referenced block to the output of another referenced block. Note that typically an input is only tied to an output of another block at the same PowerSystemResource, but there is no restriction to do so. If the output is implicity tied to an input, then the an instance of this class is not required. The sole purpose of this class is to explicitly tio the input of other blocks at the power system instance level.
"""
def __init__(self, block0=None, BlockUsageInputReference=None, metaBlockOutput0=None, *args, **kw_args):
"""Initialises a new 'BlockUsageOutputReference' instance.
@param block0:
@param BlockUsageInputReference: Can cross BlockUsage objects.
@param metaBlockOutput0:
"""
self._block0 = None
self.block0 = block0
self._BlockUsageInputReference = []
self.BlockUsageInputReference = [] if BlockUsageInputReference is None else BlockUsageInputReference
self._metaBlockOutput0 = None
self.metaBlockOutput0 = metaBlockOutput0
super(BlockUsageOutputReference, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["block0", "BlockUsageInputReference", "metaBlockOutput0"]
_many_refs = ["BlockUsageInputReference"]
def getblock0(self):
return self._block0
def setblock0(self, value):
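        # Keep both ends of the association in sync: detach from the previous
        # block0's back-reference list, then append ourselves to the new one.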
if self._block0 is not None:
filtered = [x for x in self.block0.BlockUsageOutputReference if x != self]
self._block0._BlockUsageOutputReference = filtered
self._block0 = value
if self._block0 is not None:
if self not in self._block0._BlockUsageOutputReference:
self._block0._BlockUsageOutputReference.append(self)
block0 = property(getblock0, setblock0)
def getBlockUsageInputReference(self):
"""Can cross BlockUsage objects.
"""
return self._BlockUsageInputReference
def setBlockUsageInputReference(self, value):
for x in self._BlockUsageInputReference:
x.BlockUsageOutputReference = None
for y in value:
y._BlockUsageOutputReference = self
self._BlockUsageInputReference = value
BlockUsageInputReference = property(getBlockUsageInputReference, setBlockUsageInputReference)
def addBlockUsageInputReference(self, *BlockUsageInputReference):
for obj in BlockUsageInputReference:
obj.BlockUsageOutputReference = self
def removeBlockUsageInputReference(self, *BlockUsageInputReference):
for obj in BlockUsageInputReference:
obj.BlockUsageOutputReference = None
def getmetaBlockOutput0(self):
return self._metaBlockOutput0
def setmetaBlockOutput0(self, value):
if self._metaBlockOutput0 is not None:
filtered = [x for x in self.metaBlockOutput0.blockUsageOutputReference0 if x != self]
self._metaBlockOutput0._blockUsageOutputReference0 = filtered
self._metaBlockOutput0 = value
if self._metaBlockOutput0 is not None:
if self not in self._metaBlockOutput0._blockUsageOutputReference0:
self._metaBlockOutput0._blockUsageOutputReference0.append(self)
metaBlockOutput0 = property(getmetaBlockOutput0, setmetaBlockOutput0)
| mit | -6,126,209,542,516,952,000 | 42.961905 | 468 | 0.717071 | false | 4.375355 | false | false | false |
O-T-L/PyOptimization | parameters/indicator/pf.py | 1 | 3441 | """
Copyright (C) 2014, 申瑞珉 (Ruimin Shen)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import os
import math
import numpy
import pyotl.utility
import pyoptimization.utility
_pfs = {}
def get_pf(path):
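    # Memoize reference fronts by file path so each front is loaded from disk
    # and converted to the OTL vector type only once.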
if path in _pfs:
return _pfs[path]
else:
pf = numpy.loadtxt(path, ndmin=2)
_pf = pyotl.utility.PyListList2VectorVector_Real(pf.tolist())
_pfs[path] = _pf
return _pf
def pf(config, properties, folder='PF'):
path = os.path.join(pyoptimization.utility.get_pyoptimization_path(config), 'Data', folder)
if re.match('^ZDT[14]$', properties['problem']):
return get_pf(os.path.join(path, 'ZDT1.csv'))
elif re.match('^ZDT[26]$', properties['problem']):
return get_pf(os.path.join(path, 'ZDT2.csv'))
elif re.match('^ZDT3$', properties['problem']):
return get_pf(os.path.join(path, 'ZDT3.csv'))
elif re.match('^ZDT5$', properties['problem']):
return get_pf(os.path.join(path, 'ZDT5.csv'))
elif re.match('^UF(\d|10)$', properties['problem']):
return get_pf(os.path.join(path, properties['problem'] + '.csv'))
elif properties['problem'] == 'DTLZ1':
return get_pf(os.path.join(path, 'DTLZ1', str(properties['objectives']) + '.csv'))
elif re.match('^DTLZ[234]$', properties['problem']):
if properties['objectives'] == 2:
return get_pf(os.path.join(path, 'DTLZ5', str(properties['objectives']) + '.csv'))
else:
return get_pf(os.path.join(path, 'DTLZ2', str(properties['objectives']) + '.csv'))
elif re.match('^DTLZ[56]$', properties['problem']):
return get_pf(os.path.join(path, 'DTLZ5', str(properties['objectives']) + '.csv'))
elif properties['problem'] == 'DTLZ7':
return get_pf(os.path.join(path, 'DTLZ7', str(properties['objectives']) + '.csv'))
elif re.match('^DTLZ[56]I$', properties['problem']):
return get_pf(os.path.join(path, 'DTLZ5I', '%u_%u.csv' % (properties['objectives'], properties['DTLZ_I'])))
elif re.match('^ScaledDTLZ[234]$', properties['problem']):
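        # Unlike the branches above, this returns a plain NumPy array with the
        # i-th objective column scaled by 10**i, not the cached vector type.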
pf = numpy.loadtxt(os.path.join(path, 'DTLZ2', str(properties['objectives']) + '.csv'))
for i, col in enumerate(pf.T):
col *= math.pow(10, i)
return pf
elif re.match('^WFG[1-3]$', properties['problem']):
return get_pf(os.path.join(path, properties['problem'], str(properties['objectives']) + '.csv'))
elif re.match('^WFG[4-9]$', properties['problem']):
return get_pf(os.path.join(path, 'WFG4', str(properties['objectives']) + '.csv'))
try:
return get_pf(os.path.join(path, properties['problem'] + '.csv'))
except:
try:
return get_pf(os.path.join(path, properties['problem'], str(properties['objectives']) + '.csv'))
except:
raise Exception(properties, folder)
| lgpl-3.0 | -4,490,129,093,049,562,600 | 43.038462 | 115 | 0.63901 | false | 3.29023 | false | false | false |
YeEmrick/learning | stanford-tensorflow/assignments/01/q1_sol.py | 1 | 5009 | """
Solution to simple exercises to get used to TensorFlow API
You should thoroughly test your code.
TensorFlow's official documentation should be your best friend here
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Created by Chip Huyen ([email protected])
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
sess = tf.InteractiveSession()
###############################################################################
# 1a: Create two random 0-d tensors x and y of any distribution.
# Create a TensorFlow object that returns x + y if x > y, and x - y otherwise.
# Hint: look up tf.cond()
# I do the first problem for you
###############################################################################
x = tf.random_uniform([]) # Empty array as shape creates a scalar.
y = tf.random_uniform([])
out = tf.cond(tf.greater(x, y), lambda: tf.add(x, y), lambda: tf.subtract(x, y))
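# tf.cond takes zero-argument callables and only executes the ops of the
# branch selected when the predicate is evaluated.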
###############################################################################
# 1b: Create two 0-d tensors x and y randomly selected from the range [-1, 1).
# Return x + y if x < y, x - y if x > y, 0 otherwise.
# Hint: Look up tf.case().
###############################################################################
x = tf.random_uniform([], -1, 1, dtype=tf.float32)
y = tf.random_uniform([], -1, 1, dtype=tf.float32)
out = tf.case({tf.less(x, y): lambda: tf.add(x, y),
tf.greater(x, y): lambda: tf.subtract(x, y)},
default=lambda: tf.constant(0.0), exclusive=True)
###############################################################################
# 1c: Create the tensor x of the value [[0, -2, -1], [0, 1, 2]]
# and y as a tensor of zeros with the same shape as x.
# Return a boolean tensor that yields Trues if x equals y element-wise.
# Hint: Look up tf.equal().
###############################################################################
x = tf.constant([[0, -2, -1], [0, 1, 2]])
y = tf.zeros_like(x)
out = tf.equal(x, y)
###############################################################################
# 1d: Create the tensor x of value
# [29.05088806, 27.61298943, 31.19073486, 29.35532951,
# 30.97266006, 26.67541885, 38.08450317, 20.74983215,
# 34.94445419, 34.45999146, 29.06485367, 36.01657104,
# 27.88236427, 20.56035233, 30.20379066, 29.51215172,
# 33.71149445, 28.59134293, 36.05556488, 28.66994858].
# Get the indices of elements in x whose values are greater than 30.
# Hint: Use tf.where().
# Then extract elements whose values are greater than 30.
# Hint: Use tf.gather().
###############################################################################
x = tf.constant([29.05088806, 27.61298943, 31.19073486, 29.35532951,
30.97266006, 26.67541885, 38.08450317, 20.74983215,
34.94445419, 34.45999146, 29.06485367, 36.01657104,
27.88236427, 20.56035233, 30.20379066, 29.51215172,
33.71149445, 28.59134293, 36.05556488, 28.66994858])
indices = tf.where(x > 30)
out = tf.gather(x, indices)
###############################################################################
# 1e: Create a diagonal 2-d tensor of size 6 x 6 with the diagonal values of 1,
# 2, ..., 6
# Hint: Use tf.range() and tf.diag().
###############################################################################
values = tf.range(1, 7)
out = tf.diag(values)
###############################################################################
# 1f: Create a random 2-d tensor of size 10 x 10 from any distribution.
# Calculate its determinant.
# Hint: Look at tf.matrix_determinant().
###############################################################################
m = tf.random_normal([10, 10], mean=10, stddev=1)
out = tf.matrix_determinant(m)
###############################################################################
# 1g: Create tensor x with value [5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9].
# Return the unique elements in x
# Hint: use tf.unique(). Keep in mind that tf.unique() returns a tuple.
###############################################################################
x = tf.constant([5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9])
unique_values, indices = tf.unique(x)
###############################################################################
# 1h: Create two tensors x and y of shape 300 from any normal distribution,
# as long as they are from the same distribution.
# Use tf.cond() to return:
# - The mean squared error of (x - y) if the average of all elements in (x - y)
# is negative, or
# - The sum of absolute value of all elements in the tensor (x - y) otherwise.
# Hint: see the Huber loss function in the lecture slides 3.
###############################################################################
x = tf.random_normal([300], mean=5, stddev=1)
y = tf.random_normal([300], mean=5, stddev=1)
average = tf.reduce_mean(x - y)
def f1(): return tf.reduce_mean(tf.square(x - y))
def f2(): return tf.reduce_sum(tf.abs(x - y))
out = tf.cond(average < 0, f1, f2) | apache-2.0 | -4,323,550,377,722,852,000 | 43.336283 | 80 | 0.498503 | false | 3.47124 | false | false | false |
Noirello/bonsai | .ci/delay.py | 1 | 5135 | import os
import subprocess
import xmlrpc.server as rpc
import time
import sys
import multiprocessing as mp
try:
import pydivert
except ImportError:
pass
class LinuxDelayHandler:
@staticmethod
def get_interface_name():
""" Get the first interface name that is not the localhost. """
net = os.listdir("/sys/class/net")
net.remove("lo")
if "eth0" in net:
return "eth0"
return net[0]
def set_delay(self, sec, duration=10.0):
""" Set network delay, return with the call's result. """
try:
subprocess.check_call(
[
"tc",
"qdisc",
"add",
"dev",
self.get_interface_name(),
"root",
"handle",
"1:",
"prio",
]
)
subprocess.check_call(
[
"tc",
"qdisc",
"add",
"dev",
self.get_interface_name(),
"parent",
"1:3",
"handle",
"30:",
"netem",
"delay",
("%dmsec" % (sec * 1000)),
]
)
for port in ("389", "636"):
subprocess.check_call(
[
"tc",
"filter",
"add",
"dev",
self.get_interface_name(),
"protocol",
"ip",
"parent",
"1:0",
"u32",
"match",
"ip",
"sport",
port,
"0xffff",
"flowid",
"1:3",
]
)
return True
except subprocess.CalledProcessError:
return False
def remove_delay(self):
""" Remove network delay. """
try:
subprocess.check_call(
["tc", "qdisc", "del", "dev", self.get_interface_name(), "root"]
)
return True
except subprocess.CalledProcessError:
return False
class MacDelayHandler:
def set_delay(self, sec, duration=10.0):
with open("/etc/pf.conf") as fp:
conf = fp.read()
conf += '\ndummynet-anchor "mop"\nanchor "mop"\n'
rule = (
"dummynet in quick proto tcp from any to any port {389, 636} pipe 1\n"
)
try:
subprocess.run(
["pfctl", "-f", "-"], input=conf, encoding="utf-8", check=True
)
subprocess.run(
["pfctl", "-a", "mop", "-f", "-"],
input=rule,
encoding="utf-8",
check=True,
)
subprocess.check_call(
["dnctl", "pipe", "1", "config", "delay", "%d" % int(sec * 1000)]
)
return True
except subprocess.CalledProcessError:
return False
def remove_delay(self):
try:
subprocess.check_call(["dnctl", "-q", "flush"])
subprocess.check_call(["pfctl", "-f", "/etc/pf.conf"])
return True
except subprocess.CalledProcessError:
return False
class WinDelayHandler:
proc = None
def delay(self, sec, duration=10.0):
netfil = "tcp.DstPort == 389 or tcp.SrcPort == 389"
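        # Note: only plain LDAP traffic (port 389) is intercepted here; 636 is
        # left undelayed, unlike the Linux and Mac handlers above.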
start = time.time()
with pydivert.WinDivert(netfil) as divert:
for packet in divert:
time.sleep(sec)
divert.send(packet)
if time.time() - start >= duration:
break
def set_delay(self, sec, duration=10.0):
""" Set network delay, return with the call's result. """
self.proc = mp.Process(target=self.delay, args=(sec, duration))
self.proc.start()
return True
def remove_delay(self):
""" Remove network delay, return with the call's result. """
if self.proc is not None and self.proc.is_alive():
self.proc.terminate()
return True
if __name__ == "__main__":
if sys.platform == "win32":
handler = WinDelayHandler()
elif sys.platform == "darwin":
handler = MacDelayHandler()
else:
handler = LinuxDelayHandler()
    if isinstance(handler, LinuxDelayHandler):
        # Fix network collapse on certain Linux distros; the Win/Mac handlers
        # have no get_interface_name() to call here.
        subprocess.call(
            ["ip", "link", "set", handler.get_interface_name(), "qlen", "1000"]
        )
server = rpc.SimpleXMLRPCServer(("0.0.0.0", 8000))
server.register_function(handler.set_delay, "set_delay")
server.register_function(handler.remove_delay, "remove_delay")
server.serve_forever()
| mit | 509,284,448,804,491,300 | 29.02924 | 86 | 0.427848 | false | 4.655485 | false | false | false |
Karspexet/Karspexet | karspexet/ticket/tasks.py | 1 | 1458 | import logging
from django.conf import settings
from django.core.mail import send_mail
from django.contrib.sites.models import Site
from django.template.loader import render_to_string
logger = logging.getLogger(__file__)
def send_ticket_email_to_customer(reservation, email, name=None):
'''Send an email to the customer with a link to their tickets
If the supplied email is empty, this will silently fail. The reason for this is that this is used in the payment
flow, and if we raise an error here, it will crash the payment transaction, and at that point we have likely
charged someone's card without giving them tickets.
Therefore the trade-off is made that if the customer fails to provide a valid email address, they will not receive
an email. They will however, have another chance to send the reservation information via email at the
reservation-detail page.
'''
if not email:
return
if not name:
name = email
to_address = f'{name} <{email}>'
subject = 'Dina biljetter till Kårspexet'
site = Site.objects.get_current()
reservation_url = f'https://{site.domain}{reservation.get_absolute_url()}'
body = render_to_string('reservation_email.txt', {
'reservation': reservation,
'url': reservation_url,
})
send_mail(
subject,
body,
settings.TICKET_EMAIL_FROM_ADDRESS,
[to_address],
fail_silently=False,
)
| mit | 7,935,162,058,853,896,000 | 33.690476 | 118 | 0.693892 | false | 4.081232 | false | false | false |
jonparrott/nox | nox/command.py | 1 | 3925 | # Copyright 2016 Alethea Katherine Flowers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from typing import Any, Iterable, List, Optional, Sequence, Union
import py
from nox.logger import logger
from nox.popen import popen
class CommandFailed(Exception):
"""Raised when an executed command returns a non-success status code."""
def __init__(self, reason: str = None) -> None:
super(CommandFailed, self).__init__(reason)
self.reason = reason
def which(program: str, paths: Optional[List[str]]) -> str:
"""Finds the full path to an executable."""
full_path = None
if paths:
full_path = py.path.local.sysfind(program, paths=paths)
if full_path:
return full_path.strpath
full_path = py.path.local.sysfind(program)
if full_path:
return full_path.strpath
logger.error("Program {} not found.".format(program))
raise CommandFailed("Program {} not found".format(program))
def _clean_env(env: Optional[dict]) -> Optional[dict]:
if env is None:
return None
clean_env = {}
# Ensure systemroot is passed down, otherwise Windows will explode.
clean_env["SYSTEMROOT"] = os.environ.get("SYSTEMROOT", "")
clean_env.update(env)
return clean_env
def run(
args: Sequence[str],
*,
env: Optional[dict] = None,
silent: bool = False,
paths: Optional[List[str]] = None,
success_codes: Optional[Iterable[int]] = None,
log: bool = True,
external: bool = False,
**popen_kws: Any
) -> Union[str, bool]:
"""Run a command-line program."""
if success_codes is None:
success_codes = [0]
cmd, args = args[0], args[1:]
full_cmd = "{} {}".format(cmd, " ".join(args))
cmd_path = which(cmd, paths)
if log:
logger.info(full_cmd)
is_external_tool = paths is not None and not any(
cmd_path.startswith(path) for path in paths
)
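    # The command counts as "external" when it resolved outside the supplied
    # search paths (typically meaning it is not in the session's virtualenv).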
if is_external_tool:
if external == "error":
logger.error(
"Error: {} is not installed into the virtualenv, it is located at {}. "
"Pass external=True into run() to explicitly allow this.".format(
cmd, cmd_path
)
)
raise CommandFailed("External program disallowed.")
elif external is False:
logger.warning(
"Warning: {} is not installed into the virtualenv, it is located at {}. This might cause issues! "
"Pass external=True into run() to silence this message.".format(
cmd, cmd_path
)
)
env = _clean_env(env)
try:
return_code, output = popen(
[cmd_path] + list(args), silent=silent, env=env, **popen_kws
)
if return_code not in success_codes:
logger.error(
"Command {} failed with exit code {}{}".format(
full_cmd, return_code, ":" if silent else ""
)
)
if silent:
sys.stderr.write(output)
raise CommandFailed("Returned code {}".format(return_code))
if output:
logger.output(output)
return output if silent else True
except KeyboardInterrupt:
logger.error("Interrupted...")
raise
| apache-2.0 | -6,808,519,644,293,755,000 | 28.074074 | 118 | 0.593376 | false | 4.179979 | false | false | false |
pombredanne/discern | tastypie/resources.py | 1 | 94958 | from __future__ import with_statement
import sys
import logging
import warnings
import django
from django.conf import settings
try:
from django.conf.urls import patterns, url
except ImportError: # Django < 1.4
from django.conf.urls.defaults import patterns, url
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned, ValidationError
from django.core.urlresolvers import NoReverseMatch, reverse, resolve, Resolver404, get_script_prefix
from django.core.signals import got_request_exception
from django.db import transaction
from django.db.models.sql.constants import QUERY_TERMS
from django.http import HttpResponse, HttpResponseNotFound, Http404
from django.utils.cache import patch_cache_control, patch_vary_headers
from tastypie.authentication import Authentication
from tastypie.authorization import ReadOnlyAuthorization
from tastypie.bundle import Bundle
from tastypie.cache import NoCache
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.exceptions import NotFound, BadRequest, InvalidFilterError, HydrationError, InvalidSortError, ImmediateHttpResponse, Unauthorized
from tastypie import fields
from tastypie import http
from tastypie.paginator import Paginator
from tastypie.serializers import Serializer
from tastypie.throttle import BaseThrottle
from tastypie.utils import is_valid_jsonp_callback_value, dict_strip_unicode_keys, trailing_slash
from tastypie.utils.mime import determine_format, build_content_type
from tastypie.validation import Validation
try:
set
except NameError:
from sets import Set as set
# copycompat deprecated in Django 1.5. If python version is at least 2.5, it
# is safe to use the native python copy module.
# The ``copy`` module became function-friendly in Python 2.5 and
# ``copycompat`` was added in post-1.1.1 Django (r11901).
if sys.version_info >= (2,5):
try:
from copy import deepcopy
except ImportError:
from django.utils.copycompat import deepcopy
else:
# For python older than 2.5, we must be running a version of Django before
# copycompat was deprecated.
try:
from django.utils.copycompat import deepcopy
except ImportError:
from copy import deepcopy
# If ``csrf_exempt`` isn't present, stub it.
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
def csrf_exempt(func):
return func
# Django 1.5 has moved this constant up one level.
try:
from django.db.models.constants import LOOKUP_SEP
except ImportError:
from django.db.models.sql.constants import LOOKUP_SEP
class NOT_AVAILABLE:
def __str__(self):
return 'No such data is available.'
class ResourceOptions(object):
"""
A configuration class for ``Resource``.
Provides sane defaults and the logic needed to augment these settings with
the internal ``class Meta`` used on ``Resource`` subclasses.
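
    For example (a sketch, assuming a hypothetical ``Note`` model)::

        class NoteResource(ModelResource):
            class Meta:
                queryset = Note.objects.all()
                resource_name = 'note'
                allowed_methods = ['get', 'post']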
"""
serializer = Serializer()
authentication = Authentication()
authorization = ReadOnlyAuthorization()
cache = NoCache()
throttle = BaseThrottle()
validation = Validation()
paginator_class = Paginator
allowed_methods = ['get', 'post', 'put', 'delete', 'patch']
list_allowed_methods = None
detail_allowed_methods = None
limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)
max_limit = 1000
api_name = None
resource_name = None
urlconf_namespace = None
default_format = 'application/json'
filtering = {}
ordering = []
object_class = None
queryset = None
fields = []
excludes = []
include_resource_uri = True
include_absolute_url = False
always_return_data = False
collection_name = 'objects'
detail_uri_name = 'pk'
def __new__(cls, meta=None):
overrides = {}
# Handle overrides.
if meta:
for override_name in dir(meta):
# No internals please.
if not override_name.startswith('_'):
overrides[override_name] = getattr(meta, override_name)
allowed_methods = overrides.get('allowed_methods', ['get', 'post', 'put', 'delete', 'patch'])
if overrides.get('list_allowed_methods', None) is None:
overrides['list_allowed_methods'] = allowed_methods
if overrides.get('detail_allowed_methods', None) is None:
overrides['detail_allowed_methods'] = allowed_methods
return object.__new__(type('ResourceOptions', (cls,), overrides))
class DeclarativeMetaclass(type):
def __new__(cls, name, bases, attrs):
attrs['base_fields'] = {}
declared_fields = {}
# Inherit any fields from parent(s).
try:
parents = [b for b in bases if issubclass(b, Resource)]
# Simulate the MRO.
parents.reverse()
for p in parents:
parent_fields = getattr(p, 'base_fields', {})
for field_name, field_object in parent_fields.items():
attrs['base_fields'][field_name] = deepcopy(field_object)
except NameError:
pass
for field_name, obj in attrs.items():
# Look for ``dehydrated_type`` instead of doing ``isinstance``,
# which can break down if Tastypie is re-namespaced as something
# else.
if hasattr(obj, 'dehydrated_type'):
field = attrs.pop(field_name)
declared_fields[field_name] = field
attrs['base_fields'].update(declared_fields)
attrs['declared_fields'] = declared_fields
new_class = super(DeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
opts = getattr(new_class, 'Meta', None)
new_class._meta = ResourceOptions(opts)
if not getattr(new_class._meta, 'resource_name', None):
# No ``resource_name`` provided. Attempt to auto-name the resource.
class_name = new_class.__name__
name_bits = [bit for bit in class_name.split('Resource') if bit]
resource_name = ''.join(name_bits).lower()
new_class._meta.resource_name = resource_name
if getattr(new_class._meta, 'include_resource_uri', True):
if not 'resource_uri' in new_class.base_fields:
new_class.base_fields['resource_uri'] = fields.CharField(readonly=True)
elif 'resource_uri' in new_class.base_fields and not 'resource_uri' in attrs:
del(new_class.base_fields['resource_uri'])
for field_name, field_object in new_class.base_fields.items():
if hasattr(field_object, 'contribute_to_class'):
field_object.contribute_to_class(new_class, field_name)
return new_class
class Resource(object):
"""
Handles the data, request dispatch and responding to requests.
Serialization/deserialization is handled "at the edges" (i.e. at the
beginning/end of the request/response cycle) so that everything internally
is Python data structures.
This class tries to be non-model specific, so it can be hooked up to other
data sources, such as search results, files, other data, etc.
"""
__metaclass__ = DeclarativeMetaclass
def __init__(self, api_name=None):
self.fields = deepcopy(self.base_fields)
if not api_name is None:
self._meta.api_name = api_name
def __getattr__(self, name):
if name in self.fields:
return self.fields[name]
raise AttributeError(name)
def wrap_view(self, view):
"""
Wraps methods so they can be called in a more functional way as well
as handling exceptions better.
Note that if ``BadRequest`` or an exception with a ``response`` attr
are seen, there is special handling to either present a message back
to the user or return the response traveling with the exception.
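
        For example, custom endpoints added via ``prepend_urls`` are usually
        wrapped the same way ``base_urls`` does it (a sketch, assuming a
        hypothetical ``get_search`` view method)::

            url(r"^(?P<resource_name>%s)/search%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_search'), name="api_get_search"),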
"""
@csrf_exempt
def wrapper(request, *args, **kwargs):
try:
callback = getattr(self, view)
response = callback(request, *args, **kwargs)
                # Our response can vary based on a number of factors; use
# the cache class to determine what we should ``Vary`` on so
# caches won't return the wrong (cached) version.
varies = getattr(self._meta.cache, "varies", [])
if varies:
patch_vary_headers(response, varies)
if self._meta.cache.cacheable(request, response):
if self._meta.cache.cache_control():
# If the request is cacheable and we have a
# ``Cache-Control`` available then patch the header.
patch_cache_control(response, **self._meta.cache.cache_control())
if request.is_ajax() and not response.has_header("Cache-Control"):
# IE excessively caches XMLHttpRequests, so we're disabling
# the browser cache here.
# See http://www.enhanceie.com/ie/bugs.asp for details.
patch_cache_control(response, no_cache=True)
return response
except (BadRequest, fields.ApiFieldError), e:
data = {"error": e.args[0] if getattr(e, 'args') else ''}
return self.error_response(request, data, response_class=http.HttpBadRequest)
except ValidationError, e:
data = {"error": e.messages}
return self.error_response(request, data, response_class=http.HttpBadRequest)
except Exception, e:
if hasattr(e, 'response'):
return e.response
# A real, non-expected exception.
# Handle the case where the full traceback is more helpful
# than the serialized error.
if settings.DEBUG and getattr(settings, 'TASTYPIE_FULL_DEBUG', False):
raise
# Re-raise the error to get a proper traceback when the error
                # happened during a test case
if request.META.get('SERVER_NAME') == 'testserver':
raise
                # Rather than re-raising, we're going to do things similar to
# what Django does. The difference is returning a serialized
# error message.
return self._handle_500(request, e)
return wrapper
def _handle_500(self, request, exception):
import traceback
import sys
the_trace = '\n'.join(traceback.format_exception(*(sys.exc_info())))
response_class = http.HttpApplicationError
response_code = 500
NOT_FOUND_EXCEPTIONS = (NotFound, ObjectDoesNotExist, Http404)
if isinstance(exception, NOT_FOUND_EXCEPTIONS):
response_class = HttpResponseNotFound
response_code = 404
if settings.DEBUG:
data = {
"error_message": unicode(exception),
"traceback": the_trace,
}
return self.error_response(request, data, response_class=response_class)
# When DEBUG is False, send an error message to the admins (unless it's
# a 404, in which case we check the setting).
send_broken_links = getattr(settings, 'SEND_BROKEN_LINK_EMAILS', False)
if not response_code == 404 or send_broken_links:
log = logging.getLogger('django.request.tastypie')
log.error('Internal Server Error: %s' % request.path, exc_info=True,
extra={'status_code': response_code, 'request': request})
if django.VERSION < (1, 3, 0):
from django.core.mail import mail_admins
subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path)
try:
request_repr = repr(request)
except:
request_repr = "Request repr() unavailable"
message = "%s\n\n%s" % (the_trace, request_repr)
mail_admins(subject, message, fail_silently=True)
# Send the signal so other apps are aware of the exception.
got_request_exception.send(self.__class__, request=request)
# Prep the data going out.
data = {
"error_message": getattr(settings, 'TASTYPIE_CANNED_ERROR', "Sorry, this request could not be processed. Please try again later."),
"traceback": the_trace,
}
return self.error_response(request, data, response_class=response_class)
def _build_reverse_url(self, name, args=None, kwargs=None):
"""
A convenience hook for overriding how URLs are built.
See ``NamespacedModelResource._build_reverse_url`` for an example.
"""
return reverse(name, args=args, kwargs=kwargs)
def base_urls(self):
"""
The standard URLs this ``Resource`` should respond to.
"""
return [
url(r"^(?P<resource_name>%s)%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_list'), name="api_dispatch_list"),
url(r"^(?P<resource_name>%s)/schema%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_schema'), name="api_get_schema"),
url(r"^(?P<resource_name>%s)/set/(?P<%s_list>\w[\w/;-]*)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash()), self.wrap_view('get_multiple'), name="api_get_multiple"),
url(r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash()), self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
]
def override_urls(self):
"""
Deprecated. Will be removed by v1.0.0. Please use ``prepend_urls`` instead.
"""
return []
def prepend_urls(self):
"""
A hook for adding your own URLs or matching before the default URLs.
"""
return []
@property
def urls(self):
"""
The endpoints this ``Resource`` responds to.
Mostly a standard URLconf, this is suitable for either automatic use
when registered with an ``Api`` class or for including directly in
a URLconf should you choose to.
"""
urls = self.prepend_urls()
overridden_urls = self.override_urls()
if overridden_urls:
warnings.warn("'override_urls' is a deprecated method & will be removed by v1.0.0. Please rename your method to ``prepend_urls``.")
urls += overridden_urls
urls += self.base_urls()
urlpatterns = patterns('',
*urls
)
return urlpatterns
def determine_format(self, request):
"""
Used to determine the desired format.
        Largely relies on ``tastypie.utils.mime.determine_format`` but is
        provided here as a point of extension.
"""
return determine_format(request, self._meta.serializer, default_format=self._meta.default_format)
def serialize(self, request, data, format, options=None):
"""
Given a request, data and a desired format, produces a serialized
version suitable for transfer over the wire.
Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
"""
options = options or {}
if 'text/javascript' in format:
# get JSONP callback name. default to "callback"
callback = request.GET.get('callback', 'callback')
if not is_valid_jsonp_callback_value(callback):
raise BadRequest('JSONP callback name is invalid.')
options['callback'] = callback
return self._meta.serializer.serialize(data, format, options)
def deserialize(self, request, data, format='application/json'):
"""
Given a request, data and a format, deserializes the given data.
It relies on the request properly sending a ``CONTENT_TYPE`` header,
falling back to ``application/json`` if not provided.
Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
"""
deserialized = self._meta.serializer.deserialize(data, format=request.META.get('CONTENT_TYPE', 'application/json'))
return deserialized
def alter_list_data_to_serialize(self, request, data):
"""
A hook to alter list data just before it gets serialized & sent to the user.
        Useful for restructuring/renaming aspects of what's going to be
sent.
        Should accommodate a list of objects, generally also including
meta data.
"""
return data
def alter_detail_data_to_serialize(self, request, data):
"""
A hook to alter detail data just before it gets serialized & sent to the user.
        Useful for restructuring/renaming aspects of what's going to be
sent.
        Should accommodate receiving a single bundle of data.
"""
return data
def alter_deserialized_list_data(self, request, data):
"""
        A hook to alter list data just after it has been received from the
        user and deserialized.
Useful for altering the user data before any hydration is applied.
"""
return data
def alter_deserialized_detail_data(self, request, data):
"""
        A hook to alter detail data just after it has been received from the
        user and deserialized.
Useful for altering the user data before any hydration is applied.
"""
return data
def dispatch_list(self, request, **kwargs):
"""
A view for handling the various HTTP methods (GET/POST/PUT/DELETE) over
the entire list of resources.
Relies on ``Resource.dispatch`` for the heavy-lifting.
"""
return self.dispatch('list', request, **kwargs)
def dispatch_detail(self, request, **kwargs):
"""
A view for handling the various HTTP methods (GET/POST/PUT/DELETE) on
a single resource.
Relies on ``Resource.dispatch`` for the heavy-lifting.
"""
return self.dispatch('detail', request, **kwargs)
def dispatch(self, request_type, request, **kwargs):
"""
Handles the common operations (allowed HTTP method, authentication,
throttling, method lookup) surrounding most CRUD interactions.
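
        Note that an ``X-HTTP-Method-Override`` header, when present, takes
        precedence over the actual HTTP method used.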
"""
allowed_methods = getattr(self._meta, "%s_allowed_methods" % request_type, None)
if 'HTTP_X_HTTP_METHOD_OVERRIDE' in request.META:
request.method = request.META['HTTP_X_HTTP_METHOD_OVERRIDE']
request_method = self.method_check(request, allowed=allowed_methods)
method = getattr(self, "%s_%s" % (request_method, request_type), None)
if method is None:
raise ImmediateHttpResponse(response=http.HttpNotImplemented())
self.is_authenticated(request)
self.throttle_check(request)
# All clear. Process the request.
request = convert_post_to_put(request)
response = method(request, **kwargs)
# Add the throttled request.
self.log_throttled_access(request)
        # If what comes back isn't an ``HttpResponse``, assume that the
# request was accepted and that some action occurred. This also
# prevents Django from freaking out.
if not isinstance(response, HttpResponse):
return http.HttpNoContent()
return response
def remove_api_resource_names(self, url_dict):
"""
Given a dictionary of regex matches from a URLconf, removes
``api_name`` and/or ``resource_name`` if found.
This is useful for converting URLconf matches into something suitable
for data lookup. For example::
Model.objects.filter(**self.remove_api_resource_names(matches))
"""
kwargs_subset = url_dict.copy()
for key in ['api_name', 'resource_name']:
try:
del(kwargs_subset[key])
except KeyError:
pass
return kwargs_subset
def method_check(self, request, allowed=None):
"""
Ensures that the HTTP method used on the request is allowed to be
handled by the resource.
Takes an ``allowed`` parameter, which should be a list of lowercase
HTTP methods to check against. Usually, this looks like::
# The most generic lookup.
self.method_check(request, self._meta.allowed_methods)
# A lookup against what's allowed for list-type methods.
self.method_check(request, self._meta.list_allowed_methods)
# A useful check when creating a new endpoint that only handles
# GET.
self.method_check(request, ['get'])
"""
if allowed is None:
allowed = []
request_method = request.method.lower()
allows = ','.join(map(str.upper, allowed))
if request_method == "options":
response = HttpResponse(allows)
response['Allow'] = allows
raise ImmediateHttpResponse(response=response)
if not request_method in allowed:
response = http.HttpMethodNotAllowed(allows)
response['Allow'] = allows
raise ImmediateHttpResponse(response=response)
return request_method
def is_authenticated(self, request):
"""
Handles checking if the user is authenticated and dealing with
unauthenticated users.
Mostly a hook, this uses class assigned to ``authentication`` from
``Resource._meta``.
"""
# Authenticate the request as needed.
auth_result = self._meta.authentication.is_authenticated(request)
if isinstance(auth_result, HttpResponse):
raise ImmediateHttpResponse(response=auth_result)
if not auth_result is True:
raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def throttle_check(self, request):
"""
Handles checking if the user should be throttled.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
request_method = request.method.lower()
identifier = self._meta.authentication.get_identifier(request)
# Check to see if they should be throttled.
if self._meta.throttle.should_be_throttled(identifier, url=request.get_full_path(), request_method=request_method):
# Throttle limit exceeded.
raise ImmediateHttpResponse(response=http.HttpTooManyRequests())
def log_throttled_access(self, request):
"""
Handles the recording of the user's access for throttling purposes.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
request_method = request.method.lower()
self._meta.throttle.accessed(self._meta.authentication.get_identifier(request), url=request.get_full_path(), request_method=request_method)
def unauthorized_result(self, exception):
raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def authorized_read_list(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to GET this resource.
"""
try:
auth_result = self._meta.authorization.read_list(object_list, bundle)
except Unauthorized, e:
self.unauthorized_result(e)
return auth_result
def authorized_read_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to GET this resource.
"""
try:
auth_result = self._meta.authorization.read_detail(object_list, bundle)
if not auth_result is True:
raise Unauthorized()
except Unauthorized, e:
self.unauthorized_result(e)
return auth_result
def authorized_create_list(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to POST this resource.
"""
try:
auth_result = self._meta.authorization.create_list(object_list, bundle)
except Unauthorized, e:
self.unauthorized_result(e)
return auth_result
def authorized_create_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to POST this resource.
"""
try:
auth_result = self._meta.authorization.create_detail(object_list, bundle)
if not auth_result is True:
raise Unauthorized()
except Unauthorized, e:
self.unauthorized_result(e)
return auth_result
def authorized_update_list(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to PUT this resource.
"""
try:
auth_result = self._meta.authorization.update_list(object_list, bundle)
except Unauthorized, e:
self.unauthorized_result(e)
return auth_result
def authorized_update_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to PUT this resource.
"""
try:
auth_result = self._meta.authorization.update_detail(object_list, bundle)
if not auth_result is True:
raise Unauthorized()
except Unauthorized, e:
self.unauthorized_result(e)
return auth_result
def authorized_delete_list(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to DELETE this resource.
"""
try:
auth_result = self._meta.authorization.delete_list(object_list, bundle)
except Unauthorized, e:
self.unauthorized_result(e)
return auth_result
def authorized_delete_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to DELETE this resource.
"""
try:
auth_result = self._meta.authorization.delete_detail(object_list, bundle)
if not auth_result:
raise Unauthorized()
except Unauthorized, e:
self.unauthorized_result(e)
return auth_result
def build_bundle(self, obj=None, data=None, request=None, objects_saved=None):
"""
Given either an object, a data dictionary or both, builds a ``Bundle``
for use throughout the ``dehydrate/hydrate`` cycle.
If no object is provided, an empty object from
``Resource._meta.object_class`` is created so that attempts to access
``bundle.obj`` do not fail.
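
        For example (``note`` standing in for any instance of
        ``Resource._meta.object_class``)::

            bundle = self.build_bundle(request=request)
            bundle = self.build_bundle(obj=note, data={'title': u'Hello'}, request=request)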
"""
if obj is None:
obj = self._meta.object_class()
return Bundle(
obj=obj,
data=data,
request=request,
objects_saved=objects_saved
)
def build_filters(self, filters=None):
"""
Allows for the filtering of applicable objects.
        This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
return filters
def apply_sorting(self, obj_list, options=None):
"""
Allows for the sorting of objects being returned.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
return obj_list
def get_bundle_detail_data(self, bundle):
"""
Convenience method to return the ``detail_uri_name`` attribute off
``bundle.obj``.
Usually just accesses ``bundle.obj.pk`` by default.
"""
return getattr(bundle.obj, self._meta.detail_uri_name)
# URL-related methods.
def detail_uri_kwargs(self, bundle_or_obj):
"""
This needs to be implemented at the user level.
Given a ``Bundle`` or an object, it returns the extra kwargs needed to
generate a detail URI.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def resource_uri_kwargs(self, bundle_or_obj=None):
"""
Builds a dictionary of kwargs to help generate URIs.
Automatically provides the ``Resource.Meta.resource_name`` (and
optionally the ``Resource.Meta.api_name`` if populated by an ``Api``
object).
If the ``bundle_or_obj`` argument is provided, it calls
        ``Resource.detail_uri_kwargs`` for the additional bits needed to
        create the detail URI.
"""
kwargs = {
'resource_name': self._meta.resource_name,
}
if self._meta.api_name is not None:
kwargs['api_name'] = self._meta.api_name
if bundle_or_obj is not None:
kwargs.update(self.detail_uri_kwargs(bundle_or_obj))
return kwargs
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_list'):
"""
Handles generating a resource URI.
If the ``bundle_or_obj`` argument is not provided, it builds the URI
for the list endpoint.
If the ``bundle_or_obj`` argument is provided, it builds the URI for
the detail endpoint.
Return the generated URI. If that URI can not be reversed (not found
in the URLconf), it will return an empty string.
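
        For example (assuming a hypothetical ``NoteResource`` registered
        under the ``v1`` api)::

            self.get_resource_uri()        # u'/api/v1/note/'
            self.get_resource_uri(bundle)  # u'/api/v1/note/1/'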
"""
if bundle_or_obj is not None:
url_name = 'api_dispatch_detail'
try:
return self._build_reverse_url(url_name, kwargs=self.resource_uri_kwargs(bundle_or_obj))
except NoReverseMatch:
return ''
def get_via_uri(self, uri, request=None):
"""
This pulls apart the salient bits of the URI and populates the
resource via a ``obj_get``.
Optionally accepts a ``request``.
If you need custom behavior based on other portions of the URI,
simply override this method.
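
        For example (assuming the hypothetical ``NoteResource`` above)::

            obj = self.get_via_uri('/api/v1/note/1/', request=request)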
"""
prefix = get_script_prefix()
chomped_uri = uri
if prefix and chomped_uri.startswith(prefix):
chomped_uri = chomped_uri[len(prefix)-1:]
try:
view, args, kwargs = resolve(chomped_uri)
except Resolver404:
raise NotFound("The URL provided '%s' was not a link to a valid resource." % uri)
bundle = self.build_bundle(request=request)
return self.obj_get(bundle=bundle, **self.remove_api_resource_names(kwargs))
# Data preparation.
def full_dehydrate(self, bundle, for_list=False):
"""
Given a bundle with an object instance, extract the information from it
to populate the resource.
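
        Fields may opt out of either the list or detail representation via
        their ``use_in`` argument, which may also be a callable that takes
        the bundle.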
"""
use_in = ['all', 'list' if for_list else 'detail']
# Dehydrate each field.
for field_name, field_object in self.fields.items():
# If it's not for use in this mode, skip
field_use_in = getattr(field_object, 'use_in', 'all')
if callable(field_use_in):
if not field_use_in(bundle):
continue
else:
if field_use_in not in use_in:
continue
# A touch leaky but it makes URI resolution work.
if getattr(field_object, 'dehydrated_type', None) == 'related':
field_object.api_name = self._meta.api_name
field_object.resource_name = self._meta.resource_name
bundle.data[field_name] = field_object.dehydrate(bundle, for_list=for_list)
# Check for an optional method to do further dehydration.
method = getattr(self, "dehydrate_%s" % field_name, None)
if method:
bundle.data[field_name] = method(bundle)
bundle = self.dehydrate(bundle)
return bundle
def dehydrate(self, bundle):
"""
A hook to allow a final manipulation of data once all fields/methods
have built out the dehydrated data.
Useful if you need to access more than one dehydrated field or want
to annotate on additional data.
Must return the modified bundle.
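
        For example (a sketch, assuming hypothetical ``first_name`` and
        ``last_name`` fields)::

            def dehydrate(self, bundle):
                bundle.data['full_name'] = u'%s %s' % (
                    bundle.data['first_name'], bundle.data['last_name'])
                return bundle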
"""
return bundle
def full_hydrate(self, bundle):
"""
Given a populated bundle, distill it and turn it back into
a full-fledged object instance.
"""
if bundle.obj is None:
bundle.obj = self._meta.object_class()
bundle = self.hydrate(bundle)
for field_name, field_object in self.fields.items():
if field_object.readonly is True:
continue
# Check for an optional method to do further hydration.
method = getattr(self, "hydrate_%s" % field_name, None)
if method:
bundle = method(bundle)
if field_object.attribute:
value = field_object.hydrate(bundle)
                # NOTE: We only get back a bundle when it is a related field.
if isinstance(value, Bundle) and value.errors.get(field_name):
bundle.errors[field_name] = value.errors[field_name]
if value is not None or field_object.null:
# We need to avoid populating M2M data here as that will
# cause things to blow up.
if not getattr(field_object, 'is_related', False):
setattr(bundle.obj, field_object.attribute, value)
elif not getattr(field_object, 'is_m2m', False):
if value is not None:
# NOTE: A bug fix in Django (ticket #18153) fixes incorrect behavior
# which Tastypie was relying on. To fix this, we store value.obj to
# be saved later in save_related.
try:
setattr(bundle.obj, field_object.attribute, value.obj)
except (ValueError, ObjectDoesNotExist):
bundle.related_objects_to_save[field_object.attribute] = value.obj
elif field_object.blank:
continue
elif field_object.null:
setattr(bundle.obj, field_object.attribute, value)
return bundle
def hydrate(self, bundle):
"""
A hook to allow an initial manipulation of data before all methods/fields
have built out the hydrated data.
Useful if you need to access more than one hydrated field or want
to annotate on additional data.
Must return the modified bundle.
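
        For example (a sketch, assuming a hypothetical ``title`` field)::

            def hydrate(self, bundle):
                bundle.data['title'] = bundle.data.get('title', u'').strip()
                return bundle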
"""
return bundle
def hydrate_m2m(self, bundle):
"""
Populate the ManyToMany data on the instance.
"""
if bundle.obj is None:
raise HydrationError("You must call 'full_hydrate' before attempting to run 'hydrate_m2m' on %r." % self)
for field_name, field_object in self.fields.items():
if not getattr(field_object, 'is_m2m', False):
continue
if field_object.attribute:
# Note that we only hydrate the data, leaving the instance
# unmodified. It's up to the user's code to handle this.
# The ``ModelResource`` provides a working baseline
# in this regard.
bundle.data[field_name] = field_object.hydrate_m2m(bundle)
for field_name, field_object in self.fields.items():
if not getattr(field_object, 'is_m2m', False):
continue
method = getattr(self, "hydrate_%s" % field_name, None)
if method:
method(bundle)
return bundle
def build_schema(self):
"""
Returns a dictionary of all the fields on the resource and some
properties about those fields.
Used by the ``schema/`` endpoint to describe what will be available.
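
        An abbreviated example of the returned structure::

            {
                'default_format': 'application/json',
                'allowed_list_http_methods': ['get', 'post', 'put', 'delete', 'patch'],
                'fields': {
                    'title': {'type': 'string', 'nullable': False, ...},
                    ...
                },
            }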
"""
data = {
'fields': {},
'default_format': self._meta.default_format,
'allowed_list_http_methods': self._meta.list_allowed_methods,
'allowed_detail_http_methods': self._meta.detail_allowed_methods,
'default_limit': self._meta.limit,
}
if self._meta.ordering:
data['ordering'] = self._meta.ordering
if self._meta.filtering:
data['filtering'] = self._meta.filtering
for field_name, field_object in self.fields.items():
data['fields'][field_name] = {
'default': field_object.default,
'type': field_object.dehydrated_type,
'nullable': field_object.null,
'blank': field_object.blank,
'readonly': field_object.readonly,
'help_text': field_object.help_text,
'unique': field_object.unique,
}
if field_object.dehydrated_type == 'related':
if getattr(field_object, 'is_m2m', False):
related_type = 'to_many'
else:
related_type = 'to_one'
data['fields'][field_name]['related_type'] = related_type
return data
def dehydrate_resource_uri(self, bundle):
"""
For the automatically included ``resource_uri`` field, dehydrate
the URI for the given bundle.
Returns empty string if no URI can be generated.
"""
try:
return self.get_resource_uri(bundle)
except NotImplementedError:
return ''
except NoReverseMatch:
return ''
def generate_cache_key(self, *args, **kwargs):
"""
Creates a unique-enough cache key.
This is based off the current api_name/resource_name/args/kwargs.
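
        For example, a detail lookup might produce (assuming an ``api_name``
        of ``v1`` and a ``resource_name`` of ``note``)::

            'v1:note:detail:pk=1'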
"""
smooshed = []
for key, value in kwargs.items():
smooshed.append("%s=%s" % (key, value))
# Use a list plus a ``.join()`` because it's faster than concatenation.
return "%s:%s:%s:%s" % (self._meta.api_name, self._meta.resource_name, ':'.join(args), ':'.join(smooshed))
# Data access methods.
def get_object_list(self, request):
"""
        A hook for returning the list of available objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def apply_authorization_limits(self, request, object_list):
"""
Deprecated.
FIXME: REMOVE BEFORE 1.0
"""
return self._meta.authorization.apply_limits(request, object_list)
def can_create(self):
"""
Checks to ensure ``post`` is within ``allowed_methods``.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'post' in allowed
def can_update(self):
"""
Checks to ensure ``put`` is within ``allowed_methods``.
Used when hydrating related data.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'put' in allowed
def can_delete(self):
"""
Checks to ensure ``delete`` is within ``allowed_methods``.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'delete' in allowed
def apply_filters(self, request, applicable_filters):
"""
A hook to alter how the filters are applied to the object list.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_get_list(self, bundle, **kwargs):
"""
Fetches the list of objects available on the resource.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def cached_obj_get_list(self, bundle, **kwargs):
"""
A version of ``obj_get_list`` that uses the cache as a means to get
commonly-accessed data faster.
"""
cache_key = self.generate_cache_key('list', **kwargs)
obj_list = self._meta.cache.get(cache_key)
if obj_list is None:
obj_list = self.obj_get_list(bundle=bundle, **kwargs)
self._meta.cache.set(cache_key, obj_list)
return obj_list
def obj_get(self, bundle, **kwargs):
"""
Fetches an individual object on the resource.
This needs to be implemented at the user level. If the object can not
be found, this should raise a ``NotFound`` exception.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def cached_obj_get(self, bundle, **kwargs):
"""
A version of ``obj_get`` that uses the cache as a means to get
commonly-accessed data faster.
"""
cache_key = self.generate_cache_key('detail', **kwargs)
cached_bundle = self._meta.cache.get(cache_key)
if cached_bundle is None:
cached_bundle = self.obj_get(bundle=bundle, **kwargs)
self._meta.cache.set(cache_key, cached_bundle)
return cached_bundle
def obj_create(self, bundle, **kwargs):
"""
Creates a new object based on the provided data.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_update(self, bundle, **kwargs):
"""
Updates an existing object (or creates a new object) based on the
provided data.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete_list(self, bundle, **kwargs):
"""
Deletes an entire list of objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete_list_for_update(self, bundle, **kwargs):
"""
        Deletes an entire list of objects; used by ``put_list`` when
        replacing a collection.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete(self, bundle, **kwargs):
"""
Deletes a single object.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def create_response(self, request, data, response_class=HttpResponse, **response_kwargs):
"""
Extracts the common "which-format/serialize/return-response" cycle.
Mostly a useful shortcut/hook.
"""
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
return response_class(content=serialized, content_type=build_content_type(desired_format), **response_kwargs)
def error_response(self, request, errors, response_class=None):
"""
Extracts the common "which-format/serialize/return-error-response"
cycle.
Should be used as much as possible to return errors.
"""
if response_class is None:
response_class = http.HttpBadRequest
desired_format = None
if request:
if request.GET.get('callback', None) is None:
try:
desired_format = self.determine_format(request)
except BadRequest:
pass # Fall through to default handler below
else:
# JSONP can cause extra breakage.
desired_format = 'application/json'
if not desired_format:
desired_format = self._meta.default_format
try:
serialized = self.serialize(request, errors, desired_format)
except BadRequest, e:
error = "Additional errors occurred, but serialization of those errors failed."
if settings.DEBUG:
error += " %s" % e
return response_class(content=error, content_type='text/plain')
return response_class(content=serialized, content_type=build_content_type(desired_format))
def is_valid(self, bundle):
"""
Handles checking if the data provided by the user is valid.
Mostly a hook, this uses class assigned to ``validation`` from
``Resource._meta``.
If validation fails, an error is raised with the error messages
serialized inside it.
"""
errors = self._meta.validation.is_valid(bundle, bundle.request)
if errors:
bundle.errors[self._meta.resource_name] = errors
return False
return True
def rollback(self, bundles):
"""
Given the list of bundles, delete all objects pertaining to those
bundles.
This needs to be implemented at the user level. No exceptions should
be raised if possible.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
# Views.
def get_list(self, request, **kwargs):
"""
Returns a serialized list of resources.
Calls ``obj_get_list`` to provide the data, then handles that result
set and serializes it.
        Should return an HttpResponse (200 OK).
"""
# TODO: Uncached for now. Invalidation that works for everyone may be
# impossible.
base_bundle = self.build_bundle(request=request)
objects = self.obj_get_list(bundle=base_bundle, **self.remove_api_resource_names(kwargs))
sorted_objects = self.apply_sorting(objects, options=request.GET)
paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=self.get_resource_uri(), limit=self._meta.limit, max_limit=self._meta.max_limit, collection_name=self._meta.collection_name)
to_be_serialized = paginator.page()
# Dehydrate the bundles in preparation for serialization.
bundles = []
for obj in to_be_serialized[self._meta.collection_name]:
bundle = self.build_bundle(obj=obj, request=request)
bundles.append(self.full_dehydrate(bundle, for_list=True))
to_be_serialized[self._meta.collection_name] = bundles
to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
return self.create_response(request, to_be_serialized)
def get_detail(self, request, **kwargs):
"""
Returns a single serialized resource.
Calls ``cached_obj_get/obj_get`` to provide the data, then handles that result
set and serializes it.
        Should return an HttpResponse (200 OK).
"""
basic_bundle = self.build_bundle(request=request)
try:
obj = self.cached_obj_get(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
except ObjectDoesNotExist:
return http.HttpNotFound()
except MultipleObjectsReturned:
            return http.HttpMultipleChoices("More than one resource was found at this URI.")
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
return self.create_response(request, bundle)
def post_list(self, request, **kwargs):
"""
Creates a new resource/object with the provided data.
Calls ``obj_create`` with the provided data and returns a response
with the new resource's location.
If a new resource is created, return ``HttpCreated`` (201 Created).
If ``Meta.always_return_data = True``, there will be a populated body
of serialized data.
"""
if django.VERSION >= (1, 4):
body = request.body
else:
body = request.raw_post_data
deserialized = self.deserialize(request, body, format=request.META.get('CONTENT_TYPE', 'application/json'))
deserialized = self.alter_deserialized_detail_data(request, deserialized)
bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
updated_bundle = self.obj_create(bundle, **self.remove_api_resource_names(kwargs))
location = self.get_resource_uri(updated_bundle)
if not self._meta.always_return_data:
return http.HttpCreated(location=location)
else:
updated_bundle = self.full_dehydrate(updated_bundle)
updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
def post_detail(self, request, **kwargs):
"""
Creates a new subcollection of the resource under a resource.
This is not implemented by default because most people's data models
aren't self-referential.
If a new resource is created, return ``HttpCreated`` (201 Created).
"""
return http.HttpNotImplemented()
def put_list(self, request, **kwargs):
"""
Replaces a collection of resources with another collection.
Calls ``delete_list`` to clear out the collection then ``obj_create``
        with the provided data to create the new collection.
Return ``HttpNoContent`` (204 No Content) if
``Meta.always_return_data = False`` (default).
Return ``HttpAccepted`` (202 Accepted) if
``Meta.always_return_data = True``.
"""
if django.VERSION >= (1, 4):
body = request.body
else:
body = request.raw_post_data
deserialized = self.deserialize(request, body, format=request.META.get('CONTENT_TYPE', 'application/json'))
deserialized = self.alter_deserialized_list_data(request, deserialized)
if not self._meta.collection_name in deserialized:
raise BadRequest("Invalid data sent.")
basic_bundle = self.build_bundle(request=request)
self.obj_delete_list_for_update(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
bundles_seen = []
for object_data in deserialized[self._meta.collection_name]:
bundle = self.build_bundle(data=dict_strip_unicode_keys(object_data), request=request)
# Attempt to be transactional, deleting any previously created
# objects if validation fails.
try:
self.obj_create(bundle=bundle, **self.remove_api_resource_names(kwargs))
bundles_seen.append(bundle)
except ImmediateHttpResponse:
self.rollback(bundles_seen)
raise
if not self._meta.always_return_data:
return http.HttpNoContent()
else:
to_be_serialized = {}
to_be_serialized[self._meta.collection_name] = [self.full_dehydrate(bundle, for_list=True) for bundle in bundles_seen]
to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
return self.create_response(request, to_be_serialized, response_class=http.HttpAccepted)
def put_detail(self, request, **kwargs):
"""
Either updates an existing resource or creates a new one with the
provided data.
Calls ``obj_update`` with the provided data first, but falls back to
``obj_create`` if the object does not already exist.
If a new resource is created, return ``HttpCreated`` (201 Created).
If ``Meta.always_return_data = True``, there will be a populated body
of serialized data.
If an existing resource is modified and
``Meta.always_return_data = False`` (default), return ``HttpNoContent``
(204 No Content).
If an existing resource is modified and
``Meta.always_return_data = True``, return ``HttpAccepted`` (202
Accepted).
"""
if django.VERSION >= (1, 4):
body = request.body
else:
body = request.raw_post_data
deserialized = self.deserialize(request, body, format=request.META.get('CONTENT_TYPE', 'application/json'))
deserialized = self.alter_deserialized_detail_data(request, deserialized)
bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
try:
updated_bundle = self.obj_update(bundle=bundle, **self.remove_api_resource_names(kwargs))
if not self._meta.always_return_data:
return http.HttpNoContent()
else:
updated_bundle = self.full_dehydrate(updated_bundle)
updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
return self.create_response(request, updated_bundle, response_class=http.HttpAccepted)
except (NotFound, MultipleObjectsReturned):
updated_bundle = self.obj_create(bundle=bundle, **self.remove_api_resource_names(kwargs))
location = self.get_resource_uri(updated_bundle)
if not self._meta.always_return_data:
return http.HttpCreated(location=location)
else:
updated_bundle = self.full_dehydrate(updated_bundle)
updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
def delete_list(self, request, **kwargs):
"""
Destroys a collection of resources/objects.
Calls ``obj_delete_list``.
If the resources are deleted, return ``HttpNoContent`` (204 No Content).
"""
bundle = self.build_bundle(request=request)
self.obj_delete_list(bundle=bundle, request=request, **self.remove_api_resource_names(kwargs))
return http.HttpNoContent()
def delete_detail(self, request, **kwargs):
"""
Destroys a single resource/object.
Calls ``obj_delete``.
If the resource is deleted, return ``HttpNoContent`` (204 No Content).
If the resource did not exist, return ``Http404`` (404 Not Found).
"""
# Manually construct the bundle here, since we don't want to try to
# delete an empty instance.
bundle = Bundle(request=request)
try:
self.obj_delete(bundle=bundle, **self.remove_api_resource_names(kwargs))
return http.HttpNoContent()
except NotFound:
return http.HttpNotFound()
def patch_list(self, request, **kwargs):
"""
Updates a collection in-place.
        The exact behavior of ``PATCH`` to a list resource is still a matter of
some debate in REST circles, and the ``PATCH`` RFC isn't standard. So the
behavior this method implements (described below) is something of a
stab in the dark. It's mostly cribbed from GData, with a smattering
of ActiveResource-isms and maybe even an original idea or two.
The ``PATCH`` format is one that's similar to the response returned from
a ``GET`` on a list resource::
{
"objects": [{object}, {object}, ...],
"deleted_objects": ["URI", "URI", "URI", ...],
}
For each object in ``objects``:
* If the dict does not have a ``resource_uri`` key then the item is
considered "new" and is handled like a ``POST`` to the resource list.
* If the dict has a ``resource_uri`` key and the ``resource_uri`` refers
to an existing resource then the item is a update; it's treated
like a ``PATCH`` to the corresponding resource detail.
* If the dict has a ``resource_uri`` but the resource *doesn't* exist,
then this is considered to be a create-via-``PUT``.
        Each entry in ``deleted_objects`` refers to the resource URI of an existing
        resource to be deleted; each is handled like a ``DELETE`` to the relevant
resource.
In any case:
* If there's a resource URI it *must* refer to a resource of this
type. It's an error to include a URI of a different resource.
* ``PATCH`` is all or nothing. If a single sub-operation fails, the
entire request will fail and all resources will be rolled back.
* For ``PATCH`` to work, you **must** have ``put`` in your
:ref:`detail-allowed-methods` setting.
* To delete objects via ``deleted_objects`` in a ``PATCH`` request you
**must** have ``delete`` in your :ref:`detail-allowed-methods`
setting.
Substitute appropriate names for ``objects`` and
``deleted_objects`` if ``Meta.collection_name`` is set to something
other than ``objects`` (default).
"""
request = convert_post_to_patch(request)
if django.VERSION >= (1, 4):
body = request.body
else:
body = request.raw_post_data
deserialized = self.deserialize(request, body, format=request.META.get('CONTENT_TYPE', 'application/json'))
collection_name = self._meta.collection_name
deleted_collection_name = 'deleted_%s' % collection_name
if collection_name not in deserialized:
raise BadRequest("Invalid data sent: missing '%s'" % collection_name)
if len(deserialized[collection_name]) and 'put' not in self._meta.detail_allowed_methods:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
bundles_seen = []
for data in deserialized[collection_name]:
# If there's a resource_uri then this is either an
# update-in-place or a create-via-PUT.
if "resource_uri" in data:
uri = data.pop('resource_uri')
try:
obj = self.get_via_uri(uri, request=request)
# The object does exist, so this is an update-in-place.
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle, for_list=True)
bundle = self.alter_detail_data_to_serialize(request, bundle)
self.update_in_place(request, bundle, data)
except (ObjectDoesNotExist, MultipleObjectsReturned):
# The object referenced by resource_uri doesn't exist,
# so this is a create-by-PUT equivalent.
data = self.alter_deserialized_detail_data(request, data)
bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request)
self.obj_create(bundle=bundle)
else:
# There's no resource URI, so this is a create call just
# like a POST to the list resource.
data = self.alter_deserialized_detail_data(request, data)
bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request)
self.obj_create(bundle=bundle)
bundles_seen.append(bundle)
deleted_collection = deserialized.get(deleted_collection_name, [])
if deleted_collection:
if 'delete' not in self._meta.detail_allowed_methods:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
for uri in deleted_collection:
obj = self.get_via_uri(uri, request=request)
bundle = self.build_bundle(obj=obj, request=request)
self.obj_delete(bundle=bundle)
if not self._meta.always_return_data:
return http.HttpAccepted()
else:
            to_be_serialized = {}
            to_be_serialized[self._meta.collection_name] = [self.full_dehydrate(bundle, for_list=True) for bundle in bundles_seen]
to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
return self.create_response(request, to_be_serialized, response_class=http.HttpAccepted)
def patch_detail(self, request, **kwargs):
"""
Updates a resource in-place.
Calls ``obj_update``.
If the resource is updated, return ``HttpAccepted`` (202 Accepted).
If the resource did not exist, return ``HttpNotFound`` (404 Not Found).
"""
request = convert_post_to_patch(request)
basic_bundle = self.build_bundle(request=request)
# We want to be able to validate the update, but we can't just pass
# the partial data into the validator since all data needs to be
# present. Instead, we basically simulate a PUT by pulling out the
# original data and updating it in-place.
# So first pull out the original object. This is essentially
# ``get_detail``.
try:
obj = self.cached_obj_get(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
except ObjectDoesNotExist:
return http.HttpNotFound()
except MultipleObjectsReturned:
            return http.HttpMultipleChoices("More than one resource was found at this URI.")
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
# Now update the bundle in-place.
if django.VERSION >= (1, 4):
body = request.body
else:
body = request.raw_post_data
deserialized = self.deserialize(request, body, format=request.META.get('CONTENT_TYPE', 'application/json'))
self.update_in_place(request, bundle, deserialized)
if not self._meta.always_return_data:
return http.HttpAccepted()
else:
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
return self.create_response(request, bundle, response_class=http.HttpAccepted)
def update_in_place(self, request, original_bundle, new_data):
"""
Update the object in original_bundle in-place using new_data.
"""
original_bundle.data.update(**dict_strip_unicode_keys(new_data))
        # Now we've got a bundle with the new data sitting in it and we're
        # basically in the same spot as a PUT request. So the rest of this
# function is cribbed from put_detail.
        original_bundle.data = self.alter_deserialized_detail_data(request, original_bundle.data)
kwargs = {
self._meta.detail_uri_name: self.get_bundle_detail_data(original_bundle),
'request': request,
}
return self.obj_update(bundle=original_bundle, **kwargs)
def get_schema(self, request, **kwargs):
"""
Returns a serialized form of the schema of the resource.
Calls ``build_schema`` to generate the data. This method only responds
to HTTP GET.
        Should return an HttpResponse (200 OK).
"""
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
self.log_throttled_access(request)
bundle = self.build_bundle(request=request)
self.authorized_read_detail(self.get_object_list(bundle.request), bundle)
return self.create_response(request, self.build_schema())
def get_multiple(self, request, **kwargs):
"""
Returns a serialized list of resources based on the identifiers
from the URL.
Calls ``obj_get`` to fetch only the objects requested. This method
only responds to HTTP GET.
        Should return an HttpResponse (200 OK).
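
        For example, ``GET /api/v1/note/set/1;3;5/`` returns the objects
        matching those identifiers, plus a ``not_found`` list for any that
        could not be fetched.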
"""
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
# Rip apart the list then iterate.
kwarg_name = '%s_list' % self._meta.detail_uri_name
obj_identifiers = kwargs.get(kwarg_name, '').split(';')
objects = []
not_found = []
base_bundle = self.build_bundle(request=request)
for identifier in obj_identifiers:
try:
obj = self.obj_get(bundle=base_bundle, **{self._meta.detail_uri_name: identifier})
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle, for_list=True)
objects.append(bundle)
except (ObjectDoesNotExist, Unauthorized):
not_found.append(identifier)
object_list = {
self._meta.collection_name: objects,
}
if len(not_found):
object_list['not_found'] = not_found
self.log_throttled_access(request)
return self.create_response(request, object_list)
class ModelDeclarativeMetaclass(DeclarativeMetaclass):
def __new__(cls, name, bases, attrs):
meta = attrs.get('Meta')
if meta and hasattr(meta, 'queryset'):
setattr(meta, 'object_class', meta.queryset.model)
new_class = super(ModelDeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
include_fields = getattr(new_class._meta, 'fields', [])
excludes = getattr(new_class._meta, 'excludes', [])
field_names = new_class.base_fields.keys()
for field_name in field_names:
if field_name == 'resource_uri':
continue
if field_name in new_class.declared_fields:
continue
if len(include_fields) and not field_name in include_fields:
del(new_class.base_fields[field_name])
if len(excludes) and field_name in excludes:
del(new_class.base_fields[field_name])
# Add in the new fields.
new_class.base_fields.update(new_class.get_fields(include_fields, excludes))
if getattr(new_class._meta, 'include_absolute_url', True):
if not 'absolute_url' in new_class.base_fields:
new_class.base_fields['absolute_url'] = fields.CharField(attribute='get_absolute_url', readonly=True)
elif 'absolute_url' in new_class.base_fields and not 'absolute_url' in attrs:
del(new_class.base_fields['absolute_url'])
return new_class
class ModelResource(Resource):
"""
A subclass of ``Resource`` designed to work with Django's ``Models``.
This class will introspect a given ``Model`` and build a field list based
on the fields found on the model (excluding relational fields).
Given that it is aware of Django's ORM, it also handles the CRUD data
operations of the resource.
"""
__metaclass__ = ModelDeclarativeMetaclass
@classmethod
def should_skip_field(cls, field):
"""
        Given a Django model field, return whether it should be included in the
contributed ApiFields.
"""
# Ignore certain fields (related fields).
if getattr(field, 'rel'):
return True
return False
@classmethod
def api_field_from_django_field(cls, f, default=fields.CharField):
"""
Returns the field type that would likely be associated with each
Django type.
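
        For example, a Django ``DateTimeField`` maps to
        ``fields.DateTimeField``, while an ``AutoField`` maps to
        ``fields.IntegerField``; anything unrecognized falls back to
        ``default``.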
"""
result = default
internal_type = f.get_internal_type()
if internal_type in ('DateField', 'DateTimeField'):
result = fields.DateTimeField
elif internal_type in ('BooleanField', 'NullBooleanField'):
result = fields.BooleanField
elif internal_type in ('FloatField',):
result = fields.FloatField
elif internal_type in ('DecimalField',):
result = fields.DecimalField
elif internal_type in ('IntegerField', 'PositiveIntegerField', 'PositiveSmallIntegerField', 'SmallIntegerField', 'AutoField'):
result = fields.IntegerField
elif internal_type in ('FileField', 'ImageField'):
result = fields.FileField
elif internal_type == 'TimeField':
result = fields.TimeField
# TODO: Perhaps enable these via introspection. The reason they're not enabled
# by default is the very different ``__init__`` they have over
# the other fields.
# elif internal_type == 'ForeignKey':
# result = ForeignKey
# elif internal_type == 'ManyToManyField':
# result = ManyToManyField
return result
@classmethod
def get_fields(cls, fields=None, excludes=None):
"""
Given any explicit fields to include and fields to exclude, add
additional fields based on the associated model.
"""
final_fields = {}
fields = fields or []
excludes = excludes or []
if not cls._meta.object_class:
return final_fields
for f in cls._meta.object_class._meta.fields:
# If the field name is already present, skip
if f.name in cls.base_fields:
continue
# If field is not present in explicit field listing, skip
if fields and f.name not in fields:
continue
# If field is in exclude list, skip
if excludes and f.name in excludes:
continue
if cls.should_skip_field(f):
continue
api_field_class = cls.api_field_from_django_field(f)
kwargs = {
'attribute': f.name,
'help_text': f.help_text,
}
if f.null is True:
kwargs['null'] = True
kwargs['unique'] = f.unique
if not f.null and f.blank is True:
kwargs['default'] = ''
kwargs['blank'] = True
if f.get_internal_type() == 'TextField':
kwargs['default'] = ''
if f.has_default():
kwargs['default'] = f.default
if getattr(f, 'auto_now', False):
kwargs['default'] = f.auto_now
if getattr(f, 'auto_now_add', False):
kwargs['default'] = f.auto_now_add
final_fields[f.name] = api_field_class(**kwargs)
final_fields[f.name].instance_name = f.name
return final_fields
def check_filtering(self, field_name, filter_type='exact', filter_bits=None):
"""
        Given a field name, an optional filter type and an optional list of
additional relations, determine if a field can be filtered on.
If a filter does not meet the needed conditions, it should raise an
``InvalidFilterError``.
If the filter meets the conditions, a list of attribute names (not
field names) will be returned.
"""
if filter_bits is None:
filter_bits = []
        if field_name not in self._meta.filtering:
raise InvalidFilterError("The '%s' field does not allow filtering." % field_name)
# Check to see if it's an allowed lookup type.
        if self._meta.filtering[field_name] not in (ALL, ALL_WITH_RELATIONS):
            # Must be an explicit whitelist.
            if filter_type not in self._meta.filtering[field_name]:
raise InvalidFilterError("'%s' is not an allowed filter on the '%s' field." % (filter_type, field_name))
if self.fields[field_name].attribute is None:
raise InvalidFilterError("The '%s' field has no 'attribute' for searching with." % field_name)
# Check to see if it's a relational lookup and if that's allowed.
if len(filter_bits):
if not getattr(self.fields[field_name], 'is_related', False):
raise InvalidFilterError("The '%s' field does not support relations." % field_name)
if not self._meta.filtering[field_name] == ALL_WITH_RELATIONS:
raise InvalidFilterError("Lookups are not allowed more than one level deep on the '%s' field." % field_name)
# Recursively descend through the remaining lookups in the filter,
# if any. We should ensure that all along the way, we're allowed
# to filter on that field by the related resource.
related_resource = self.fields[field_name].get_related_resource(None)
return [self.fields[field_name].attribute] + related_resource.check_filtering(filter_bits[0], filter_type, filter_bits[1:])
return [self.fields[field_name].attribute]
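    # Illustrative sketch, not part of the original class: how a hypothetical
    # resource's ``Meta.filtering`` whitelist drives ``check_filtering``.
    #
    #     class NoteResource(ModelResource):
    #         class Meta:
    #             queryset = Note.objects.all()  # hypothetical model
    #             filtering = {
    #                 'title': ('exact', 'startswith'),
    #                 'author': ALL_WITH_RELATIONS,
    #             }
    #
    #     # check_filtering('title', 'startswith') -> ['title']
    #     # check_filtering('title', 'contains')   -> raises InvalidFilterError
    #     # check_filtering('author', 'exact', ['username']) recurses into the
    #     # related resource, because 'author' allows ALL_WITH_RELATIONS.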
def filter_value_to_python(self, value, field_name, filters, filter_expr,
filter_type):
"""
Turn the string ``value`` into a python object.
"""
# Simple values
if value in ['true', 'True', True]:
value = True
elif value in ['false', 'False', False]:
value = False
elif value in ('nil', 'none', 'None', None):
value = None
# Split on ',' if not empty string and either an in or range filter.
if filter_type in ('in', 'range') and len(value):
if hasattr(filters, 'getlist'):
value = []
for part in filters.getlist(filter_expr):
value.extend(part.split(','))
else:
value = value.split(',')
return value
def build_filters(self, filters=None):
"""
Given a dictionary of filters, create the necessary ORM-level filters.
Keys should be resource fields, **NOT** model fields.
Valid values are either a list of Django filter types (i.e.
``['startswith', 'exact', 'lte']``), the ``ALL`` constant or the
``ALL_WITH_RELATIONS`` constant.
"""
# At the declarative level:
# filtering = {
# 'resource_field_name': ['exact', 'startswith', 'endswith', 'contains'],
# 'resource_field_name_2': ['exact', 'gt', 'gte', 'lt', 'lte', 'range'],
# 'resource_field_name_3': ALL,
# 'resource_field_name_4': ALL_WITH_RELATIONS,
# ...
# }
# Accepts the filters as a dict. None by default, meaning no filters.
if filters is None:
filters = {}
qs_filters = {}
if getattr(self._meta, 'queryset', None) is not None:
# Get the possible query terms from the current QuerySet.
if hasattr(self._meta.queryset.query.query_terms, 'keys'):
# Django 1.4 & below compatibility.
query_terms = self._meta.queryset.query.query_terms.keys()
else:
# Django 1.5+.
query_terms = self._meta.queryset.query.query_terms
else:
if hasattr(QUERY_TERMS, 'keys'):
# Django 1.4 & below compatibility.
query_terms = QUERY_TERMS.keys()
else:
# Django 1.5+.
query_terms = QUERY_TERMS
for filter_expr, value in filters.items():
filter_bits = filter_expr.split(LOOKUP_SEP)
field_name = filter_bits.pop(0)
filter_type = 'exact'
            if field_name not in self.fields:
# It's not a field we know about. Move along citizen.
continue
if len(filter_bits) and filter_bits[-1] in query_terms:
filter_type = filter_bits.pop()
lookup_bits = self.check_filtering(field_name, filter_type, filter_bits)
value = self.filter_value_to_python(value, field_name, filters, filter_expr, filter_type)
db_field_name = LOOKUP_SEP.join(lookup_bits)
qs_filter = "%s%s%s" % (db_field_name, LOOKUP_SEP, filter_type)
qs_filters[qs_filter] = value
return dict_strip_unicode_keys(qs_filters)
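    # Example of the translation performed above, assuming the hypothetical
    # whitelist from the sketch after ``check_filtering``:
    #
    #     build_filters({'title__startswith': 'First'})
    #     # -> {'title__startswith': u'First'}  (validated, coerced, ORM-ready)
    #
    # Keys are resource field attributes joined with LOOKUP_SEP; values have
    # been passed through ``filter_value_to_python`` (defined above).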
def apply_sorting(self, obj_list, options=None):
"""
Given a dictionary of options, apply some ORM-level sorting to the
provided ``QuerySet``.
Looks for the ``order_by`` key and handles either ascending (just the
field name) or descending (the field name with a ``-`` in front).
The field name should be the resource field, **NOT** model field.
"""
if options is None:
options = {}
parameter_name = 'order_by'
        if 'order_by' not in options:
            if 'sort_by' not in options:
# Nothing to alter the order. Return what we've got.
return obj_list
else:
warnings.warn("'sort_by' is a deprecated parameter. Please use 'order_by' instead.")
parameter_name = 'sort_by'
order_by_args = []
if hasattr(options, 'getlist'):
order_bits = options.getlist(parameter_name)
else:
order_bits = options.get(parameter_name)
if not isinstance(order_bits, (list, tuple)):
order_bits = [order_bits]
for order_by in order_bits:
order_by_bits = order_by.split(LOOKUP_SEP)
field_name = order_by_bits[0]
order = ''
if order_by_bits[0].startswith('-'):
field_name = order_by_bits[0][1:]
order = '-'
            if field_name not in self.fields:
# It's not a field we know about. Move along citizen.
raise InvalidSortError("No matching '%s' field for ordering on." % field_name)
            if field_name not in self._meta.ordering:
raise InvalidSortError("The '%s' field does not allow ordering." % field_name)
if self.fields[field_name].attribute is None:
raise InvalidSortError("The '%s' field has no 'attribute' for ordering with." % field_name)
order_by_args.append("%s%s" % (order, LOOKUP_SEP.join([self.fields[field_name].attribute] + order_by_bits[1:])))
return obj_list.order_by(*order_by_args)
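    # Example (hypothetical resource with ``ordering = ['title', 'created']``):
    #
    #     apply_sorting(qs, options={'order_by': '-created'})
    #     # -> qs.order_by('-created')
    #
    # A relational parameter such as ``order_by=author__username`` is joined
    # back together with LOOKUP_SEP, provided 'author' is a known field.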
def apply_filters(self, request, applicable_filters):
"""
An ORM-specific implementation of ``apply_filters``.
The default simply applies the ``applicable_filters`` as ``**kwargs``,
but should make it possible to do more advanced things.
"""
return self.get_object_list(request).filter(**applicable_filters)
def get_object_list(self, request):
"""
An ORM-specific implementation of ``get_object_list``.
Returns a queryset that may have been limited by other overrides.
"""
return self._meta.queryset._clone()
def obj_get_list(self, bundle, **kwargs):
"""
        An ORM-specific implementation of ``obj_get_list``.
Takes an optional ``request`` object, whose ``GET`` dictionary can be
used to narrow the query.
"""
filters = {}
if hasattr(bundle.request, 'GET'):
# Grab a mutable copy.
filters = bundle.request.GET.copy()
# Update with the provided kwargs.
filters.update(kwargs)
applicable_filters = self.build_filters(filters=filters)
try:
objects = self.apply_filters(bundle.request, applicable_filters)
return self.authorized_read_list(objects, bundle)
except ValueError:
raise BadRequest("Invalid resource lookup data provided (mismatched type).")
def obj_get(self, bundle, **kwargs):
"""
        An ORM-specific implementation of ``obj_get``.
Takes optional ``kwargs``, which are used to narrow the query to find
the instance.
"""
try:
object_list = self.get_object_list(bundle.request).filter(**kwargs)
stringified_kwargs = ', '.join(["%s=%s" % (k, v) for k, v in kwargs.items()])
if len(object_list) <= 0:
raise self._meta.object_class.DoesNotExist("Couldn't find an instance of '%s' which matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))
elif len(object_list) > 1:
raise MultipleObjectsReturned("More than '%s' matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))
bundle.obj = object_list[0]
self.authorized_read_detail(object_list, bundle)
return bundle.obj
except ValueError:
raise NotFound("Invalid resource lookup data provided (mismatched type).")
def obj_create(self, bundle, **kwargs):
"""
        An ORM-specific implementation of ``obj_create``.
"""
bundle.obj = self._meta.object_class()
for key, value in kwargs.items():
setattr(bundle.obj, key, value)
self.authorized_create_detail(self.get_object_list(bundle.request), bundle)
bundle = self.full_hydrate(bundle)
return self.save(bundle)
def lookup_kwargs_with_identifiers(self, bundle, kwargs):
"""
Kwargs here represent uri identifiers Ex: /repos/<user_id>/<repo_name>/
We need to turn those identifiers into Python objects for generating
lookup parameters that can find them in the DB
"""
lookup_kwargs = {}
bundle.obj = self.get_object_list(bundle.request).model()
# Override data values, we rely on uri identifiers
bundle.data.update(kwargs)
# We're going to manually hydrate, as opposed to calling
# ``full_hydrate``, to ensure we don't try to flesh out related
# resources & keep things speedy.
bundle = self.hydrate(bundle)
for identifier in kwargs:
if identifier == self._meta.detail_uri_name:
lookup_kwargs[identifier] = kwargs[identifier]
continue
field_object = self.fields[identifier]
# Skip readonly or related fields.
if field_object.readonly is True or getattr(field_object, 'is_related', False):
continue
# Check for an optional method to do further hydration.
method = getattr(self, "hydrate_%s" % identifier, None)
if method:
bundle = method(bundle)
if field_object.attribute:
value = field_object.hydrate(bundle)
lookup_kwargs[identifier] = value
return lookup_kwargs
def obj_update(self, bundle, skip_errors=False, **kwargs):
"""
        An ORM-specific implementation of ``obj_update``.
"""
if not bundle.obj or not self.get_bundle_detail_data(bundle):
try:
lookup_kwargs = self.lookup_kwargs_with_identifiers(bundle, kwargs)
            except Exception:
# if there is trouble hydrating the data, fall back to just
# using kwargs by itself (usually it only contains a "pk" key
# and this will work fine.
lookup_kwargs = kwargs
try:
bundle.obj = self.obj_get(bundle=bundle, **lookup_kwargs)
except ObjectDoesNotExist:
raise NotFound("A model instance matching the provided arguments could not be found.")
bundle = self.full_hydrate(bundle)
return self.save(bundle, skip_errors=skip_errors)
def obj_delete_list(self, bundle, **kwargs):
"""
        An ORM-specific implementation of ``obj_delete_list``.
"""
objects_to_delete = self.obj_get_list(bundle=bundle, **kwargs)
deletable_objects = self.authorized_delete_list(objects_to_delete, bundle)
if hasattr(deletable_objects, 'delete'):
# It's likely a ``QuerySet``. Call ``.delete()`` for efficiency.
deletable_objects.delete()
else:
for authed_obj in deletable_objects:
authed_obj.delete()
def obj_delete_list_for_update(self, bundle, **kwargs):
"""
        An ORM-specific implementation of ``obj_delete_list_for_update``.
"""
objects_to_delete = self.obj_get_list(bundle=bundle, **kwargs)
deletable_objects = self.authorized_update_list(objects_to_delete, bundle)
if hasattr(deletable_objects, 'delete'):
# It's likely a ``QuerySet``. Call ``.delete()`` for efficiency.
deletable_objects.delete()
else:
for authed_obj in deletable_objects:
authed_obj.delete()
def obj_delete(self, bundle, **kwargs):
"""
        An ORM-specific implementation of ``obj_delete``.
Takes optional ``kwargs``, which are used to narrow the query to find
the instance.
"""
if not hasattr(bundle.obj, 'delete'):
try:
bundle.obj = self.obj_get(bundle=bundle, **kwargs)
except ObjectDoesNotExist:
raise NotFound("A model instance matching the provided arguments could not be found.")
self.authorized_delete_detail(self.get_object_list(bundle.request), bundle)
bundle.obj.delete()
@transaction.commit_on_success()
def patch_list(self, request, **kwargs):
"""
An ORM-specific implementation of ``patch_list``.
Necessary because PATCH should be atomic (all-success or all-fail)
and the only way to do this neatly is at the database level.
"""
return super(ModelResource, self).patch_list(request, **kwargs)
def rollback(self, bundles):
"""
        An ORM-specific implementation of ``rollback``.
Given the list of bundles, delete all models pertaining to those
bundles.
"""
for bundle in bundles:
if bundle.obj and self.get_bundle_detail_data(bundle):
bundle.obj.delete()
def create_identifier(self, obj):
return u"%s.%s.%s" % (obj._meta.app_label, obj._meta.module_name, obj.pk)
def save(self, bundle, skip_errors=False):
self.is_valid(bundle)
if bundle.errors and not skip_errors:
raise ImmediateHttpResponse(response=self.error_response(bundle.request, bundle.errors))
# Check if they're authorized.
if bundle.obj.pk:
self.authorized_update_detail(self.get_object_list(bundle.request), bundle)
else:
self.authorized_create_detail(self.get_object_list(bundle.request), bundle)
# Save FKs just in case.
self.save_related(bundle)
# Save the main object.
bundle.obj.save()
bundle.objects_saved.add(self.create_identifier(bundle.obj))
# Now pick up the M2M bits.
m2m_bundle = self.hydrate_m2m(bundle)
self.save_m2m(m2m_bundle)
return bundle
def save_related(self, bundle):
"""
Handles the saving of related non-M2M data.
        Assigning ``child.parent = parent`` & then calling
``Child.save`` isn't good enough to make sure the ``parent``
is saved.
To get around this, we go through all our related fields &
call ``save`` on them if they have related, non-M2M data.
M2M data is handled by the ``ModelResource.save_m2m`` method.
"""
for field_name, field_object in self.fields.items():
if not getattr(field_object, 'is_related', False):
continue
if getattr(field_object, 'is_m2m', False):
continue
if not field_object.attribute:
continue
if field_object.readonly:
continue
            if field_object.blank and field_name not in bundle.data:
continue
# Get the object.
try:
related_obj = getattr(bundle.obj, field_object.attribute)
except ObjectDoesNotExist:
related_obj = bundle.related_objects_to_save.get(field_object.attribute, None)
# Because sometimes it's ``None`` & that's OK.
if related_obj:
if field_object.related_name:
if not self.get_bundle_detail_data(bundle):
bundle.obj.save()
setattr(related_obj, field_object.related_name, bundle.obj)
related_resource = field_object.get_related_resource(related_obj)
# Before we build the bundle & try saving it, let's make sure we
# haven't already saved it.
obj_id = self.create_identifier(related_obj)
if obj_id in bundle.objects_saved:
# It's already been saved. We're done here.
continue
if bundle.data.get(field_name) and hasattr(bundle.data[field_name], 'keys'):
# Only build & save if there's data, not just a URI.
related_bundle = related_resource.build_bundle(
obj=related_obj,
data=bundle.data.get(field_name),
request=bundle.request,
objects_saved=bundle.objects_saved
)
related_resource.save(related_bundle)
setattr(bundle.obj, field_object.attribute, related_obj)
def save_m2m(self, bundle):
"""
Handles the saving of related M2M data.
Due to the way Django works, the M2M data must be handled after the
main instance, which is why this isn't a part of the main ``save`` bits.
Currently slightly inefficient in that it will clear out the whole
relation and recreate the related data as needed.
"""
for field_name, field_object in self.fields.items():
if not getattr(field_object, 'is_m2m', False):
continue
if not field_object.attribute:
continue
if field_object.readonly:
continue
# Get the manager.
related_mngr = None
if isinstance(field_object.attribute, basestring):
related_mngr = getattr(bundle.obj, field_object.attribute)
elif callable(field_object.attribute):
related_mngr = field_object.attribute(bundle)
if not related_mngr:
continue
if hasattr(related_mngr, 'clear'):
# FIXME: Dupe the original bundle, copy in the new object &
# check the perms on that (using the related resource)?
# Clear it out, just to be safe.
related_mngr.clear()
related_objs = []
for related_bundle in bundle.data[field_name]:
related_resource = field_object.get_related_resource(bundle.obj)
# Before we build the bundle & try saving it, let's make sure we
# haven't already saved it.
obj_id = self.create_identifier(related_bundle.obj)
if obj_id in bundle.objects_saved:
# It's already been saved. We're done here.
continue
# Only build & save if there's data, not just a URI.
updated_related_bundle = related_resource.build_bundle(
obj=related_bundle.obj,
data=related_bundle.data,
request=bundle.request,
objects_saved=bundle.objects_saved
)
                # Only save related models if they're newly added.
if updated_related_bundle.obj._state.adding:
related_resource.save(updated_related_bundle)
related_objs.append(updated_related_bundle.obj)
related_mngr.add(*related_objs)
def detail_uri_kwargs(self, bundle_or_obj):
"""
Given a ``Bundle`` or an object (typically a ``Model`` instance),
it returns the extra kwargs needed to generate a detail URI.
By default, it uses the model's ``pk`` in order to create the URI.
"""
kwargs = {}
if isinstance(bundle_or_obj, Bundle):
kwargs[self._meta.detail_uri_name] = getattr(bundle_or_obj.obj, self._meta.detail_uri_name)
else:
kwargs[self._meta.detail_uri_name] = getattr(bundle_or_obj, self._meta.detail_uri_name)
return kwargs
class NamespacedModelResource(ModelResource):
"""
A ModelResource subclass that respects Django namespaces.
"""
def _build_reverse_url(self, name, args=None, kwargs=None):
namespaced = "%s:%s" % (self._meta.urlconf_namespace, name)
return reverse(namespaced, args=args, kwargs=kwargs)
# Based off of ``piston.utils.coerce_put_post``. Similarly BSD-licensed.
# And no, the irony is not lost on me.
def convert_post_to_VERB(request, verb):
"""
Force Django to process the VERB.
"""
if request.method == verb:
if hasattr(request, '_post'):
del(request._post)
del(request._files)
try:
request.method = "POST"
request._load_post_and_files()
request.method = verb
except AttributeError:
request.META['REQUEST_METHOD'] = 'POST'
request._load_post_and_files()
request.META['REQUEST_METHOD'] = verb
setattr(request, verb, request.POST)
return request
def convert_post_to_put(request):
return convert_post_to_VERB(request, verb='PUT')
def convert_post_to_patch(request):
return convert_post_to_VERB(request, verb='PATCH')
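# ---------------------------------------------------------------------------
# Minimal usage sketch, not part of tastypie itself. ``Note`` and the app
# module are hypothetical; the ModelResource API is the one defined above.
#
#     from tastypie.constants import ALL
#     from tastypie.resources import ModelResource
#     from myapp.models import Note  # hypothetical model
#
#     class NoteResource(ModelResource):
#         class Meta:
#             queryset = Note.objects.all()
#             resource_name = 'note'
#             filtering = {'title': ALL}
#             ordering = ['created']
#
# Wired into urls.py via ``NoteResource().urls``, GET parameters then flow
# through ``build_filters`` and ``apply_sorting`` as implemented above.
# ---------------------------------------------------------------------------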
| agpl-3.0 | 4,679,845,014,758,824,000 | 37.120434 | 213 | 0.600623 | false | 4.360672 | false | false | false |
MuffinMedic/CloudBot | cloudbot/hook.py | 1 | 14146 | import collections
import inspect
import re
from enum import Enum, unique, IntEnum
from cloudbot.event import EventType
valid_command_re = re.compile(r"^\w+$")
@unique
class Priority(IntEnum):
# Reversed to maintain compatibility with sieve hooks numeric priority
LOWEST = 127
LOW = 63
NORMAL = 0
HIGH = -64
HIGHEST = -128
@unique
class Action(Enum):
"""Defines the action to take after executing a hook"""
HALTTYPE = 0 # Once this hook executes, no other hook of that type should run
HALTALL = 1 # Once this hook executes, No other hook should run
CONTINUE = 2 # Normal execution of all hooks
class _Hook:
"""
:type function: function
:type type: str
:type kwargs: dict[str, unknown]
"""
def __init__(self, function, _type):
"""
:type function: function
:type _type: str
"""
self.function = function
self.type = _type
self.kwargs = {}
def _add_hook(self, kwargs):
"""
:type kwargs: dict[str, unknown]
"""
# update kwargs, overwriting duplicates
self.kwargs.update(kwargs)
class _CommandHook(_Hook):
"""
:type main_alias: str
:type aliases: set[str]
"""
def __init__(self, function):
"""
:type function: function
"""
_Hook.__init__(self, function, "command")
self.aliases = set()
self.main_alias = None
if function.__doc__:
self.doc = function.__doc__.split('\n', 1)[0]
else:
self.doc = None
def add_hook(self, alias_param, kwargs):
"""
:type alias_param: list[str] | str
"""
self._add_hook(kwargs)
if not alias_param:
alias_param = self.function.__name__
if isinstance(alias_param, str):
alias_param = [alias_param]
if not self.main_alias:
self.main_alias = alias_param[0]
for alias in alias_param:
if not valid_command_re.match(alias):
raise ValueError("Invalid command name {}".format(alias))
self.aliases.update(alias_param)
class _RegexHook(_Hook):
"""
:type regexes: list[re.__Regex]
"""
def __init__(self, function):
"""
:type function: function
"""
_Hook.__init__(self, function, "regex")
self.regexes = []
def add_hook(self, regex_param, kwargs):
"""
:type regex_param: Iterable[str | re.__Regex] | str | re.__Regex
:type kwargs: dict[str, unknown]
"""
self._add_hook(kwargs)
# add all regex_parameters to valid regexes
if isinstance(regex_param, str):
# if the parameter is a string, compile and add
self.regexes.append(re.compile(regex_param))
elif hasattr(regex_param, "search"):
# if the parameter is an re.__Regex, just add it
# we only use regex.search anyways, so this is a good determiner
self.regexes.append(regex_param)
else:
assert isinstance(regex_param, collections.Iterable)
# if the parameter is a list, add each one
for re_to_match in regex_param:
if isinstance(re_to_match, str):
re_to_match = re.compile(re_to_match)
else:
# make sure that the param is either a compiled regex, or has a search attribute.
assert hasattr(re_to_match, "search")
self.regexes.append(re_to_match)
class _RawHook(_Hook):
"""
:type triggers: set[str]
"""
def __init__(self, function):
"""
:type function: function
"""
_Hook.__init__(self, function, "irc_raw")
self.triggers = set()
def add_hook(self, trigger_param, kwargs):
"""
:type trigger_param: list[str] | str
:type kwargs: dict[str, unknown]
"""
self._add_hook(kwargs)
if isinstance(trigger_param, str):
self.triggers.add(trigger_param)
else:
# it's a list
self.triggers.update(trigger_param)
class _PeriodicHook(_Hook):
def __init__(self, function):
"""
:type function: function
"""
_Hook.__init__(self, function, "periodic")
self.interval = 60.0
def add_hook(self, interval, kwargs):
"""
:type interval: int
:type kwargs: dict[str, unknown]
"""
self._add_hook(kwargs)
if interval:
self.interval = interval
class _EventHook(_Hook):
"""
:type types: set[cloudbot.event.EventType]
"""
def __init__(self, function):
"""
:type function: function
"""
_Hook.__init__(self, function, "event")
self.types = set()
def add_hook(self, trigger_param, kwargs):
"""
:type trigger_param: cloudbot.event.EventType | list[cloudbot.event.EventType]
:type kwargs: dict[str, unknown]
"""
self._add_hook(kwargs)
if isinstance(trigger_param, EventType):
self.types.add(trigger_param)
else:
# it's a list
self.types.update(trigger_param)
class _CapHook(_Hook):
def __init__(self, func, _type):
super().__init__(func, "on_cap_{}".format(_type))
self.caps = set()
def add_hook(self, caps, kwargs):
self._add_hook(kwargs)
self.caps.update(caps)
class _PermissionHook(_Hook):
def __init__(self, func):
super().__init__(func, "perm_check")
self.perms = set()
def add_hook(self, perms, kwargs):
self._add_hook(kwargs)
self.perms.update(perms)
def _add_hook(func, hook):
if not hasattr(func, "_cloudbot_hook"):
func._cloudbot_hook = {}
else:
assert hook.type not in func._cloudbot_hook # in this case the hook should be using the add_hook method
func._cloudbot_hook[hook.type] = hook
def _get_hook(func, hook_type):
if hasattr(func, "_cloudbot_hook") and hook_type in func._cloudbot_hook:
return func._cloudbot_hook[hook_type]
return None
def command(*args, **kwargs):
"""External command decorator. Can be used directly as a decorator, or with args to return a decorator.
:type param: str | list[str] | function
"""
def _command_hook(func, alias_param=None):
hook = _get_hook(func, "command")
if hook is None:
hook = _CommandHook(func)
_add_hook(func, hook)
hook.add_hook(alias_param, kwargs)
return func
if len(args) == 1 and callable(args[0]): # this decorator is being used directly
return _command_hook(args[0])
# this decorator is being used indirectly, so return a decorator function
return lambda func: _command_hook(func, alias_param=args)
def irc_raw(triggers_param, **kwargs):
"""External raw decorator. Must be used as a function to return a decorator
:type triggers_param: str | list[str]
"""
def _raw_hook(func):
hook = _get_hook(func, "irc_raw")
if hook is None:
hook = _RawHook(func)
_add_hook(func, hook)
hook.add_hook(triggers_param, kwargs)
return func
if callable(triggers_param): # this decorator is being used directly, which isn't good
raise TypeError("@irc_raw() must be used as a function that returns a decorator")
# this decorator is being used as a function, so return a decorator
return lambda func: _raw_hook(func)
def event(types_param, **kwargs):
"""External event decorator. Must be used as a function to return a decorator
:type types_param: cloudbot.event.EventType | list[cloudbot.event.EventType]
"""
def _event_hook(func):
hook = _get_hook(func, "event")
if hook is None:
hook = _EventHook(func)
_add_hook(func, hook)
hook.add_hook(types_param, kwargs)
return func
if callable(types_param): # this decorator is being used directly, which isn't good
raise TypeError("@irc_raw() must be used as a function that returns a decorator")
# this decorator is being used as a function, so return a decorator
return lambda func: _event_hook(func)
def regex(regex_param, **kwargs):
"""External regex decorator. Must be used as a function to return a decorator.
:type regex_param: str | re.__Regex | list[str | re.__Regex]
"""
def _regex_hook(func):
hook = _get_hook(func, "regex")
if hook is None:
hook = _RegexHook(func)
_add_hook(func, hook)
hook.add_hook(regex_param, kwargs)
return func
if callable(regex_param): # this decorator is being used directly, which isn't good
raise TypeError("@regex() hook must be used as a function that returns a decorator")
# this decorator is being used as a function, so return a decorator
return lambda func: _regex_hook(func)
def sieve(param=None, **kwargs):
"""External sieve decorator. Can be used directly as a decorator, or with args to return a decorator
:type param: function | None
"""
def _sieve_hook(func):
assert len(inspect.signature(func).parameters) == 3, \
"Sieve plugin has incorrect argument count. Needs params: bot, input, plugin"
hook = _get_hook(func, "sieve")
if hook is None:
hook = _Hook(func, "sieve") # there's no need to have a specific SieveHook object
_add_hook(func, hook)
hook._add_hook(kwargs)
return func
if callable(param):
return _sieve_hook(param)
return lambda func: _sieve_hook(func)
def periodic(interval, **kwargs):
"""External on_start decorator. Can be used directly as a decorator, or with args to return a decorator
:type param: function | None
"""
def _periodic_hook(func):
hook = _get_hook(func, "periodic")
if hook is None:
hook = _PeriodicHook(func)
_add_hook(func, hook)
hook.add_hook(interval, kwargs)
return func
if callable(interval): # this decorator is being used directly, which isn't good
raise TypeError("@periodic() hook must be used as a function that returns a decorator")
# this decorator is being used as a function, so return a decorator
return lambda func: _periodic_hook(func)
def on_start(param=None, **kwargs):
"""External on_start decorator. Can be used directly as a decorator, or with args to return a decorator
:type param: function | None
"""
def _on_start_hook(func):
hook = _get_hook(func, "on_start")
if hook is None:
hook = _Hook(func, "on_start")
_add_hook(func, hook)
hook._add_hook(kwargs)
return func
if callable(param):
return _on_start_hook(param)
return lambda func: _on_start_hook(func)
# this is temporary, to ease transition
onload = on_start
def on_stop(param=None, **kwargs):
"""External on_stop decorator. Can be used directly as a decorator, or with args to return a decorator
:type param: function | None
"""
def _on_stop_hook(func):
hook = _get_hook(func, "on_stop")
if hook is None:
hook = _Hook(func, "on_stop")
_add_hook(func, hook)
hook._add_hook(kwargs)
return func
if callable(param):
return _on_stop_hook(param)
return lambda func: _on_stop_hook(func)
on_unload = on_stop
def on_cap_available(*caps, **kwargs):
"""External on_cap_available decorator. Must be used as a function that returns a decorator
This hook will fire for each capability in a `CAP LS` response from the server
"""
def _on_cap_available_hook(func):
hook = _get_hook(func, "on_cap_available")
if hook is None:
hook = _CapHook(func, "available")
_add_hook(func, hook)
hook.add_hook(caps, kwargs)
return func
return _on_cap_available_hook
def on_cap_ack(*caps, **kwargs):
"""External on_cap_ack decorator. Must be used as a function that returns a decorator
This hook will fire for each capability that is acknowledged from the server with `CAP ACK`
"""
def _on_cap_ack_hook(func):
hook = _get_hook(func, "on_cap_ack")
if hook is None:
hook = _CapHook(func, "ack")
_add_hook(func, hook)
hook.add_hook(caps, kwargs)
return func
return _on_cap_ack_hook
def on_connect(param=None, **kwargs):
def _on_connect_hook(func):
hook = _get_hook(func, "on_connect")
if hook is None:
hook = _Hook(func, "on_connect")
_add_hook(func, hook)
hook._add_hook(kwargs)
return func
if callable(param):
return _on_connect_hook(param)
return lambda func: _on_connect_hook(func)
connect = on_connect
def irc_out(param=None, **kwargs):
def _decorate(func):
hook = _get_hook(func, "irc_out")
if hook is None:
hook = _Hook(func, "irc_out")
_add_hook(func, hook)
hook._add_hook(kwargs)
return func
if callable(param):
return _decorate(param)
return lambda func: _decorate(func)
def post_hook(param=None, **kwargs):
"""
This hook will be fired just after a hook finishes executing
"""
def _decorate(func):
hook = _get_hook(func, "post_hook")
if hook is None:
hook = _Hook(func, "post_hook")
_add_hook(func, hook)
hook._add_hook(kwargs)
return func
if callable(param):
return _decorate(param)
return lambda func: _decorate(func)
def permission(*perms, **kwargs):
def _perm_hook(func):
hook = _get_hook(func, "perm_check")
if hook is None:
hook = _PermissionHook(func)
_add_hook(func, hook)
hook.add_hook(perms, kwargs)
return func
return lambda func: _perm_hook(func)
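# ---------------------------------------------------------------------------
# Illustrative plugin sketch, not part of this module; the command and plugin
# names are made up, the decorators are the ones defined above.
#
#     import re
#     from cloudbot import hook
#
#     @hook.command('echo', 'say')
#     def echo(text):
#         """<text> - repeats <text> back"""
#         return text
#
#     @hook.regex(re.compile(r'cloudbot', re.IGNORECASE))
#     def mentioned(match):
#         return "someone said my name?"
#
# At load time the plugin manager reads ``func._cloudbot_hook`` (populated by
# ``_add_hook`` above) to register each hook.
# ---------------------------------------------------------------------------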
| gpl-3.0 | -573,451,123,311,893,000 | 26.901381 | 112 | 0.588294 | false | 3.807806 | false | false | false |
collinstocks/eventlet | eventlet/greenio/base.py | 1 | 17181 | import errno
import os
import socket
import sys
import time
import warnings
import eventlet
from eventlet.hubs import trampoline, notify_opened, IOClosed
from eventlet.support import get_errno, six
__all__ = [
'GreenSocket', '_GLOBAL_DEFAULT_TIMEOUT', 'set_nonblocking',
'SOCKET_BLOCKING', 'SOCKET_CLOSED', 'CONNECT_ERR', 'CONNECT_SUCCESS',
'shutdown_safe', 'SSL',
]
BUFFER_SIZE = 4096
CONNECT_ERR = set((errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK))
CONNECT_SUCCESS = set((0, errno.EISCONN))
if sys.platform[:3] == "win":
CONNECT_ERR.add(errno.WSAEINVAL) # Bug 67
if six.PY2:
_python2_fileobject = socket._fileobject
_original_socket = eventlet.patcher.original('socket').socket
def socket_connect(descriptor, address):
"""
Attempts to connect to the address, returns the descriptor if it succeeds,
returns None if it needs to trampoline, and raises any exceptions.
"""
err = descriptor.connect_ex(address)
if err in CONNECT_ERR:
return None
if err not in CONNECT_SUCCESS:
raise socket.error(err, errno.errorcode[err])
return descriptor
def socket_checkerr(descriptor):
err = descriptor.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err not in CONNECT_SUCCESS:
raise socket.error(err, errno.errorcode[err])
def socket_accept(descriptor):
"""
Attempts to accept() on the descriptor, returns a client,address tuple
if it succeeds; returns None if it needs to trampoline, and raises
any exceptions.
"""
try:
return descriptor.accept()
except socket.error as e:
if get_errno(e) == errno.EWOULDBLOCK:
return None
raise
if sys.platform[:3] == "win":
# winsock sometimes throws ENOTCONN
SOCKET_BLOCKING = set((errno.EAGAIN, errno.EWOULDBLOCK,))
SOCKET_CLOSED = set((errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN))
else:
# oddly, on linux/darwin, an unconnected socket is expected to block,
# so we treat ENOTCONN the same as EWOULDBLOCK
SOCKET_BLOCKING = set((errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOTCONN))
SOCKET_CLOSED = set((errno.ECONNRESET, errno.ESHUTDOWN, errno.EPIPE))
def set_nonblocking(fd):
"""
Sets the descriptor to be nonblocking. Works on many file-like
objects as well as sockets. Only sockets can be nonblocking on
Windows, however.
"""
try:
setblocking = fd.setblocking
except AttributeError:
# fd has no setblocking() method. It could be that this version of
# Python predates socket.setblocking(). In that case, we can still set
# the flag "by hand" on the underlying OS fileno using the fcntl
# module.
try:
import fcntl
except ImportError:
# Whoops, Windows has no fcntl module. This might not be a socket
# at all, but rather a file-like object with no setblocking()
# method. In particular, on Windows, pipes don't support
# non-blocking I/O and therefore don't have that method. Which
# means fcntl wouldn't help even if we could load it.
raise NotImplementedError("set_nonblocking() on a file object "
"with no setblocking() method "
"(Windows pipes don't support non-blocking I/O)")
# We managed to import fcntl.
fileno = fd.fileno()
orig_flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
new_flags = orig_flags | os.O_NONBLOCK
if new_flags != orig_flags:
fcntl.fcntl(fileno, fcntl.F_SETFL, new_flags)
else:
# socket supports setblocking()
setblocking(0)
try:
from socket import _GLOBAL_DEFAULT_TIMEOUT
except ImportError:
_GLOBAL_DEFAULT_TIMEOUT = object()
class GreenSocket(object):
"""
Green version of socket.socket class, that is intended to be 100%
API-compatible.
It also recognizes the keyword parameter, 'set_nonblocking=True'.
Pass False to indicate that socket is already in non-blocking mode
to save syscalls.
"""
# This placeholder is to prevent __getattr__ from creating an infinite call loop
fd = None
def __init__(self, family_or_realsock=socket.AF_INET, *args, **kwargs):
should_set_nonblocking = kwargs.pop('set_nonblocking', True)
if isinstance(family_or_realsock, six.integer_types):
fd = _original_socket(family_or_realsock, *args, **kwargs)
# Notify the hub that this is a newly-opened socket.
notify_opened(fd.fileno())
else:
fd = family_or_realsock
# import timeout from other socket, if it was there
try:
self._timeout = fd.gettimeout() or socket.getdefaulttimeout()
except AttributeError:
self._timeout = socket.getdefaulttimeout()
if should_set_nonblocking:
set_nonblocking(fd)
self.fd = fd
# when client calls setblocking(0) or settimeout(0) the socket must
# act non-blocking
self.act_non_blocking = False
# Copy some attributes from underlying real socket.
# This is the easiest way that i found to fix
# https://bitbucket.org/eventlet/eventlet/issue/136
# Only `getsockopt` is required to fix that issue, others
# are just premature optimization to save __getattr__ call.
self.bind = fd.bind
self.close = fd.close
self.fileno = fd.fileno
self.getsockname = fd.getsockname
self.getsockopt = fd.getsockopt
self.listen = fd.listen
self.setsockopt = fd.setsockopt
self.shutdown = fd.shutdown
self._closed = False
@property
def _sock(self):
return self
if six.PY3:
def _get_io_refs(self):
return self.fd._io_refs
def _set_io_refs(self, value):
self.fd._io_refs = value
_io_refs = property(_get_io_refs, _set_io_refs)
# Forward unknown attributes to fd, cache the value for future use.
# I do not see any simple attribute which could be changed
# so caching everything in self is fine.
# If we find such attributes - only attributes having __get__ might be cached.
# For now - I do not want to complicate it.
def __getattr__(self, name):
if self.fd is None:
raise AttributeError(name)
attr = getattr(self.fd, name)
setattr(self, name, attr)
return attr
def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None):
""" We need to trampoline via the event hub.
We catch any signal back from the hub indicating that the operation we
were waiting on was associated with a filehandle that's since been
invalidated.
"""
if self._closed:
# If we did any logging, alerting to a second trampoline attempt on a closed
# socket here would be useful.
raise IOClosed()
try:
return trampoline(fd, read=read, write=write, timeout=timeout,
timeout_exc=timeout_exc,
mark_as_closed=self._mark_as_closed)
except IOClosed:
# This socket's been obsoleted. De-fang it.
self._mark_as_closed()
raise
def accept(self):
if self.act_non_blocking:
return self.fd.accept()
fd = self.fd
while True:
res = socket_accept(fd)
if res is not None:
client, addr = res
set_nonblocking(client)
return type(self)(client), addr
self._trampoline(fd, read=True, timeout=self.gettimeout(),
timeout_exc=socket.timeout("timed out"))
def _mark_as_closed(self):
""" Mark this socket as being closed """
self._closed = True
def __del__(self):
# This is in case self.close is not assigned yet (currently the constructor does it)
close = getattr(self, 'close', None)
if close is not None:
close()
def connect(self, address):
if self.act_non_blocking:
return self.fd.connect(address)
fd = self.fd
if self.gettimeout() is None:
while not socket_connect(fd, address):
try:
self._trampoline(fd, write=True)
except IOClosed:
raise socket.error(errno.EBADFD)
socket_checkerr(fd)
else:
end = time.time() + self.gettimeout()
while True:
if socket_connect(fd, address):
return
if time.time() >= end:
raise socket.timeout("timed out")
try:
self._trampoline(fd, write=True, timeout=end - time.time(),
timeout_exc=socket.timeout("timed out"))
except IOClosed:
# ... we need some workable errno here.
raise socket.error(errno.EBADFD)
socket_checkerr(fd)
def connect_ex(self, address):
if self.act_non_blocking:
return self.fd.connect_ex(address)
fd = self.fd
if self.gettimeout() is None:
while not socket_connect(fd, address):
try:
self._trampoline(fd, write=True)
socket_checkerr(fd)
except socket.error as ex:
return get_errno(ex)
except IOClosed:
return errno.EBADFD
else:
end = time.time() + self.gettimeout()
while True:
try:
if socket_connect(fd, address):
return 0
if time.time() >= end:
raise socket.timeout(errno.EAGAIN)
self._trampoline(fd, write=True, timeout=end - time.time(),
timeout_exc=socket.timeout(errno.EAGAIN))
socket_checkerr(fd)
except socket.error as ex:
return get_errno(ex)
except IOClosed:
return errno.EBADFD
def dup(self, *args, **kw):
sock = self.fd.dup(*args, **kw)
newsock = type(self)(sock, set_nonblocking=False)
newsock.settimeout(self.gettimeout())
return newsock
if six.PY3:
def makefile(self, *args, **kwargs):
return _original_socket.makefile(self, *args, **kwargs)
else:
def makefile(self, *args, **kwargs):
dupped = self.dup()
res = _python2_fileobject(dupped, *args, **kwargs)
if hasattr(dupped, "_drop"):
dupped._drop()
return res
def makeGreenFile(self, *args, **kw):
warnings.warn("makeGreenFile has been deprecated, please use "
"makefile instead", DeprecationWarning, stacklevel=2)
return self.makefile(*args, **kw)
def _read_trampoline(self):
self._trampoline(
self.fd,
read=True,
timeout=self.gettimeout(),
timeout_exc=socket.timeout("timed out"))
def _recv_loop(self, recv_meth, *args):
fd = self.fd
if self.act_non_blocking:
return recv_meth(*args)
while True:
try:
# recv: bufsize=0?
# recv_into: buffer is empty?
# This is needed because behind the scenes we use sockets in
# nonblocking mode and builtin recv* methods. Attempting to read
# 0 bytes from a nonblocking socket using a builtin recv* method
# does not raise a timeout exception. Since we're simulating
# a blocking socket here we need to produce a timeout exception
# if needed, hence the call to trampoline.
if not args[0]:
self._read_trampoline()
return recv_meth(*args)
except socket.error as e:
if get_errno(e) in SOCKET_BLOCKING:
pass
elif get_errno(e) in SOCKET_CLOSED:
return b''
else:
raise
try:
self._read_trampoline()
except IOClosed as e:
# Perhaps we should return '' instead?
raise EOFError()
def recv(self, bufsize, flags=0):
return self._recv_loop(self.fd.recv, bufsize, flags)
def recvfrom(self, bufsize, flags=0):
return self._recv_loop(self.fd.recvfrom, bufsize, flags)
def recv_into(self, buffer, nbytes=0, flags=0):
return self._recv_loop(self.fd.recv_into, buffer, nbytes, flags)
def recvfrom_into(self, buffer, nbytes=0, flags=0):
return self._recv_loop(self.fd.recvfrom_into, buffer, nbytes, flags)
def _send_loop(self, send_method, data, *args):
if self.act_non_blocking:
return send_method(data, *args)
while 1:
try:
return send_method(data, *args)
except socket.error as e:
eno = get_errno(e)
if eno == errno.ENOTCONN or eno not in SOCKET_BLOCKING:
raise
try:
self._trampoline(self.fd, write=True, timeout=self.gettimeout(),
timeout_exc=socket.timeout("timed out"))
except IOClosed:
raise socket.error(errno.ECONNRESET, 'Connection closed by another thread')
def send(self, data, flags=0):
return self._send_loop(self.fd.send, data, flags)
def sendto(self, data, *args):
return self._send_loop(self.fd.sendto, data, *args)
def sendall(self, data, flags=0):
tail = self.send(data, flags)
len_data = len(data)
while tail < len_data:
tail += self.send(data[tail:], flags)
def setblocking(self, flag):
if flag:
self.act_non_blocking = False
self._timeout = None
else:
self.act_non_blocking = True
self._timeout = 0.0
def settimeout(self, howlong):
if howlong is None or howlong == _GLOBAL_DEFAULT_TIMEOUT:
self.setblocking(True)
return
try:
f = howlong.__float__
except AttributeError:
raise TypeError('a float is required')
howlong = f()
if howlong < 0.0:
raise ValueError('Timeout value out of range')
if howlong == 0.0:
self.act_non_blocking = True
self._timeout = 0.0
else:
self.act_non_blocking = False
self._timeout = howlong
def gettimeout(self):
return self._timeout
if "__pypy__" in sys.builtin_module_names:
def _reuse(self):
getattr(self.fd, '_sock', self.fd)._reuse()
def _drop(self):
getattr(self.fd, '_sock', self.fd)._drop()
def _operation_on_closed_file(*args, **kwargs):
raise ValueError("I/O operation on closed file")
greenpipe_doc = """
GreenPipe is a cooperative replacement for file class.
It will cooperate on pipes. It will block on regular file.
    Differences from file class:
    - mode is r/w property. Should be r/o
    - encoding property not implemented
    - write/writelines will not raise TypeError exception when non-string data
      is written; it will write str(data) instead
    - Universal new lines are not supported and newlines property not implemented
    - file argument can be a descriptor, file name or file object.
"""
# import SSL module here so we can refer to greenio.SSL.exceptionclass
try:
from OpenSSL import SSL
except ImportError:
# pyOpenSSL not installed, define exceptions anyway for convenience
class SSL(object):
class WantWriteError(Exception):
pass
class WantReadError(Exception):
pass
class ZeroReturnError(Exception):
pass
class SysCallError(Exception):
pass
def shutdown_safe(sock):
""" Shuts down the socket. This is a convenience method for
code that wants to gracefully handle regular sockets, SSL.Connection
sockets from PyOpenSSL and ssl.SSLSocket objects from Python 2.6
interchangeably. Both types of ssl socket require a shutdown() before
close, but they have different arity on their shutdown method.
Regular sockets don't need a shutdown before close, but it doesn't hurt.
"""
try:
try:
# socket, ssl.SSLSocket
return sock.shutdown(socket.SHUT_RDWR)
except TypeError:
# SSL.Connection
return sock.shutdown()
except socket.error as e:
# we don't care if the socket is already closed;
# this will often be the case in an http server context
if get_errno(e) not in (errno.ENOTCONN, errno.EBADF):
raise
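# ---------------------------------------------------------------------------
# Usage sketch (assumes an eventlet hub is running, e.g. inside a greenthread
# or after eventlet.monkey_patch(); the host and port are placeholders):
#
#     sock = GreenSocket()
#     sock.settimeout(5.0)
#     sock.connect(('example.com', 80))  # yields cooperatively via trampoline
#     sock.sendall(b'GET / HTTP/1.0\r\n\r\n')
#     data = sock.recv(BUFFER_SIZE)      # blocks only this greenthread
#     shutdown_safe(sock)
#     sock.close()
# ---------------------------------------------------------------------------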
| mit | 3,253,789,823,497,832,000 | 34.645228 | 92 | 0.584483 | false | 4.158035 | false | false | false |
mgrygoriev/CloudFerry | cloudferrylib/os/estimation/procedures.py | 1 | 6018 | # Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import heapq
from cloudferrylib.os.discovery import nova
from cloudferrylib.os.discovery import cinder
from cloudferrylib.os.discovery import glance
from cloudferrylib.os.discovery import model
from cloudferrylib.utils import sizeof_format
def list_filtered(session, cls, cloud_name, tenant):
return (x for x in session.list(cls, cloud_name)
if tenant is None or tenant == x.tenant.object_id.id)
def estimate_copy(cfg, migration_name):
migration = cfg.migrations[migration_name]
query = migration.query
src_cloud = migration.source
with model.Session() as session:
total_ephemeral_size = 0
total_volume_size = 0
total_image_size = 0
accounted_volumes = set()
accounted_images = set()
for server in query.search(session, src_cloud, nova.Server):
for ephemeral_disk in server.ephemeral_disks:
total_ephemeral_size += ephemeral_disk.size
if server.image is not None \
and server.image.object_id not in accounted_images:
total_image_size += server.image.size
accounted_images.add(server.image.object_id)
for volume in server.attached_volumes:
if volume.object_id not in accounted_volumes:
total_volume_size += volume.size
accounted_volumes.add(volume.object_id)
for volume in query.search(session, src_cloud, cinder.Volume):
if volume.object_id not in accounted_volumes:
total_volume_size += volume.size
for image in query.search(session, src_cloud, glance.Image):
if image.object_id not in accounted_images:
total_image_size += image.size
print 'Migration', migration_name, 'estimates:'
print 'Images:'
print ' Size:', sizeof_format.sizeof_fmt(total_image_size)
print 'Ephemeral disks:'
print ' Size:', sizeof_format.sizeof_fmt(total_ephemeral_size)
print 'Volumes:'
print ' Size:', sizeof_format.sizeof_fmt(total_volume_size, 'G')
def show_largest_servers(cfg, count, migration_name):
def server_size(server):
size = 0
if server.image is not None:
size += server.image.size
for ephemeral_disk in server.ephemeral_disks:
size += ephemeral_disk.size
for volume in server.attached_volumes:
size += volume.size
return size
output = []
migration = cfg.migrations[migration_name]
with model.Session() as session:
for index, server in enumerate(
heapq.nlargest(
count,
migration.query.search(session, migration.source,
nova.Server),
key=server_size),
start=1):
output.append(
' {0}. {1.object_id.id} {1.name} - {2}'.format(
index, server,
sizeof_format.sizeof_fmt(server_size(server))))
if output:
print '\n{0} largest servers:'.format(len(output))
for line in output:
print line
def show_largest_unused_resources(count, cloud_name, tenant):
with model.Session() as session:
used_volumes = set()
used_images = set()
servers = list_filtered(session, nova.Server, cloud_name, tenant)
for server in servers:
if server.image is not None:
used_images.add(server.image.object_id)
for volume in server.attached_volumes:
used_volumes.add(volume.object_id)
# Find unused volumes
volumes_output = []
volumes_size = 0
volumes = list_filtered(session, cinder.Volume, cloud_name, tenant)
for index, volume in enumerate(
heapq.nlargest(count,
(v for v in volumes
if v.object_id not in used_volumes),
key=lambda v: v.size),
start=1):
volumes_size += volume.size
size = sizeof_format.sizeof_fmt(volume.size, 'G')
volumes_output.append(
' {0:3d}. {1.object_id.id} {2:10s} {1.name}'.format(
index, volume, size))
# Find unused images
images_output = []
images_size = 0
images = list_filtered(session, glance.Image, cloud_name, tenant)
for index, image in enumerate(
heapq.nlargest(count,
(i for i in images
if i.object_id not in used_images),
key=lambda i: i.size),
start=1):
images_size += image.size
size = sizeof_format.sizeof_fmt(image.size)
images_output.append(
' {0:3d}. {1.object_id.id} {2:10s} {1.name}'.format(
index, image, size))
# Output result
if volumes_output:
print '\n{0} largest unused volumes:'.format(len(volumes_output))
for line in volumes_output:
print line
print ' Total:', sizeof_format.sizeof_fmt(volumes_size, 'G')
if images_output:
print '\n{0} largest unused images:'.format(len(images_output))
for line in images_output:
print line
print ' Total:', sizeof_format.sizeof_fmt(images_size)
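# ---------------------------------------------------------------------------
# Invocation sketch (``cfg`` is hypothetical here; in CloudFerry it is the
# loaded migration configuration):
#
#     estimate_copy(cfg, 'migration1')
#     show_largest_servers(cfg, 10, 'migration1')
#     show_largest_unused_resources(10, 'src_cloud', tenant=None)
#
# Each procedure reads previously discovered objects through model.Session(),
# so a discovery run must have populated the local store first.
# ---------------------------------------------------------------------------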
| apache-2.0 | 263,699,053,650,078,880 | 38.333333 | 75 | 0.588069 | false | 4.116279 | false | false | false |
rksaxena/crawler_templates | crawler_type2/crawler_type2/spiders/type_two.py | 1 | 1779 | # -*- coding: utf-8 -*-
import scrapy
import crawler_type2.config as config
from crawler_type2.items import CrawlerType2Item
import urlparse
class TypeTwo(scrapy.Spider):
name = 'crawler_type2'
def start_requests(self):
source = getattr(self, 'source', None)
if source is None or source not in config.SOURCES:
raise Exception("Invalid source!!!")
self.crawl_source = config.SOURCES[source]
self.allowed_domains = self.crawl_source['ALLOWED_DOMAINS']
self.start_urls = self.crawl_source['START_URLS']
yield scrapy.Request(self.start_urls[0], callback=self.parse)
def parse(self, response):
for href in response.xpath(self.crawl_source['LIST_PAGE_XPATH']):
url = urlparse.urljoin(self.crawl_source['BASE_URL'], href.extract())
            print 'Sending request for url: ' + url
req = scrapy.Request(url, callback=self.parse_item)
# for key in response.meta.keys():
# req.meta[key] = response.meta[key]
yield req
def parse_item(self, response):
print "parse item for url %s" % (response.request.url)
item = CrawlerType2Item()
for element in response.xpath(self.crawl_source['BLOG_CONTENT_XPATH']):
heading = element.xpath(self.crawl_source['HEADING_XPATH']).extract()
text = element.xpath(self.crawl_source['TEXT_XPATH']).extract()
heading = [t.strip() for t in heading]
text = [t.strip() for t in text]
item['heading'] = " ".join(heading)
item['text'] = " ".join(text)
item['img'] = element.xpath(self.crawl_source['IMG_XPATH']).extract()
if 'text' in item and len(item['text']) > 0:
yield item
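# ---------------------------------------------------------------------------
# Sketch of a ``config.SOURCES`` entry, inferred from the keys accessed above
# (all values are made up):
#
#     SOURCES = {
#         'exampleblog': {
#             'ALLOWED_DOMAINS': ['blog.example.com'],
#             'START_URLS': ['http://blog.example.com/archive'],
#             'BASE_URL': 'http://blog.example.com',
#             'LIST_PAGE_XPATH': '//a[@class="post-link"]/@href',
#             'BLOG_CONTENT_XPATH': '//div[@class="post"]',
#             'HEADING_XPATH': './/h1//text()',
#             'TEXT_XPATH': './/p//text()',
#             'IMG_XPATH': './/img/@src',
#         },
#     }
#
# Run with: scrapy crawl crawler_type2 -a source=exampleblog
# ---------------------------------------------------------------------------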
| mit | 1,685,810,864,093,220,900 | 39.431818 | 81 | 0.610455 | false | 3.72956 | false | false | false |
piaoyaoshi/inf1340_2015_asst1 | exercise3.py | 1 | 3174 | #!/usr/bin/env python
""" Assignment 1, Exercise 3, INF1340, Fall, 2015. Troubleshooting Car Issues.
This module contains one function diagnose_car(). It is an expert system to
interactive diagnose car issues.
"""
__author__ = 'Susan Sim'
__email__ = "[email protected]"
__copyright__ = "2015 Susan Sim"
__license__ = "MIT License"
# ERROR_MESSAGE in case of improper user input
ERROR_MESSAGE = 'I don\'t understand'
# ========================================== Main Function =================================================
def diagnose_car():
"""
Interactively queries the user with yes/no questions to identify a
possible issue with a car.
Test Cases
    Inputs: Y, N
    Expected: Replace cables and try again.
    Inputs: Y, Y
    Expected: Clean terminals and try starting again.
    Inputs: N, Y
    Expected: Replace the battery.
    Inputs: N, N, Y
    Expected: Check spark plug connections.
    Inputs: N, N, N, N
    Expected: Engine is not getting enough fuel. Clean fuel pump.
    Inputs: N, N, N, Y, Y
    Expected: Get it in for service.
    Inputs: N, N, N, Y, N
    Expected: Check to ensure the choke is opening and closing.
"""
check_silence()
# diagnose_car()
# ========================================== Helper Functions ================================================
# Each function represents a box on flowchart and supports re-usability
# This function is the first box in the flow chart
def check_silence():
silent = raw_input("Is the car silent when you turn the key?")
if silent == "Y":
check_battery()
elif silent == "N":
check_noise()
else:
print (ERROR_MESSAGE)
check_silence()
# This is the left side of the flowchart
def check_battery():
corroded = raw_input("Are the battery terminals corroded?")
if corroded == "Y":
print ("Clean terminals and try starting again.")
elif corroded == "N":
print ("Replace cables and try again.")
else:
print (ERROR_MESSAGE)
check_battery()
# Everything below is the right side of the flow chart
def check_noise():
click = raw_input("Does the car make a clicking noise?")
if click == "Y":
print ("Replace the battery.")
elif click == "N":
check_crank()
else:
print (ERROR_MESSAGE)
check_noise()
def check_crank():
crank = raw_input("Does the car crank up but fail to start?")
if crank == "Y":
print ("Check spark plug connections.")
elif crank == "N":
check_engine()
else:
print (ERROR_MESSAGE)
check_crank()
def check_engine():
engine = raw_input("Does engine start and then die?")
if engine == "Y":
check_fuel()
elif engine == "N":
print ("Engine is not getting enough fuel. Clean fuel pump.")
else:
print (ERROR_MESSAGE)
check_engine()
def check_fuel():
fuel = raw_input("Does your car have fuel injection?")
if fuel == "N":
print ("Check to ensure the choke is opening and closing.")
elif fuel == "Y":
print ("Get it in for service.")
else:
print (ERROR_MESSAGE)
check_fuel()
| mit | -2,540,198,713,976,936,400 | 23.604651 | 110 | 0.587902 | false | 3.792115 | false | false | false |
cnddu/cactusblog | form/post.py | 1 | 1329 | #!/usr/bin/env python
# coding=utf-8
#
# Copyright 2013 meiritugua.com
from wtforms import TextField, HiddenField, validators
from lib.forms import Form
class NewForm(Form):
title = TextField('Title', [
        validators.Required(message = "Please enter a post title"),
        validators.Length(min = 3, message = "Post title is too short (3-100 characters)"),
        validators.Length(max = 100, message = "Post title is too long (3-100 characters)"),
])
content = TextField('Content', [
        validators.Required(message = "Please enter the post content"),
])
    tag = TextField('Tag', [
        validators.Required(message = "Please enter post tags"),
])
class EditTagForm(Form):
name = TextField('Name', [
        validators.Required(message = "Please enter a tag name"),
        validators.Length(min = 3, message = "Tag name is too short (3-100 characters)"),
        validators.Length(max = 100, message = "Tag name is too long (3-100 characters)"),
])
intro = TextField('Intro', [
validators.Optional(),
])
category = TextField('Category', [
validators.Optional(),
])
tag_type = TextField('Tag_type', [
validators.Optional(),
])
tag = TextField('Tag', [
validators.Optional(),
])
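# ---------------------------------------------------------------------------
# Validation sketch (assumes lib.forms.Form keeps the usual wtforms API; the
# field data below is made up):
#
#     form = NewForm(title=u'Hi', content=u'', tag=u'news')
#     if not form.validate():
#         print form.errors  # e.g. title too short, content required
# ---------------------------------------------------------------------------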
| gpl-3.0 | -6,134,245,195,717,030,000 | 23.354167 | 70 | 0.585115 | false | 2.750588 | false | false | false |
GuillaumeSeren/alot | alot/helper.py | 1 | 20203 | # -*- coding: utf-8 -*-
# Copyright (C) 2011-2012 Patrick Totzke <[email protected]>
# Copyright © 2017-2018 Dylan Baker
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
from datetime import timedelta
from datetime import datetime
from collections import deque
import logging
import mimetypes
import os
import re
import shlex
import subprocess
import email
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
import asyncio
import urwid
import magic
def split_commandline(s, comments=False, posix=True):
"""
splits semi-colon separated commandlines
"""
# shlex seems to remove unescaped quotes and backslashes
s = s.replace('\\', '\\\\')
s = s.replace('\'', '\\\'')
s = s.replace('\"', '\\\"')
lex = shlex.shlex(s, posix=posix)
lex.whitespace_split = True
lex.whitespace = ';'
if not comments:
lex.commenters = ''
return list(lex)
def split_commandstring(cmdstring):
"""
split command string into a list of strings to pass on to subprocess.Popen
and the like. This simply calls shlex.split but works also with unicode
bytestrings.
"""
assert isinstance(cmdstring, str)
return shlex.split(cmdstring)
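# A couple of concrete splits for reference (standard shlex semantics):
#
#     split_commandstring("mutt -f 'INBOX/foo bar'")  # ['mutt', '-f', 'INBOX/foo bar']
#     split_commandstring('editor "$FILE"')           # ['editor', '$FILE']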
def string_sanitize(string, tab_width=8):
r"""
strips, and replaces non-printable characters
:param tab_width: number of spaces to replace tabs with. Read from
`globals.tabwidth` setting if `None`
:type tab_width: int or `None`
>>> string_sanitize(' foo\rbar ', 8)
' foobar '
>>> string_sanitize('foo\tbar', 8)
'foo bar'
>>> string_sanitize('foo\t\tbar', 8)
'foo bar'
"""
string = string.replace('\r', '')
lines = list()
for line in string.split('\n'):
tab_count = line.count('\t')
if tab_count > 0:
line_length = 0
new_line = list()
for i, chunk in enumerate(line.split('\t')):
line_length += len(chunk)
new_line.append(chunk)
if i < tab_count:
next_tab_stop_in = tab_width - (line_length % tab_width)
new_line.append(' ' * next_tab_stop_in)
line_length += next_tab_stop_in
lines.append(''.join(new_line))
else:
lines.append(line)
return '\n'.join(lines)
def string_decode(string, enc='ascii'):
"""
safely decodes string to unicode bytestring, respecting `enc` as a hint.
:param string: the string to decode
:type string: str or unicode
:param enc: a hint what encoding is used in string ('ascii', 'utf-8', ...)
:type enc: str
:returns: the unicode decoded input string
:rtype: unicode
"""
if enc is None:
enc = 'ascii'
try:
string = str(string, enc, errors='replace')
except LookupError: # malformed enc string
string = string.decode('ascii', errors='replace')
except TypeError: # already str
pass
return string
def shorten(string, maxlen):
"""shortens string if longer than maxlen, appending ellipsis"""
if 1 < maxlen < len(string):
string = string[:maxlen - 1] + '…'
return string[:maxlen]
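# Example (added for illustration): with maxlen=5 the string is cut to four
# characters plus a single ellipsis character, so the result stays within
# maxlen:
#
#   >>> shorten('abcdefgh', 5)
#   'abcd…'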
def shorten_author_string(authors_string, maxlength):
"""
Parse a list of authors concatenated as a text string (comma
separated) and smartly adjust them to maxlength.
1) If the complete list of sender names does not fit in maxlength, it
tries to shorten names by using only the first part of each.
2) If the list is still too long, hide authors according to the
following priority:
- First author is always shown (if too long is shorten with ellipsis)
- If possible, last author is also shown (if too long, uses ellipsis)
- If there are more than 2 authors in the thread, show the
maximum of them. More recent senders have higher priority.
- If it is finally necessary to hide any author, an ellipsis
between first and next authors is added.
"""
# I will create a list of authors by parsing author_string. I use
# deque to do popleft without performance penalties
authors = deque()
# If author list is too long, it uses only the first part of each
# name (gmail style)
short_names = len(authors_string) > maxlength
for au in authors_string.split(", "):
if short_names:
author_as_list = au.split()
if len(author_as_list) > 0:
authors.append(author_as_list[0])
else:
authors.append(au)
# Author chain will contain the list of author strings to be
# concatenated using commas for the final formatted author_string.
authors_chain = deque()
if len(authors) == 0:
return ''
# reserve space for first author
first_au = shorten(authors.popleft(), maxlength)
remaining_length = maxlength - len(first_au)
# Tries to add an ellipsis if no space to show more than 1 author
if authors and maxlength > 3 and remaining_length < 3:
first_au = shorten(first_au, maxlength - 3)
remaining_length += 3
    # Tries to add as many authors as possible. It takes into account
    # that if any author will be hidden, an ellipsis should be added
while authors and remaining_length >= 3:
au = authors.pop()
if len(au) > 1 and (remaining_length == 3 or (authors and
remaining_length < 7)):
authors_chain.appendleft('…')
break
else:
if authors:
# 5= ellipsis + 2 x comma and space used as separators
au_string = shorten(au, remaining_length - 5)
else:
# 2 = comma and space used as separator
au_string = shorten(au, remaining_length - 2)
remaining_length -= len(au_string) + 2
authors_chain.appendleft(au_string)
# Add the first author to the list and concatenate list
authors_chain.appendleft(first_au)
authorsstring = ', '.join(authors_chain)
return authorsstring
def pretty_datetime(d):
"""
translates :class:`datetime` `d` to a "sup-style" human readable string.
>>> now = datetime.now()
>>> now.strftime('%c')
'Sat 31 Mar 2012 14:47:26 '
>>> pretty_datetime(now)
'just now'
>>> pretty_datetime(now - timedelta(minutes=1))
'1min ago'
>>> pretty_datetime(now - timedelta(hours=5))
'5h ago'
>>> pretty_datetime(now - timedelta(hours=12))
'02:54am'
>>> pretty_datetime(now - timedelta(days=1))
'yest 02pm'
>>> pretty_datetime(now - timedelta(days=2))
'Thu 02pm'
>>> pretty_datetime(now - timedelta(days=7))
'Mar 24'
>>> pretty_datetime(now - timedelta(days=356))
'Apr 2011'
"""
ampm = d.strftime('%p').lower()
if len(ampm):
hourfmt = '%I' + ampm
hourminfmt = '%I:%M' + ampm
else:
hourfmt = '%Hh'
hourminfmt = '%H:%M'
now = datetime.now()
today = now.date()
if d.date() == today or d > now - timedelta(hours=6):
delta = datetime.now() - d
if delta.seconds < 60:
string = 'just now'
elif delta.seconds < 3600:
string = '%dmin ago' % (delta.seconds // 60)
elif delta.seconds < 6 * 3600:
string = '%dh ago' % (delta.seconds // 3600)
else:
string = d.strftime(hourminfmt)
elif d.date() == today - timedelta(1):
string = d.strftime('yest ' + hourfmt)
elif d.date() > today - timedelta(7):
string = d.strftime('%a ' + hourfmt)
elif d.year != today.year:
string = d.strftime('%b %Y')
else:
string = d.strftime('%b %d')
return string_decode(string, 'UTF-8')
def call_cmd(cmdlist, stdin=None):
"""
    get a shell command's output, error message and return value and immediately
return.
.. warning::
This returns with the first screen content for interactive commands.
    :param cmdlist: shell command to call, already split into a list accepted
by :meth:`subprocess.Popen`
:type cmdlist: list of str
:param stdin: string to pipe to the process
:type stdin: str, bytes, or None
:return: triple of stdout, stderr, return value of the shell command
:rtype: str, str, int
"""
termenc = urwid.util.detected_encoding
if isinstance(stdin, str):
stdin = stdin.encode(termenc)
try:
logging.debug("Calling %s" % cmdlist)
proc = subprocess.Popen(
cmdlist,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE if stdin is not None else None)
except OSError as e:
out = b''
err = e.strerror
ret = e.errno
else:
out, err = proc.communicate(stdin)
ret = proc.returncode
out = string_decode(out, termenc)
err = string_decode(err, termenc)
return out, err, ret
async def call_cmd_async(cmdlist, stdin=None, env=None):
"""Given a command, call that command asynchronously and return the output.
This function only handles `OSError` when creating the subprocess, any
    other exceptions raised either during subprocess creation or while
exchanging data with the subprocess are the caller's responsibility to
handle.
If such an `OSError` is caught, then returncode will be set to 1, and the
error value will be set to the str() value of the exception.
:type cmdlist: list of str
:param stdin: string to pipe to the process
:type stdin: str
:return: Tuple of stdout, stderr, returncode
:rtype: tuple[str, str, int]
"""
termenc = urwid.util.detected_encoding
cmdlist = [s.encode(termenc) for s in cmdlist]
environment = os.environ.copy()
if env is not None:
environment.update(env)
logging.debug('ENV = %s', environment)
logging.debug('CMD = %s', cmdlist)
try:
proc = await asyncio.create_subprocess_exec(
*cmdlist,
env=environment,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
stdin=asyncio.subprocess.PIPE if stdin else None)
except OSError as e:
return ('', str(e), 1)
out, err = await proc.communicate(stdin.encode(termenc) if stdin else None)
return (out.decode(termenc), err.decode(termenc), proc.returncode)
def guess_mimetype(blob):
"""
uses file magic to determine the mime-type of the given data blob.
:param blob: file content as read by file.read()
:type blob: data
:returns: mime-type, falls back to 'application/octet-stream'
:rtype: str
"""
mimetype = 'application/octet-stream'
# this is a bit of a hack to support different versions of python magic.
# Hopefully at some point this will no longer be necessary
#
# the version with open() is the bindings shipped with the file source from
# http://darwinsys.com/file/ - this is what is used by the python-magic
# package on Debian/Ubuntu. However, it is not available on pypi/via pip.
#
# the version with from_buffer() is available at
# https://github.com/ahupp/python-magic and directly installable via pip.
#
# for more detail see https://github.com/pazz/alot/pull/588
if hasattr(magic, 'open'):
m = magic.open(magic.MAGIC_MIME_TYPE)
m.load()
magictype = m.buffer(blob)
    elif hasattr(magic, 'from_buffer'):
        # cf. issue #841; fall back to the default mimetype if from_buffer
        # returns nothing
        magictype = magic.from_buffer(blob, mime=True) or mimetype
else:
raise Exception('Unknown magic API')
# libmagic does not always return proper mimetype strings, cf. issue #459
if re.match(r'\w+\/\w+', magictype):
mimetype = magictype
return mimetype
def guess_encoding(blob):
"""
uses file magic to determine the encoding of the given data blob.
:param blob: file content as read by file.read()
:type blob: data
:returns: encoding
:rtype: str
"""
# this is a bit of a hack to support different versions of python magic.
# Hopefully at some point this will no longer be necessary
#
# the version with open() is the bindings shipped with the file source from
# http://darwinsys.com/file/ - this is what is used by the python-magic
    # package on Debian/Ubuntu. However, it is not available on pypi/via pip.
#
# the version with from_buffer() is available at
# https://github.com/ahupp/python-magic and directly installable via pip.
#
# for more detail see https://github.com/pazz/alot/pull/588
if hasattr(magic, 'open'):
m = magic.open(magic.MAGIC_MIME_ENCODING)
m.load()
return m.buffer(blob)
elif hasattr(magic, 'from_buffer'):
m = magic.Magic(mime_encoding=True)
return m.from_buffer(blob)
else:
raise Exception('Unknown magic API')
def try_decode(blob):
"""Guess the encoding of blob and try to decode it into a str.
:param bytes blob: The bytes to decode
:returns: the decoded blob
:rtype: str
"""
assert isinstance(blob, bytes), 'cannot decode a str or non-bytes object'
return blob.decode(guess_encoding(blob))
def libmagic_version_at_least(version):
"""
checks if the libmagic library installed is more recent than a given
version.
:param version: minimum version expected in the form XYY (i.e. 5.14 -> 514)
with XYY >= 513
"""
if hasattr(magic, 'open'):
magic_wrapper = magic._libraries['magic']
elif hasattr(magic, 'from_buffer'):
magic_wrapper = magic.libmagic
else:
raise Exception('Unknown magic API')
if not hasattr(magic_wrapper, 'magic_version'):
# The magic_version function has been introduced in libmagic 5.13,
# if it's not present, we can't guess right, so let's assume False
return False
# Depending on the libmagic/ctypes version, magic_version is a function or
# a callable:
if callable(magic_wrapper.magic_version):
return magic_wrapper.magic_version() >= version
return magic_wrapper.magic_version >= version
# TODO: make this work on blobs, not paths
def mimewrap(path, filename=None, ctype=None):
"""Take the contents of the given path and wrap them into an email MIME
part according to the content type. The content type is auto detected from
the actual file contents and the file name if it is not given.
:param path: the path to the file contents
:type path: str
:param filename: the file name to use in the generated MIME part
:type filename: str or None
:param ctype: the content type of the file contents in path
:type ctype: str or None
:returns: the message MIME part storing the data from path
:rtype: subclasses of email.mime.base.MIMEBase
"""
with open(path, 'rb') as f:
content = f.read()
if not ctype:
ctype = guess_mimetype(content)
# libmagic < 5.12 incorrectly detects excel/powerpoint files as
# 'application/msword' (see #179 and #186 in libmagic bugtracker)
# This is a workaround, based on file extension, useful as long
# as distributions still ship libmagic 5.11.
if (ctype == 'application/msword' and
not libmagic_version_at_least(513)):
mimetype, _ = mimetypes.guess_type(path)
if mimetype:
ctype = mimetype
maintype, subtype = ctype.split('/', 1)
if maintype == 'text':
part = MIMEText(content.decode(guess_encoding(content), 'replace'),
_subtype=subtype,
_charset='utf-8')
elif maintype == 'image':
part = MIMEImage(content, _subtype=subtype)
elif maintype == 'audio':
part = MIMEAudio(content, _subtype=subtype)
else:
part = MIMEBase(maintype, subtype)
part.set_payload(content)
# Encode the payload using Base64
email.encoders.encode_base64(part)
# Set the filename parameter
if not filename:
filename = os.path.basename(path)
part.add_header('Content-Disposition', 'attachment',
filename=filename)
return part
def shell_quote(text):
"""Escape the given text for passing it to the shell for interpretation.
The resulting string will be parsed into one "word" (in the sense used in
the shell documentation, see sh(1)) by the shell.
:param text: the text to quote
:type text: str
:returns: the quoted text
:rtype: str
"""
return "'%s'" % text.replace("'", """'"'"'""")
def humanize_size(size):
"""Create a nice human readable representation of the given number
(understood as bytes) using the "KiB" and "MiB" suffixes to indicate
kibibytes and mebibytes. A kibibyte is defined as 1024 bytes (as opposed to
    a kilobyte which is 1000 bytes) and a mebibyte is 1024**2 bytes (as opposed
to a megabyte which is 1000**2 bytes).
:param size: the number to convert
:type size: int
:returns: the human readable representation of size
:rtype: str
"""
for factor, format_string in ((1, '%i'),
(1024, '%iKiB'),
(1024 * 1024, '%.1fMiB')):
if size / factor < 1024:
return format_string % (size / factor)
return format_string % (size / factor)
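# Worked examples (added): sizes below 1024 print as a bare integer, kibibytes
# as whole numbers, and mebibytes with one decimal place:
#
#   >>> humanize_size(512)
#   '512'
#   >>> humanize_size(1234)
#   '1KiB'
#   >>> humanize_size(5 * 1024 * 1024)
#   '5.0MiB'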
def parse_mailcap_nametemplate(tmplate='%s'):
"""this returns a prefix and suffix to be used
in the tempfile module for a given mailcap nametemplate string"""
nt_list = tmplate.split('%s')
template_prefix = ''
template_suffix = ''
if len(nt_list) == 2:
template_suffix = nt_list[1]
template_prefix = nt_list[0]
else:
template_suffix = tmplate
return (template_prefix, template_suffix)
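# Example (added): a mailcap nametemplate such as 'sig%s.asc' splits into a
# prefix/suffix pair suitable for tempfile.mkstemp(prefix=..., suffix=...):
#
#   >>> parse_mailcap_nametemplate('sig%s.asc')
#   ('sig', '.asc')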
def parse_mailto(mailto_str):
"""
Interpret mailto-string
:param mailto_str: the string to interpret. Must conform to :rfc:2368.
:type mailto_str: str
:return: the header fields and the body found in the mailto link as a tuple
of length two
:rtype: tuple(dict(str->list(str)), str)
"""
if mailto_str.startswith('mailto:'):
import urllib.parse
to_str, parms_str = mailto_str[7:].partition('?')[::2]
headers = {}
body = ''
to = urllib.parse.unquote(to_str)
if to:
headers['To'] = [to]
for s in parms_str.split('&'):
key, value = s.partition('=')[::2]
key = key.capitalize()
if key == 'Body':
body = urllib.parse.unquote(value)
elif value:
headers[key] = [urllib.parse.unquote(value)]
return (headers, body)
else:
return (None, None)
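# Example (added for illustration): header keys are capitalized and
# percent-encoding is undone; the Body parameter is returned separately:
#
#   >>> parse_mailto('mailto:[email protected]?subject=hi&body=hello%20world')
#   ({'To': ['[email protected]'], 'Subject': ['hi']}, 'hello world')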
def mailto_to_envelope(mailto_str):
"""
Interpret mailto-string into a :class:`alot.db.envelope.Envelope`
"""
from alot.db.envelope import Envelope
headers, body = parse_mailto(mailto_str)
return Envelope(bodytext=body, headers=headers)
def RFC3156_canonicalize(text):
"""
Canonicalizes plain text (MIME-encoded usually) according to RFC3156.
This function works as follows (in that order):
1. Convert all line endings to \\\\r\\\\n (DOS line endings).
2. Encode all occurrences of "From " at the beginning of a line
to "From=20" in order to prevent other mail programs to replace
this with "> From" (to avoid MBox conflicts) and thus invalidate
the signature.
:param text: text to canonicalize (already encoded as quoted-printable)
:rtype: str
"""
text = re.sub("\r?\n", "\r\n", text)
text = re.sub("^From ", "From=20", text, flags=re.MULTILINE)
return text
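# Example (added): line endings become CRLF and a leading "From " is masked so
# MBox-style mangling cannot invalidate an OpenPGP signature:
#
#   >>> RFC3156_canonicalize('From here\nto there')
#   'From=20here\r\nto there'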
def get_xdg_env(env_name, fallback):
""" Used for XDG_* env variables to return fallback if unset *or* empty """
env = os.environ.get(env_name)
return env if env else fallback
| gpl-3.0 | -2,359,341,438,656,932,400 | 32.440397 | 79 | 0.624171 | false | 3.934164 | false | false | false |
AenBleidd/man2wiki | man2wiki.py | 1 | 2114 | import sys
import re
from os import listdir, linesep
from os.path import isfile, join, splitext
def clearLine(line):
return line.strip().replace(r'\&', '').replace(r'\fB', '').replace(r'\fC', '').replace(r'\fP', '').replace(r'\-', '-').replace(r'#', '<nowiki>#</nowiki>')
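# Illustration (added comments): clearLine drops common roff escapes, e.g.
# clearLine(r'\fBbold\fP text \- more') -> 'bold text - more'. Below, convert()
# maps '.SH NAME' to '== NAME ==' and '.SS Sub' to '=== Sub ==='.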
def convert(in_filename, out_filename):
f = open(out_filename, 'w')
for line in open(in_filename):
m = re.match(r'^\.\\\"', line)
if m is not None:
continue
if line.strip() == '.TP' or line.strip() == '.PP' or line.strip() == '.nh':
continue
m = re.match(r'^\.TH\s+', line)
if m is not None:
continue
m = re.match(r'^\.SH\s+("?)(.*)(\1)\s*$', line)
if m is not None:
f.write(linesep)
f.write("== " + clearLine(m.group(2)) + " ==")
f.write(linesep)
continue
m = re.match(r'^\.R?B\s+(.*)\s*$', line)
if m is not None:
f.write("**" + clearLine(m.group(1)) + "**")
f.write(linesep)
continue
m = re.match(r'^\.I\s+(.*)\s*$', line)
if m is not None:
f.write("//" + clearLine(m.group(1)) + "//")
f.write(linesep)
continue
if line.strip() == ".br":
f.write(linesep)
continue
m = re.match(r'^\.in\s', line)
if m is not None:
continue
m = re.match(r'^\.ti\s', line)
if m is not None:
continue
m = re.match(r'^\.ad\s', line)
if m is not None:
continue
m = re.match(r'^\.SS\s+("?)(.*)(\1)\s*$', line)
if m is not None:
f.write(linesep)
f.write("=== " + clearLine(m.group(2)) + " ===")
f.write(linesep)
continue
m = re.match(r'^\.RI\s+("?)(\\fI)(.*)(\\fP)(\1)\s*$', line)
if m is not None:
f.write(linesep)
f.write(clearLine(m.group(3)))
f.write(linesep)
continue
m = re.match(r'^\.RI\s+("?)(.*)(\1)\s*$', line)
if m is not None:
f.write(linesep)
f.write("==== " + clearLine(m.group(2)) + " ====")
f.write(linesep)
continue
f.write(clearLine(line))
f.close()
if len(sys.argv) != 3:
print("Usage: python man2wiki.py [man folder] [wiki folder]")
else:
for f in [f for f in listdir(sys.argv[1]) if isfile(join(sys.argv[1],f))]:
convert(join(sys.argv[1], f), join(sys.argv[2], splitext(f)[0] + ".wiki")) | gpl-3.0 | -1,009,650,140,952,821,800 | 27.581081 | 155 | 0.556291 | false | 2.446759 | false | false | false |
magne-max/zipline-ja | zipline/pipeline/loaders/utils.py | 1 | 9840 | import datetime
import numpy as np
import pandas as pd
from zipline.utils.pandas_utils import mask_between_time
def is_sorted_ascending(a):
"""Check if a numpy array is sorted."""
return (np.fmax.accumulate(a) <= a).all()
def validate_event_metadata(event_dates,
event_timestamps,
event_sids):
assert is_sorted_ascending(event_dates), "event dates must be sorted"
assert len(event_sids) == len(event_dates) == len(event_timestamps), \
"mismatched arrays: %d != %d != %d" % (
len(event_sids),
len(event_dates),
len(event_timestamps),
)
def next_event_indexer(all_dates,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the next event for each
sid at each moment in time.
Locations where no next event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
        Dates on which each input event occurred/will occur. ``event_dates``
        must be in sorted order, and may not contain any NaT values.
    event_timestamps : ndarray[datetime64[ns], ndim=1]
        Dates on which we learned about each input event.
    event_sids : ndarray[int, ndim=1]
        Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64)
sid_ixs = all_sids.searchsorted(event_sids)
# side='right' here ensures that we include the event date itself
# if it's in all_dates.
dt_ixs = all_dates.searchsorted(event_dates, side='right')
ts_ixs = all_dates.searchsorted(event_timestamps)
# Walk backward through the events, writing the index of the event into
# slots ranging from the event's timestamp to its asof. This depends for
# correctness on the fact that event_dates is sorted in ascending order,
# because we need to overwrite later events with earlier ones if their
# eligible windows overlap.
for i in range(len(event_sids) - 1, -1, -1):
start_ix = ts_ixs[i]
end_ix = dt_ixs[i]
out[start_ix:end_ix, sid_ixs[i]] = i
return out
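# Tiny worked example (added for illustration; not in the original module):
# with all_dates = [D0, D1, D2, D3], all_sids = [1], and a single event for
# sid 1 that becomes known at D1 (timestamp) and occurs at D2 (event date),
# the loop writes index 0 into rows D1..D2 for that sid, so the column reads
# [-1, 0, 0, -1]: the event is "next" only between learning about it and its
# occurrence (inclusive, thanks to side='right').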
def previous_event_indexer(all_dates,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the previous event for
each sid at each moment in time.
Locations where no previous event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
        Dates on which each input event occurred/will occur. ``event_dates``
        must be in sorted order, and may not contain any NaT values.
    event_timestamps : ndarray[datetime64[ns], ndim=1]
        Dates on which we learned about each input event.
    event_sids : ndarray[int, ndim=1]
        Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64)
eff_dts = np.maximum(event_dates, event_timestamps)
sid_ixs = all_sids.searchsorted(event_sids)
dt_ixs = all_dates.searchsorted(eff_dts)
# Walk backwards through the events, writing the index of the event into
# slots ranging from max(event_date, event_timestamp) to the start of the
# previously-written event. This depends for correctness on the fact that
# event_dates is sorted in ascending order, because we need to have written
# later events so we know where to stop forward-filling earlier events.
last_written = {}
for i in range(len(event_dates) - 1, -1, -1):
sid_ix = sid_ixs[i]
dt_ix = dt_ixs[i]
out[dt_ix:last_written.get(sid_ix, None), sid_ix] = i
last_written[sid_ix] = dt_ix
return out
def normalize_data_query_time(dt, time, tz):
"""Apply the correct time and timezone to a date.
Parameters
----------
dt : pd.Timestamp
The original datetime that represents the date.
time : datetime.time
The time of day to use as the cutoff point for new data. Data points
that you learn about after this time will become available to your
algorithm on the next trading day.
tz : tzinfo
The timezone to normalize your dates to before comparing against
`time`.
Returns
-------
query_dt : pd.Timestamp
The timestamp with the correct time and date in utc.
"""
# merge the correct date with the time in the given timezone then convert
# back to utc
return pd.Timestamp(
datetime.datetime.combine(dt.date(), time),
tz=tz,
).tz_convert('utc')
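# Worked example (added): with time=datetime.time(8, 45) and
# tz=pytz.timezone('US/Eastern'), a dt of 2014-01-02 combines to
# 2014-01-02 08:45 Eastern and is returned as 2014-01-02 13:45 UTC
# (Eastern is UTC-5 in January).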
def normalize_data_query_bounds(lower, upper, time, tz):
"""Adjust the first and last dates in the requested datetime index based on
    the provided query time and tz.

    Parameters
    ----------
    lower : pd.Timestamp
The lower date requested.
upper : pd.Timestamp
The upper date requested.
time : datetime.time
The time of day to use as the cutoff point for new data. Data points
that you learn about after this time will become available to your
algorithm on the next trading day.
tz : tzinfo
The timezone to normalize your dates to before comparing against
`time`.
"""
# Subtract one day to grab things that happened on the first day we are
# requesting. This doesn't need to be a trading day, we are only adding
# a lower bound to limit the amount of in memory filtering that needs
# to happen.
lower -= datetime.timedelta(days=1)
if time is not None:
return normalize_data_query_time(
lower,
time,
tz,
), normalize_data_query_time(
upper,
time,
tz,
)
return lower, upper
_midnight = datetime.time(0, 0)
def normalize_timestamp_to_query_time(df,
time,
tz,
inplace=False,
ts_field='timestamp'):
"""Update the timestamp field of a dataframe to normalize dates around
some data query time/timezone.
Parameters
----------
df : pd.DataFrame
The dataframe to update. This needs a column named ``ts_field``.
time : datetime.time
The time of day to use as the cutoff point for new data. Data points
that you learn about after this time will become available to your
algorithm on the next trading day.
tz : tzinfo
The timezone to normalize your dates to before comparing against
`time`.
inplace : bool, optional
Update the dataframe in place.
ts_field : str, optional
The name of the timestamp field in ``df``.
Returns
-------
df : pd.DataFrame
The dataframe with the timestamp field normalized. If ``inplace`` is
true, then this will be the same object as ``df`` otherwise this will
be a copy.
"""
if not inplace:
# don't mutate the dataframe in place
df = df.copy()
dtidx = pd.DatetimeIndex(df.loc[:, ts_field], tz='utc')
dtidx_local_time = dtidx.tz_convert(tz)
to_roll_forward = mask_between_time(
dtidx_local_time,
time,
_midnight,
include_end=False,
)
# For all of the times that are greater than our query time add 1
# day and truncate to the date.
# We normalize twice here because of a bug in pandas 0.16.1 that causes
# tz_localize() to shift some timestamps by an hour if they are not grouped
# together by DST/EST.
df.loc[to_roll_forward, ts_field] = (
dtidx_local_time[to_roll_forward] + datetime.timedelta(days=1)
).normalize().tz_localize(None).tz_localize('utc').normalize()
df.loc[~to_roll_forward, ts_field] = dtidx[~to_roll_forward].normalize()
return df
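# Sketch of the effect (added): with time=08:45 and tz=US/Eastern, a row
# stamped 2014-01-02 09:00 Eastern is past the cutoff and rolls forward to
# midnight UTC of 2014-01-03, while 2014-01-02 08:00 Eastern simply normalizes
# to midnight UTC of its own day.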
def check_data_query_args(data_query_time, data_query_tz):
"""Checks the data_query_time and data_query_tz arguments for loaders
and raises a standard exception if one is None and the other is not.
Parameters
----------
data_query_time : datetime.time or None
data_query_tz : tzinfo or None
Raises
------
ValueError
Raised when only one of the arguments is None.
"""
if (data_query_time is None) ^ (data_query_tz is None):
raise ValueError(
"either 'data_query_time' and 'data_query_tz' must both be"
" None or neither may be None (got %r, %r)" % (
data_query_time,
data_query_tz,
),
)
| apache-2.0 | 8,966,708,791,491,797,000 | 34.912409 | 79 | 0.618699 | false | 4.016327 | false | false | false |
mrrrgn/build-relengapi | relengapi/lib/auth/ldap_group_authz.py | 1 | 2918 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import itertools
import ldap
import logging
from relengapi import p
from relengapi.lib.auth import permissions_stale
class LdapGroups(object):
def __init__(self, app):
permissions_cfg = app.config.get('RELENGAPI_PERMISSIONS', {})
self.group_permissions = permissions_cfg.get('group-permissions', {})
# verify that each specified permission exists
for perm in set(itertools.chain(*self.group_permissions.values())):
try:
p[perm]
except KeyError:
raise RuntimeError(
"invalid permission in settings: %r" % (perm,))
self.uri = permissions_cfg['uri']
self.login_dn = permissions_cfg['login_dn']
self.login_password = permissions_cfg['login_password']
self.user_base = permissions_cfg['user_base']
self.group_base = permissions_cfg['group_base']
self.debug = permissions_cfg.get('debug')
self.logger = logging.getLogger(__name__)
permissions_stale.connect_via(app)(self.on_permissions_stale)
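    # Expected settings shape (illustrative sketch inferred from the reads in
    # __init__ above; the concrete values below are made up):
    #
    # RELENGAPI_PERMISSIONS = {
    #     'uri': 'ldaps://ldap.example.com',
    #     'login_dn': 'uid=binder,o=example',
    #     'login_password': 'secret',
    #     'user_base': 'o=users,o=example',
    #     'group_base': 'o=groups,o=example',
    #     'group-permissions': {
    #         'example-team': ['some.permission.name'],
    #     },
    #     'debug': False,
    # }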
def get_user_groups(self, mail):
if self.debug:
self.logger.debug('Making LDAP query for %s', mail)
try:
l = ldap.initialize(self.uri)
l.simple_bind_s(self.login_dn, self.login_password)
# convert mail to DN
people = l.search_s(self.user_base, ldap.SCOPE_SUBTREE,
'(&(objectClass=inetOrgPerson)(mail=%s))' % (mail,), [])
if not people or len(people) != 1:
return []
user_dn = people[0][0]
result = l.search_s(self.group_base, ldap.SCOPE_SUBTREE,
'(&(objectClass=groupOfNames)(member=%s))' % user_dn, ['cn'])
groups = []
for glist in [g[1]['cn'] for g in result]:
groups.extend(glist)
return list(set(groups))
except ldap.LDAPError:
self.logger.exception("While connecting to the LDAP server")
return []
def on_permissions_stale(self, sender, user, permissions):
groups = self.get_user_groups(user.authenticated_email)
if self.debug:
self.logger.debug("Got groups %s for user %s", groups, user)
allowed_permissions = set()
for group in groups:
for perm in self.group_permissions.get(group, []):
allowed_permissions.add(perm)
if self.debug:
self.logger.debug("Setting permissions %s for user %s",
', '.join(allowed_permissions), user)
permissions.update([p[a] for a in allowed_permissions])
def init_app(app):
LdapGroups(app)
| mpl-2.0 | -6,353,090,569,641,761,000 | 37.394737 | 93 | 0.585675 | false | 4.002743 | false | false | false |
swatlab/uplift-analysis | src_code_metrics.py | 1 | 4848 | import re, csv, pytz, json, subprocess
from dateutil import parser
import pandas as pd
import get_bugs
from libmozdata import patchanalysis
# Execute a shell command
def shellCommand(command_str):
    cmd = subprocess.Popen(command_str.split(' '), stdout=subprocess.PIPE)
cmd_out, cmd_err = cmd.communicate()
return cmd_out
def loadReleaseDate():
print 'Loading Relase date ...'
rel_date_list = list()
rel_list = list()
with open('complexity_sna/data/release2commit.csv') as f:
csvreader = csv.reader(f)
for row in csvreader:
rel_num = row[0]
rel_date = re.sub(r'[^0-9]', '', row[2])
rel_date_list.append([rel_date, rel_num])
rel_list.append(rel_num)
return rel_date_list, list(reversed(rel_list))
def loadCommitDate():
print 'Loading commit date ...'
commit_date_dict = dict()
with open('commit_date.csv') as f:
csvreader = csv.reader(f, delimiter='\t')
for row in csvreader:
commit_id = row[0]
raw_time = row[1]
datetime_obj = parser.parse(raw_time)
time_str = datetime_obj.astimezone(pytz.utc).strftime('%Y%m%d')
commit_date_dict[commit_id] = time_str
return commit_date_dict
def correspondingRelease(commit_id, commit_date_dict, rel_date_list):
if commit_id in commit_date_dict:
commit_date = commit_date_dict[commit_id]
else:
for key in commit_date_dict:
if commit_id in key:
commit_date = commit_date_dict[key]
for item in rel_date_list:
if commit_date >= item[0]:
return item[1]
return rel_date_list[-1][1]
def removePrefix(path):
return re.sub(r'^[\/\.]+', '', path)
def loadMetrics4Releases(category, release_list):
rel_metric_dict = dict()
metric_names = None
for rel in release_list:
metric_dict = dict()
metric_file = 'complexity_sna/code_metrics/%s-%s.csv' %(category, rel.replace('.', '_'))
with open(metric_file, 'r') as f:
csvreader = csv.reader(f)
metric_names = next(csvreader, None)[1:]
for line in csvreader:
key = removePrefix(line[0])
metric_dict[key] = line[1:]
rel_metric_dict[rel] = metric_dict
return rel_metric_dict, metric_names
def extractSourceCodeMetrics(rel_date_list, rel_list, commit_date_dict, category):
# load metrics
rel_metric_dict, metric_names = loadMetrics4Releases(category, rel_list)
# map and compute metric values
result_list = list()
i = 0
bugs = get_bugs.get_all()
for bug in bugs:
if DEBUG and i > 5:
break
bug_id = bug['id']
commits, _ = patchanalysis.get_commits_for_bug(bug)
print bug_id
# extract metrics
raw_list = list()
metric_list = list()
for commit_id in commits:
i += 1
if DEBUG:
print ' ', commit_id
# corresponding (prior) release of a commit
rel_num = correspondingRelease(commit_id, commit_date_dict, rel_date_list)
# changed files in a commit
shell_res = shellCommand('hg -R %s log -r %s --template {files}\t{diffstat}' %(HG_REPO_PATH, commit_id)).split('\t')
raw_changed_files = shell_res[0]
cpp_changed_files = re.findall(r'(\S+\.(?:c|cpp|cc|cxx|h|hpp|hxx)\b)', raw_changed_files)
# map file/node to metrics
for a_file in cpp_changed_files:
metric_dict = rel_metric_dict[rel_num]
for node in metric_dict:
if node in a_file:
metrics = metric_dict[node]
raw_list.append(metrics)
# compute average/sum value for a specific attachment
if len(raw_list):
df = pd.DataFrame(raw_list, columns=metric_names).apply(pd.to_numeric)
for metric_name in metric_names:
metric_list.append(round(df[metric_name].mean(), 2))
result_list.append([bug_id] + metric_list)
else:
result_list.append([bug_id] + [0]*len(metric_names))
return pd.DataFrame(result_list, columns=['bug_id']+metric_names)
if __name__ == '__main__':
DEBUG = False
HG_REPO_PATH = '../firefox/'
# load data
rel_date_list, rel_list = loadReleaseDate()
commit_date_dict = loadCommitDate()
# extract metrics
df_complexity = extractSourceCodeMetrics(rel_date_list, rel_list, commit_date_dict, 'complexity')
df_sna = extractSourceCodeMetrics(rel_date_list, rel_list, commit_date_dict, 'sna')
df_code = pd.merge(df_complexity, df_sna, on='bug_id')
df_code.to_csv('independent_metrics/src_code_metrics.csv', index=False)
if DEBUG:
print df_code
| mpl-2.0 | 8,802,598,191,561,831,000 | 36.007634 | 128 | 0.591172 | false | 3.546452 | false | false | false |
VapourApps/va_master | va_master/utils/old_to_new_store.py | 1 | 3048 | import requests, json, functools
import base64
import os
path = 'http://127.0.0.1:8500'
from va_master.handlers.datastore_handler import DatastoreHandler
from va_master.consul_kv.datastore import ConsulStore
import tornado.ioloop
folder_pwd = os.path.join(os.path.dirname(os.path.realpath(__file__)), '')
datastore = ConsulStore()
datastore_handler = DatastoreHandler(datastore)
run_sync = tornado.ioloop.IOLoop.instance().run_sync
def datastore_get(handle, get_key = ''):
url = '%s/v1/kv/%s' % (path, handle)
print 'Url is : ', url
result = requests.get(url).text
if not result:
return
result = json.loads(result)
result = [x['Value'] for x in result]
result = [json.loads(base64.b64decode(x)) for x in result if x]
result = result[0]
if get_key:
result = result.get(get_key, result)
return result
def datastore_insert(handle, data):
url = '%s/v1/kv/%s' % (path, handle)
print 'Url is : ', url
result = requests.put(url, data = data)
print result
def old_to_new_datastore(object_name, object_handle_unformatted, object_handle_ids = [], get_key = '', special_data_parsing = None, special_data_kwargs = {}, old_key = ''):
if not old_key:
old_key = object_name + 's'
old_data = datastore_get(old_key, get_key)
if not old_data: return
for data in old_data:
print 'Data is : ', data
try:
handles = {x : data.get(x) for x in object_handle_ids}
except:
continue #This usually happens if the script has already been run.
object_handle = object_handle_unformatted.format(**handles)
if special_data_parsing:
data = special_data_parsing(data, **special_data_kwargs)
print 'Want to insert : ', data, ' in : ', object_handle, ' with handles : ', handles
run_sync(functools.partial(datastore_handler.insert_object, object_name, data = data, handle_data = handles))
def panel_parsing(data, user_type):
if type(data['panels']) == list:
data['panels'] = {user_type : []}
panel = {
'name' : data['name'],
'panels' : data['panels'][user_type],
'icon' : data['icon'],
'servers' : data['servers']
}
return panel
def convert_all():
old_to_new_datastore('user', 'users/{username}', ['username'])
old_to_new_datastore('admin', 'admins/{username}', ['username'])
old_to_new_datastore('admin_panel', 'panels/admin/{name}', ['name'], get_key = 'admin', special_data_parsing = panel_parsing, special_data_kwargs = {"user_type" : "admin"}, old_key = 'panels')
old_to_new_datastore('user_panel', 'panels/user/{name}', ['name'], get_key = 'user', special_data_parsing = panel_parsing, special_data_kwargs = {"user_type" : "user"}, old_key = 'panels')
old_to_new_datastore('user_group', 'user_groups/{group_name}', ['group_name'])
old_to_new_datastore('state', 'state/{name}', ['name'], get_key = 'states', old_key = 'init_vals')
if __name__ == '__main__' :
convert_all()
| gpl-3.0 | 821,649,943,494,431,100 | 37.582278 | 196 | 0.624016 | false | 3.367956 | false | false | false |
AparatTechnologies/django-connectwise | djconnectwise/api.py | 1 | 16448 | import logging
from django.conf import settings
from djconnectwise.utils import RequestSettings
import re
import requests
from retrying import retry
class ConnectWiseAPIError(Exception):
"""Raise this, not request exceptions."""
pass
class ConnectWiseRecordNotFoundError(ConnectWiseAPIError):
"""The record was not found."""
pass
CW_RESPONSE_MAX_RECORDS = 1000 # The greatest number of records ConnectWise
# will send us in one response.
RETRY_WAIT_EXPONENTIAL_MULTAPPLIER = 1000 # Initial number of milliseconds to
# wait before retrying a request.
RETRY_WAIT_EXPONENTIAL_MAX = 10000 # Maximum number of milliseconds to wait
# before retrying a request.
CW_DEFAULT_PAGE = 1 # CW Pagination is 1-indexed
CONTENT_DISPOSITION_RE = re.compile(
'^attachment; filename=\"{0,1}(.*?)\"{0,1}$'
)
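# Notes (added for illustration): with the @retry settings used below, the
# wait between attempts grows exponentially from about a second up to the 10
# second cap (per the retrying library's wait_exponential_* semantics). The
# regex above recovers the filename from a Content-Disposition header; for
# example 'attachment; filename="photo.jpg"' yields 'photo.jpg' (quotes are
# optional).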
logger = logging.getLogger(__name__)
class ConnectWiseAPIClient(object):
API = None
def __init__(
self,
company_id=None,
server_url=None,
api_public_key=None,
api_private_key=None,
api_codebase=None
):
if not company_id:
company_id = settings.CONNECTWISE_CREDENTIALS['company_id']
if not server_url:
server_url = settings.CONNECTWISE_SERVER_URL
if not api_public_key:
api_public_key = settings.CONNECTWISE_CREDENTIALS['api_public_key']
if not api_private_key:
api_private_key = settings.CONNECTWISE_CREDENTIALS[
'api_private_key'
]
if not api_codebase:
api_codebase = settings.CONNECTWISE_CREDENTIALS['api_codebase']
if not self.API:
raise ValueError('API not specified')
self.api_public_key = api_public_key
self.api_private_key = api_private_key
self.api_codebase = api_codebase
self.server_url = '{0}/{1}/apis/3.0/{2}/'.format(
server_url,
self.api_codebase,
self.API,
)
self.auth = ('{0}+{1}'.format(company_id, self.api_public_key),
'{0}'.format(self.api_private_key),)
self.request_settings = RequestSettings().get_settings()
self.timeout = self.request_settings['timeout']
def _endpoint(self, path):
return '{0}{1}'.format(self.server_url, path)
def _log_failed(self, response):
logger.error('FAILED API CALL: {0} - {1} - {2}'.format(
response.url, response.status_code, response.content))
def fetch_resource(self, endpoint_url, params=None, should_page=False,
retry_counter=None,
*args, **kwargs):
"""
A convenience method for issuing a request to the
specified REST endpoint.
Note: retry_counter is used specifically for testing.
It is a dict in the form {'count': 0} that is passed in
to verify the number of attempts that were made.
"""
@retry(stop_max_attempt_number=self.request_settings['max_attempts'],
wait_exponential_multiplier=RETRY_WAIT_EXPONENTIAL_MULTAPPLIER,
wait_exponential_max=RETRY_WAIT_EXPONENTIAL_MAX)
def _fetch_resource(endpoint_url, params=None, should_page=False,
retry_counter=None,
*args, **kwargs):
if retry_counter:
retry_counter['count'] += 1
if not params:
params = {}
if should_page:
params['pageSize'] = kwargs.get('page_size',
CW_RESPONSE_MAX_RECORDS)
params['page'] = kwargs.get('page', CW_DEFAULT_PAGE)
try:
endpoint = self._endpoint(endpoint_url)
logger.debug('Making GET request to {}'.format(endpoint))
response = requests.get(
endpoint,
params=params,
auth=self.auth,
timeout=self.timeout,
)
except requests.RequestException as e:
logger.error('Request failed: GET {}: {}'.format(endpoint, e))
raise ConnectWiseAPIError('{}'.format(e))
if 200 <= response.status_code < 300:
return response.json()
if response.status_code == 404:
msg = 'Resource {} was not found.'.format(response.url)
logger.warning(msg)
raise ConnectWiseRecordNotFoundError(msg)
else:
self._log_failed(response)
raise ConnectWiseAPIError(response.content)
        return _fetch_resource(endpoint_url, params=params,
                               should_page=should_page,
                               retry_counter=retry_counter,
                               *args, **kwargs)
class ProjectAPIClient(ConnectWiseAPIClient):
API = 'project'
ENDPOINT_PROJECTS = 'projects/'
def get_project(self, project_id):
endpoint_url = '{}/{}'.format(self.ENDPOINT_PROJECTS, project_id)
return self.fetch_resource(endpoint_url)
def get_projects(self, *args, **kwargs):
return self.fetch_resource(self.ENDPOINT_PROJECTS, should_page=True,
*args, **kwargs)
class CompanyAPIClient(ConnectWiseAPIClient):
API = 'company'
ENDPOINT_COMPANIES = 'companies'
ENDPOINT_COMPANY_STATUSES = '{}/statuses'.format(ENDPOINT_COMPANIES)
def by_id(self, company_id):
endpoint_url = '{}/{}'.format(self.ENDPOINT_COMPANIES, company_id)
return self.fetch_resource(endpoint_url)
def get_companies(self, *args, **kwargs):
if 'conditions' in kwargs:
kwargs['params'] = {
'conditions': kwargs['conditions']
}
return self.fetch_resource(self.ENDPOINT_COMPANIES, should_page=True,
*args, **kwargs)
def get_company_statuses(self, *args, **kwargs):
return self.fetch_resource(self.ENDPOINT_COMPANY_STATUSES,
should_page=True,
*args, **kwargs)
class SalesAPIClient(ConnectWiseAPIClient):
API = 'sales'
ENDPOINT_OPPORTUNITIES = 'opportunities'
ENDPOINT_OPPORTUNITY_STATUSES = \
'{}/statuses'.format(ENDPOINT_OPPORTUNITIES)
ENDPOINT_OPPORTUNITY_TYPES = \
'{}/types'.format(ENDPOINT_OPPORTUNITIES)
def by_id(self, opportunity_id):
endpoint_url = '{}/{}'.format(
self.ENDPOINT_OPPORTUNITIES, opportunity_id)
return self.fetch_resource(endpoint_url)
def get_opportunities(self, *args, **kwargs):
return self.fetch_resource(self.ENDPOINT_OPPORTUNITIES,
should_page=True,
*args, **kwargs)
def get_opportunity_statuses(self, *args, **kwargs):
return self.fetch_resource(self.ENDPOINT_OPPORTUNITY_STATUSES,
should_page=True,
*args, **kwargs)
def get_opportunity_types(self, *args, **kwargs):
return self.fetch_resource(self.ENDPOINT_OPPORTUNITY_TYPES,
should_page=True,
*args, **kwargs)
class SystemAPIClient(ConnectWiseAPIClient):
API = 'system'
# endpoints
ENDPOINT_MEMBERS = 'members/'
ENDPOINT_MEMBERS_IMAGE = 'members/{}/image'
ENDPOINT_MEMBERS_COUNT = 'members/count'
ENDPOINT_CALLBACKS = 'callbacks/'
ENDPOINT_INFO = 'info/'
def get_connectwise_version(self):
result = self.fetch_resource(self.ENDPOINT_INFO)
return result.get('version', '')
def get_members(self, *args, **kwargs):
return self.fetch_resource(self.ENDPOINT_MEMBERS,
should_page=True, *args, **kwargs)
def get_member_count(self):
return self.fetch_resource(self.ENDPOINT_MEMBERS_COUNT)
def get_callbacks(self, *args, **kwargs):
return self.fetch_resource(self.ENDPOINT_CALLBACKS,
should_page=True, *args, **kwargs)
def delete_callback(self, entry_id):
try:
endpoint = self._endpoint(
'{}{}'.format(self.ENDPOINT_CALLBACKS, entry_id)
)
logger.debug('Making DELETE request to {}'.format(endpoint))
response = requests.request(
'delete',
endpoint,
auth=self.auth,
timeout=self.timeout,
)
except requests.RequestException as e:
logger.error('Request failed: DELETE {}: {}'.format(endpoint, e))
raise ConnectWiseAPIError('{}'.format(e))
response.raise_for_status()
return response
def create_callback(self, callback_entry):
try:
endpoint = self._endpoint(self.ENDPOINT_CALLBACKS)
logger.debug('Making POST request to {}'.format(endpoint))
response = requests.request(
'post',
endpoint,
json=callback_entry,
auth=self.auth,
timeout=self.timeout,
)
except requests.RequestException as e:
logger.error('Request failed: POST {}: {}'.format(endpoint, e))
raise ConnectWiseAPIError('{}'.format(e))
if 200 <= response.status_code < 300:
return response.json()
else:
self._log_failed(response)
raise ConnectWiseAPIError(response.content)
def update_callback(self, callback_entry):
try:
endpoint = self._endpoint(
'callbacks/{0}'.format(callback_entry.entry_id)
)
logger.debug('Making PUT request to {}'.format(endpoint))
response = requests.request(
'put',
endpoint,
json=callback_entry,
auth=self.auth,
timeout=self.timeout,
)
except requests.RequestException as e:
logger.error('Request failed: PUT {}: {}'.format(endpoint, e))
raise ConnectWiseAPIError('{}'.format(e))
if 200 <= response.status_code < 300:
return response.json()
else:
self._log_failed(response)
raise ConnectWiseAPIError(response.content)
def get_member_by_identifier(self, identifier):
return self.fetch_resource('members/{0}'.format(identifier))
def get_member_image_by_identifier(self, identifier):
"""
Return a (filename, content) tuple.
"""
try:
endpoint = self._endpoint(
self.ENDPOINT_MEMBERS_IMAGE.format(identifier)
)
logger.debug('Making GET request to {}'.format(endpoint))
response = requests.get(
endpoint,
auth=self.auth,
timeout=self.timeout,
)
except requests.RequestException as e:
logger.error('Request failed: GET {}: {}'.format(endpoint, e))
raise ConnectWiseAPIError('{}'.format(e))
if 200 <= response.status_code < 300:
headers = response.headers
content_disposition_header = headers.get('Content-Disposition',
default='')
msg = "Got member '{}' image; size {} bytes " \
"and content-disposition header '{}'"
logger.info(msg.format(
identifier,
len(response.content),
content_disposition_header
))
attachment_filename = self._attachment_filename(
content_disposition_header)
return attachment_filename, response.content
else:
self._log_failed(response)
return None, None
def _attachment_filename(self, content_disposition):
"""
Return the attachment filename from the content disposition header.
If there's no match, return None.
"""
m = CONTENT_DISPOSITION_RE.match(content_disposition)
return m.group(1) if m else None
class ServiceAPIClient(ConnectWiseAPIClient):
API = 'service'
ENDPOINT_TICKETS = 'tickets'
ENDPOINT_BOARDS = 'boards'
ENDPOINT_PRIORITIES = 'priorities'
ENDPOINT_LOCATIONS = 'locations'
def __init__(self, *args, **kwargs):
self.extra_conditions = None
if 'extra_conditions' in kwargs:
self.extra_conditions = kwargs.pop('extra_conditions')
super().__init__(*args, **kwargs)
def get_conditions(self):
default_conditions = settings.DJCONNECTWISE_DEFAULT_TICKET_CONDITIONS
condition_list = [c for c in [
default_conditions, self.extra_conditions] if c]
conditions = ''
for condition in condition_list:
condition = '({})'.format(condition)
if conditions:
condition = ' AND {}'.format(condition)
conditions += condition
return conditions
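    # Illustration (added): with hypothetical default conditions
    # 'board/name="Sales"' and extra conditions 'status/name="Open"', this
    # returns '(board/name="Sales") AND (status/name="Open")'.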
def tickets_count(self):
params = dict(
conditions=self.get_conditions(),
)
return self.fetch_resource(
'{}/count'.format(self.ENDPOINT_TICKETS), params
).get('count', 0)
def get_ticket(self, ticket_id):
endpoint_url = '{}/{}'.format(self.ENDPOINT_TICKETS, ticket_id)
return self.fetch_resource(endpoint_url)
def get_tickets(self, *args, **kwargs):
params = dict(
conditions=self.get_conditions()
)
return self.fetch_resource(self.ENDPOINT_TICKETS, should_page=True,
params=params, *args, **kwargs)
def update_ticket_status(self, ticket_id, closed_flag, status):
"""
Update the ticket's closedFlag and status on the server.
"""
# Yeah, this schema is a bit bizarre. See CW docs at
# https://developer.connectwise.com/Manage/Developer_Guide#Patch
body = [
{
'op': 'replace',
'path': 'closedFlag',
'value': closed_flag
},
{
'op': 'replace',
'path': 'status',
'value': {
'id': status.id,
'name': status.name,
},
},
]
try:
endpoint = self._endpoint(
'{}/{}'.format(self.ENDPOINT_TICKETS, ticket_id)
)
logger.debug('Making PATCH request to {}'.format(endpoint))
response = requests.patch(
endpoint,
json=body,
auth=self.auth,
timeout=self.timeout,
)
except requests.RequestException as e:
logger.error('Request failed: PATCH {}: {}'.format(endpoint, e))
raise ConnectWiseAPIError('{}'.format(e))
if 200 <= response.status_code < 300:
return response.json()
else:
self._log_failed(response)
raise ConnectWiseAPIError(response.content)
def get_statuses(self, board_id, *args, **kwargs):
"""
Returns the status types associated with the specified board.
"""
endpoint_url = '{}/{}/statuses'.format(self.ENDPOINT_BOARDS, board_id)
return self.fetch_resource(endpoint_url, should_page=True,
*args, **kwargs)
def get_boards(self, *args, **kwargs):
return self.fetch_resource(self.ENDPOINT_BOARDS, should_page=True,
*args, **kwargs)
def get_board(self, board_id):
return self.fetch_resource('{}/{}'.format(
self.ENDPOINT_BOARDS, board_id)
)
def get_priorities(self, *args, **kwargs):
return self.fetch_resource(self.ENDPOINT_PRIORITIES, should_page=True,
*args, **kwargs)
def get_teams(self, board_id, *args, **kwargs):
endpoint = '{}/{}/teams/'.format(self.ENDPOINT_BOARDS, board_id)
return self.fetch_resource(endpoint, should_page=True, *args, **kwargs)
def get_locations(self, *args, **kwargs):
return self.fetch_resource(self.ENDPOINT_LOCATIONS, should_page=True,
*args, **kwargs)
| mit | -2,677,239,882,592,082,400 | 34.912664 | 79 | 0.557515 | false | 4.284449 | false | false | false |
mark-rushakoff/FlackOverstow | grabber.py | 1 | 1835 | #!/usr/bin/env python
__author__ = "Mark Rushakoff"
__license__ = "MIT"
import sys
import urllib2
import re
import StringIO
import gzip
try:
import simplejson as json
except ImportError:
try:
import json
except ImportError:
sys.stderr.write("simplejson or json required for operation. Aborting.\n")
sys.exit()
try:
from BeautifulSoup import BeautifulStoneSoup as bss
except ImportError:
sys.stderr.write("BeautifulSoup required to format data")
sys.stderr.write("Try `easy_install beautifulsoup`")
sys.exit()
stripHtmlTags = re.compile(r"<[^>]*>")
compressWhiteSpace = re.compile(r"\s+")
def format(text):
return bss(compressWhiteSpace.sub(' ', stripHtmlTags.sub('', text)), convertEntities=bss.ALL_ENTITIES)
class Grabber(object):
""" Class to obtain JSON data from Stack API """
_api = '1.0'
def __init__(self, site, user_id, api_key=None):
self.site = site
self.user_id = user_id
self.api_key = api_key
def _grab(self, users_arg):
url = 'http://api.%s/%s/users/%s/%s?body=true&pagesize=100' % (self.site, self._api, self.user_id, users_arg)
if self.api_key is not None:
url += '&key=%s' % self.api_key
content = StringIO.StringIO(urllib2.urlopen(url).read())
return gzip.GzipFile(fileobj=content).read()
def minimal_text(self, users_arg):
""" return a list of just the simple text of the `body`s of the users_arg section of the pulled json """
json_data = self._grab(users_arg)
answers = [answer['body'] for answer in json.loads(json_data)[users_arg]]
return [str(format(answer)) for answer in answers]
if __name__ == "__main__":
grabber = Grabber('stackoverflow.com', 126042)
for g in grabber.minimal_text('answers'):
print g
| mit | -9,221,221,283,494,337,000 | 29.583333 | 117 | 0.643052 | false | 3.475379 | false | false | false |
ddico/odoo | addons/l10n_hr/__manifest__.py | 1 | 1748 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Author: Goran Kliska
# mail: goran.kliska(AT)slobodni-programi.hr
# Copyright (C) 2011- Slobodni programi d.o.o., Zagreb
# Contributions:
# Tomislav Bošnjaković, Storm Computers d.o.o. :
# - account types
{
"name": "Croatia - Accounting (RRIF 2012)",
"description": """
Croatian localisation.
======================
Author: Goran Kliska, Slobodni programi d.o.o., Zagreb
https://www.slobodni-programi.hr
Contributions:
  Tomislav Bošnjaković, Storm Computers: account types
  Ivan Vađić, Slobodni programi: account types
Description:
Croatian Chart of Accounts (RRIF ver.2012)
RRIF chart of accounts for entrepreneurs, 2012 edition.
Account types
Chart of accounts based on RRIF, adapted by shortening account names and adding analytic accounts
Tax groups following the tax return
Taxes on the VAT form
Other taxes
Basic fiscal positions
Data sources:
https://www.rrif.hr/dok/preuzimanje/rrif-rp2011.rar
https://www.rrif.hr/dok/preuzimanje/rrif-rp2012.rar
""",
"version": "13.0",
"author": "OpenERP Croatian Community",
'category': 'Accounting/Localizations',
'depends': [
'account',
],
'data': [
'data/l10n_hr_chart_data.xml',
'data/account.account.type.csv',
'data/account.account.template.csv',
'data/account_chart_tag_data.xml',
'data/account.tax.group.csv',
'data/account_tax_report_data.xml',
'data/account_tax_template_data.xml',
'data/account_tax_fiscal_position_data.xml',
'data/account_chart_template_data.xml',
],
'demo': [
'demo/demo_company.xml',
],
"active": False,
}
| agpl-3.0 | 3,099,756,170,522,868,700 | 26.603175 | 80 | 0.66015 | false | 2.654962 | false | false | false |
PrzemyslawUrbanczyk/pu_zadanie1 | fixture/contact.py | 1 | 10214 | import re
from model.contact import Contact
class ContactHelper:
def __init__(self, app):
self.app = app
def return_to_home_page(self):
wd = self.app.wd
wd.find_element_by_link_text("home page").click()
def create(self, contact):
wd = self.app.wd
# init contact creation
wd.find_element_by_link_text("nowy wpis").click()
# fill contact form
self.fill_contact_form(contact)
# submit contact creation
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.contact_cache = None
def fill_contact_form(self, contact):
wd = self.app.wd
self.change_field_value("firstname", contact.first_name)
self.change_field_value("middlename", contact.middle_name)
self.change_field_value("lastname", contact.last_name)
self.change_field_value("nickname", contact.nickname)
self.change_field_value("title", contact.title)
self.change_field_value("company", contact.company)
self.change_field_value("address", contact.address)
self.change_field_value("home", contact.home_number)
self.change_field_value("mobile", contact.mobile_number)
self.change_field_value("work", contact.work_number)
self.change_field_value("fax", contact.fax)
self.change_field_value("email", contact.first_email)
self.change_field_value("email2", contact.second_email)
self.change_field_value("email3", contact.third_email)
self.change_field_value("homepage", contact.wwwpage)
if not wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[16]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[16]").click()
if not wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[2]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[2]").click()
self.change_field_value("byear", contact.birth_year)
if not wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[17]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[17]").click()
if not wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[6]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[6]").click()
self.change_field_value("ayear", contact.anniversary_year)
self.change_field_value("address2", contact.second_address)
self.change_field_value("phone2", contact.second_private_number)
self.change_field_value("notes", contact.notes)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def delete_first_contact(self):
self.delete_contact_by_index(0)
def delete_contact_by_index(self, index):
wd = self.app.wd
self.open_contacts_page()
self.select_contact_by_index(index)
# confirm deletion
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.contact_cache = None
def delete_contact_by_id(self, id):
wd = self.app.wd
self.app.open_home_page()
# open deletion
wd.find_element_by_css_selector("input[value='%s']" % id).click()
# submit deletion
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.app.open_home_page()
self.group_cache = None
def select_first_contact(self):
self.select_contact_by_index(0)
    def modify_first_contact(self, new_contact_data):
        self.modify_contact_by_index(0, new_contact_data)
def modify_contact_by_index(self, index, new_contact_data):
wd = self.app.wd
self.open_contacts_page()
row = wd.find_elements_by_name("entry")[index]
cells = row.find_elements_by_tag_name("td")
cells[7].click()
# modify contact form
self.fill_contact_form(new_contact_data)
# submit contact creation
wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
self.contact_cache = None
def modify_contact_by_id(self, id, contact):
wd = self.app.wd
self.app.open_home_page()
# open modification form
checkbox = wd.find_element_by_css_selector("input[value='%s']" % id)
row = checkbox.find_element_by_xpath("./../..")
cells = row.find_elements_by_tag_name("td")
cells[7].click()
# fill group form
self.fill_contact_form(contact)
# submit changes
wd.find_element_by_name("update").click()
def select_contact_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_contact_by_id(self, id):
wd = self.app.wd
row = wd.find_element_by_css_selector("input[value='%s']" % id)
return row
def open_contacts_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("index.php") and len(wd.find_elements_by_id("MassCB")) > 0):
wd.find_element_by_link_text("strona główna").click()
def count(self):
wd = self.app.wd
self.open_contacts_page()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.open_contacts_page()
self.contact_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
firstname = cells[2].text
                lastname = cells[1].text
id = row.find_element_by_name("selected[]").get_attribute("value")
all_emails = cells[4].text
all_phones = cells[5].text
address = cells[3].text
self.contact_cache.append(Contact(first_name=firstname, last_name=lastname, id=id,
all_phones_from_home_page=all_phones,
all_emails_from_home_page=all_emails, address=address))
return list(self.contact_cache)
def open_contact_to_edit_by_index(self, index):
wd = self.app.wd
self.app.open_home_page()
row = wd.find_elements_by_name("entry")[index]
cells = row.find_elements_by_tag_name("td")[7]
cells.find_element_by_tag_name("a").click()
def open_contact_view_by_index(self, index):
wd = self.app.wd
self.open_contacts_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[6]
        cell.find_element_by_tag_name("a").click()
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
homephone = wd.find_element_by_name("home").get_attribute("value")
workphone = wd.find_element_by_name("work").get_attribute("value")
mobilephone = wd.find_element_by_name("mobile").get_attribute("value")
secondaryphone = wd.find_element_by_name("phone2").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
address = wd.find_element_by_name("address").get_attribute("value")
return Contact(first_name=firstname, last_name=lastname, id=id,
home_number=homephone, mobile_number=mobilephone, work_number=workphone,
second_private_number=secondaryphone,
first_email=email, second_email=email2, third_email=email3, address=address)
def get_contact_view_page(self, index):
wd = self.app.wd
self.open_contact_to_view_by_index(index)
text = wd.find_element_by_id("content").text
homephone = re.search("H: (.*)", text).group(1)
workphone = re.search("W: (.*)", text).group(1)
mobilephone = re.search("M: (.*)", text).group(1)
        secondaryphone = re.search("P: (.*)", text).group(1)
        return Contact(home_number=homephone, work_number=workphone,
                       mobile_number=mobilephone, second_private_number=secondaryphone)
def open_contact_to_view_by_index(self, index):
wd = self.app.wd
self.app.open_home_page()
row = wd.find_elements_by_name("entry")[index]
cells = row.find_elements_by_tag_name("td")[6]
cells.find_element_by_tag_name("a").click()
def add_contact_to_group_by_id(self, id, group):
wd = self.app.wd
if not len(wd.find_elements_by_name("searchstring")) > 0:
self.app.open_home_page()
        # select the contact to be added to the group
        wd.find_element_by_css_selector("input[value='%s']" % id).click()
        number = group.id
        wd.find_element_by_xpath("//select[@name='to_group']//option[@value='%s']" % number).click()
wd.find_element_by_name("add").click()
self.app.open_home_page()
self.contact_cache = None
    def add_contact_to_group(self, contact, group):
wd = self.app.wd
if not len(wd.find_elements_by_name("searchstring")) > 0:
self.app.open_home_page()
# add new contact
wd.find_element_by_link_text("add new").click()
        self.fill_contact_form(contact)
number = group.id
wd.find_element_by_xpath("//div[@id='content']/form/select[5]//option[@value='%s']" % number).click()
# accept
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.app.open_home_page()
self.contact_cache = None
| apache-2.0 | -1,192,542,450,972,658,000 | 42.455319 | 109 | 0.600372 | false | 3.431452 | false | false | false |
wdv4758h/ZipPy | edu.uci.python.test/regressiontests/test_grammar.py | 1 | 33140 | # Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
#from test.support import run_unittest, check_syntax_error
# Currently test.support cannot be imported
import unittest
#import sys
# testing import *
#from sys import *
class TokenTests(unittest.TestCase):
def testBackslash(self):
# Backslash means line continuation:
x = 1 \
+ 1
self.assertEqual(x, 2, 'backslash for line continuation')
        # Backslash does not mean continuation in comments :\
x = 0
self.assertEqual(x, 0, 'backslash ending comment')
def testPlainIntegers(self):
self.assertEqual(type(000), type(0))
self.assertEqual(0xff, 255)
self.assertEqual(0o377, 255)
self.assertEqual(2147483647, 0o17777777777)
self.assertEqual(0b1001, 9)
# "0x" is not a valid literal
self.assertRaises(SyntaxError, eval, "0x")
from sys import maxsize
if maxsize == 2147483647:
self.assertEqual(-2147483647-1, -0o20000000000)
# XXX -2147483648
self.assertTrue(0o37777777777 > 0)
self.assertTrue(0xffffffff > 0)
self.assertTrue(0b1111111111111111111111111111111 > 0)
for s in ('2147483648', '0o40000000000', '0x100000000',
'0b10000000000000000000000000000000'):
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
elif maxsize == 9223372036854775807:
self.assertEqual(-9223372036854775807-1, -0o1000000000000000000000)
self.assertTrue(0o1777777777777777777777 > 0)
self.assertTrue(0xffffffffffffffff > 0)
self.assertTrue(0b11111111111111111111111111111111111111111111111111111111111111 > 0)
for s in '9223372036854775808', '0o2000000000000000000000', \
'0x10000000000000000', \
'0b100000000000000000000000000000000000000000000000000000000000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
else:
self.fail('Weird maxsize value %r' % maxsize)
def testLongIntegers(self):
x = 0
x = 0xffffffffffffffff
x = 0Xffffffffffffffff
x = 0o77777777777777777
x = 0O77777777777777777
x = 123456789012345678901234567890
x = 0b100000000000000000000000000000000000000000000000000000000000000000000
x = 0B111111111111111111111111111111111111111111111111111111111111111111111
def testFloats(self):
x = 3.14
x = 314.
x = 0.314
# XXX x = 000.314
x = .314
x = 3e14
x = 3E14
x = 3e-14
x = 3e+14
x = 3.e14
x = .3e14
x = 3.1e4
def testStringLiterals(self):
x = ''; y = ""; self.assertTrue(len(x) == 0 and x == y)
x = '\''; y = "'"; self.assertTrue(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; self.assertTrue(len(x) == 1 and x == y and ord(x) == 34)
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
self.assertTrue(len(x) == 24 and x == y)
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
self.assertTrue(len(x) == 24 and x == y)
x = """
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
self.assertEqual(x, y)
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''
self.assertEqual(x, y)
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"
self.assertEqual(x, y)
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'
self.assertEqual(x, y)
# def testEllipsis(self):
# x = ...
# self.assertTrue(x is Ellipsis)
# self.assertRaises(SyntaxError, eval, ".. .")
class GrammarTests(unittest.TestCase):
# single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
# XXX can't test in a script -- this rule is only used when interactive
# file_input: (NEWLINE | stmt)* ENDMARKER
# Being tested as this very moment this very module
# expr_input: testlist NEWLINE
# XXX Hard to test -- used only in calls to input()
def testEvalInput(self):
# testlist ENDMARKER
x = eval('1, 0 or 1')
def testFuncdef(self):
### [decorators] 'def' NAME parameters ['->' test] ':' suite
### decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
### decorators: decorator+
### parameters: '(' [typedargslist] ')'
### typedargslist: ((tfpdef ['=' test] ',')*
### ('*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef)
### | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
### tfpdef: NAME [':' test]
### varargslist: ((vfpdef ['=' test] ',')*
### ('*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef)
### | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
### vfpdef: NAME
def f1(): pass
f1()
f1(*())
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
self.assertEqual(f2.__code__.co_varnames, ('one_argument',))
self.assertEqual(f3.__code__.co_varnames, ('two', 'arguments'))
def a1(one_arg,): pass
def a2(two, args,): pass
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
f1()
f2(1)
f2(1,)
f3(1, 2)
f3(1, 2,)
v0()
v0(1)
v0(1,)
v0(1,2)
v0(1,2,3,4,5,6,7,8,9,0)
v1(1)
v1(1,)
v1(1,2)
v1(1,2,3)
v1(1,2,3,4,5,6,7,8,9,0)
v2(1,2)
v2(1,2,3)
v2(1,2,3,4)
v2(1,2,3,4,5,6,7,8,9,0)
def d01(a=1): pass
d01()
d01(1)
d01(*(1,))
d01(**{'a':2})
def d11(a, b=1): pass
d11(1)
d11(1, 2)
d11(1, **{'b':2})
def d21(a, b, c=1): pass
d21(1, 2)
d21(1, 2, 3)
d21(*(1, 2, 3))
d21(1, *(2, 3))
d21(1, 2, *(3,))
d21(1, 2, **{'c':3})
def d02(a=1, b=2): pass
d02()
d02(1)
d02(1, 2)
d02(*(1, 2))
d02(1, *(2,))
d02(1, **{'b':2})
d02(**{'a': 1, 'b': 2})
def d12(a, b=1, c=2): pass
d12(1)
d12(1, 2)
d12(1, 2, 3)
def d22(a, b, c=1, d=2): pass
d22(1, 2)
d22(1, 2, 3)
d22(1, 2, 3, 4)
def d01v(a=1, *rest): pass
d01v()
d01v(1)
d01v(1, 2)
d01v(*(1, 2, 3, 4))
d01v(*(1,))
d01v(**{'a':2})
def d11v(a, b=1, *rest): pass
d11v(1)
d11v(1, 2)
d11v(1, 2, 3)
def d21v(a, b, c=1, *rest): pass
d21v(1, 2)
d21v(1, 2, 3)
d21v(1, 2, 3, 4)
d21v(*(1, 2, 3, 4))
d21v(1, 2, **{'c': 3})
def d02v(a=1, b=2, *rest): pass
d02v()
d02v(1)
d02v(1, 2)
d02v(1, 2, 3)
d02v(1, *(2, 3, 4))
d02v(**{'a': 1, 'b': 2})
def d12v(a, b=1, c=2, *rest): pass
d12v(1)
d12v(1, 2)
d12v(1, 2, 3)
d12v(1, 2, 3, 4)
d12v(*(1, 2, 3, 4))
d12v(1, 2, *(3, 4, 5))
d12v(1, *(2,), **{'c': 3})
def d22v(a, b, c=1, d=2, *rest): pass
d22v(1, 2)
d22v(1, 2, 3)
d22v(1, 2, 3, 4)
d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
# keyword argument type tests
try:
str('x', **{b'foo':1 })
except TypeError:
pass
else:
self.fail('Bytes should not work as keyword argument names')
# keyword only argument tests
# def pos0key1(*, key): return key
# pos0key1(key=100)
# def pos2key2(p1, p2, *, k1, k2=100): return p1,p2,k1,k2
# pos2key2(1, 2, k1=100)
# pos2key2(1, 2, k1=100, k2=200)
# pos2key2(1, 2, k2=100, k1=200)
# def pos2key2dict(p1, p2, *, k1=100, k2, **kwarg): return p1,p2,k1,k2,kwarg
# pos2key2dict(1,2,k2=100,tokwarg1=100,tokwarg2=200)
# pos2key2dict(1,2,tokwarg1=100,tokwarg2=200, k2=100)
# keyword arguments after *arglist
def f(*args, **kwargs):
return args, kwargs
self.assertEqual(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
{'x':2, 'y':5}))
self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
# argument annotation tests
# def f(x) -> list: pass
# self.assertEqual(f.__annotations__, {'return': list})
# def f(x:int): pass
# self.assertEqual(f.__annotations__, {'x': int})
# def f(*x:str): pass
# self.assertEqual(f.__annotations__, {'x': str})
# def f(**x:float): pass
# self.assertEqual(f.__annotations__, {'x': float})
# def f(x, y:1+2): pass
# self.assertEqual(f.__annotations__, {'y': 3})
# def f(a, b:1, c:2, d): pass
# self.assertEqual(f.__annotations__, {'b': 1, 'c': 2})
# def f(a, b:1, c:2, d, e:3=4, f=5, *g:6): pass
# self.assertEqual(f.__annotations__,
# {'b': 1, 'c': 2, 'e': 3, 'g': 6})
# def f(a, b:1, c:2, d, e:3=4, f=5, *g:6, h:7, i=8, j:9=10,
# **k:11) -> 12: pass
# self.assertEqual(f.__annotations__,
# {'b': 1, 'c': 2, 'e': 3, 'g': 6, 'h': 7, 'j': 9,
# 'k': 11, 'return': 12})
# Check for SF Bug #1697248 - mixing decorators and a return annotation
# def null(x): return x
# @null
# def f(x) -> list: pass
# self.assertEqual(f.__annotations__, {'return': list})
#
# # test MAKE_CLOSURE with a variety of oparg's
# closure = 1
# def f(): return closure
# def f(x=1): return closure
# def f(*, k=1): return closure
# def f() -> int: return closure
# Check ast errors in *args and *kwargs
# check_syntax_error(self, "f(*g(1=2))")
# Currently test.support module is not supported, so check_syntax_error is handled as the following
self.assertRaises(SyntaxError, compile, "f(*g(1=2))", '<test string>', 'exec')
# check_syntax_error(self, "f(**g(1=2))")
self.assertRaises(SyntaxError, compile, "f(**g(1=2))", '<test string>', 'exec')
def testLambdef(self):
### lambdef: 'lambda' [varargslist] ':' test
l1 = lambda : 0
self.assertEqual(l1(), 0)
l2 = lambda : a[d] # XXX just testing the expression
l3 = lambda : [2 < x for x in [-1, 3, 0]]
self.assertEqual(l3(), [0, 1, 0])
l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
self.assertEqual(l4(), 1)
l5 = lambda x, y, z=2: x + y + z
self.assertEqual(l5(1, 2), 5)
self.assertEqual(l5(1, 2, 3), 6)
# check_syntax_error(self, "lambda x: x = 2")
# Currently test.support module is not supported, so check_syntax_error is handled as the following
self.assertRaises(SyntaxError, compile, "lambda x: x = 2", '<test string>', 'exec')
# check_syntax_error(self, "lambda (None,): None")
self.assertRaises(SyntaxError, compile, "lambda (None,): None", '<test string>', 'exec')
# l6 = lambda x, y, *, k=20: x+y+k
# self.assertEqual(l6(1,2), 1+2+20)
# self.assertEqual(l6(1,2,k=10), 1+2+10)
### stmt: simple_stmt | compound_stmt
# Tested below
def testSimpleStmt(self):
### simple_stmt: small_stmt (';' small_stmt)* [';']
x = 1; pass; del x
def foo():
# verify statements that end with semi-colons
x = 1; pass; del x;
foo()
### small_stmt: expr_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt
# Tested below
def testExprStmt(self):
# (exprlist '=')* exprlist
1
1, 2, 3
x = 1
x = 1, 2, 3
x = y = z = 1, 2, 3
x, y, z = 1, 2, 3
abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
# check_syntax_error(self, "x + 1 = 1")
# Currently test.support module is not supported, so check_syntax_error is handled as the following
self.assertRaises(SyntaxError, compile, "x + 1 = 1", '<test string>', 'exec')
# check_syntax_error(self, "a + 1 = b + 2")
self.assertRaises(SyntaxError, compile, "a + 1 = b + 2", '<test string>', 'exec')
def testDelStmt(self):
# 'del' exprlist
abc = [1,2,3]
x, y, z = abc
xyz = x, y, z
del abc
del x, y, (z, xyz)
def testPassStmt(self):
# 'pass'
pass
# flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
def testBreakStmt(self):
# 'break'
while 1: break
def testContinueStmt(self):
# 'continue'
i = 1
while i: i = 0; continue
msg = ""
while not msg:
msg = "ok"
try:
continue
msg = "continue failed to continue inside try"
except:
msg = "continue inside try called except block"
if msg != "ok":
self.fail(msg)
msg = ""
while not msg:
msg = "finally block not called"
try:
continue
finally:
msg = "ok"
if msg != "ok":
self.fail(msg)
def test_break_continue_loop(self):
# This test warrants an explanation. It is a test specifically for SF bugs
# #463359 and #462937. The bug is that a 'break' statement executed or
# exception raised inside a try/except inside a loop, *after* a continue
# statement has been executed in that loop, will cause the wrong number of
# arguments to be popped off the stack and the instruction pointer reset to
# a very small number (usually 0.) Because of this, the following test
        # *must* be written as a function, and the tracking vars *must* be function
# arguments with default values. Otherwise, the test will loop and loop.
def test_inner(extra_burning_oil = 1, count=0):
big_hippo = 2
while big_hippo:
count += 1
try:
if extra_burning_oil and big_hippo == 1:
extra_burning_oil -= 1
break
big_hippo -= 1
continue
except:
raise
if count > 2 or big_hippo != 1:
self.fail("continue then break in try/except in loop broken!")
test_inner()
def testReturn(self):
# 'return' [testlist]
def g1(): return
def g2(): return 1
g1()
x = g2()
# check_syntax_error(self, "class foo:return 1")
# Currently test.support module is not supported, so check_syntax_error is handled as the following
self.assertRaises(SyntaxError, compile, "class foo:return 1", '<test string>', 'exec')
def testYield(self):
# check_syntax_error(self, "class foo:yield 1")
# Currently test.support module is not supported, so check_syntax_error is handled as the following
self.assertRaises(SyntaxError, compile, "class foo:yield 1", '<test string>', 'exec')
def testRaise(self):
# 'raise' test [',' test]
try: raise RuntimeError('just testing')
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
def testImport(self):
# 'import' dotted_as_names
import sys
import time, sys
# 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
from time import time
from time import (time)
# not testable inside a function, but already done at top of the module
# from sys import *
from sys import path, argv
from sys import (path, argv)
from sys import (path, argv,)
def testGlobal(self):
# 'global' NAME (',' NAME)*
global a
global a, b
global one, two, three, four, five, six, seven, eight, nine, ten
# def testNonlocal(self):
# # 'nonlocal' NAME (',' NAME)*
# x = 0
# y = 0
# def f():
# nonlocal x
# nonlocal x, y
def testAssert(self):
# assertTruestmt: 'assert' test [',' test]
assert 1
assert 1, 1
assert lambda x:x
assert 1, lambda x:x+1
try:
assert True
except AssertionError as e:
self.fail("'assert True' should not have raised an AssertionError")
try:
assert True, 'this should always pass'
except AssertionError as e:
self.fail("'assert True, msg' should not have "
"raised an AssertionError")
# these tests fail if python is run with -O, so check __debug__
@unittest.skipUnless(__debug__, "Won't work if __debug__ is False")
def testAssert2(self):
try:
assert 0, "msg"
except AssertionError as e:
self.assertEqual(e.args[0], "msg")
else:
self.fail("AssertionError not raised by assert 0")
try:
assert False
except AssertionError as e:
self.assertEqual(len(e.args), 0)
else:
self.fail("AssertionError not raised by 'assert False'")
### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
# Tested below
def testIf(self):
# 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
if 1: pass
if 1: pass
else: pass
if 0: pass
elif 0: pass
if 0: pass
elif 0: pass
elif 0: pass
elif 0: pass
else: pass
def testWhile(self):
# 'while' test ':' suite ['else' ':' suite]
while 0: pass
while 0: pass
else: pass
# Issue1920: "while 0" is optimized away,
# ensure that the "else" clause is still present.
x = 0
while 0:
x = 1
else:
x = 2
self.assertEqual(x, 2)
def testFor(self):
# 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
for i in 1, 2, 3: pass
for i, j, k in (): pass
else: pass
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n = n+1
return self.sofar[i]
n = 0
for x in Squares(10): n = n+x
if n != 285:
self.fail('for over growing sequence')
result = []
for x, in [(1,), (2,), (3,)]:
result.append(x)
self.assertEqual(result, [1, 2, 3])
def testTry(self):
### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
### | 'try' ':' suite 'finally' ':' suite
### except_clause: 'except' [expr ['as' expr]]
try:
1/0
except ZeroDivisionError:
pass
else:
pass
try: 1/0
except EOFError: pass
except TypeError as msg: pass
except RuntimeError as msg: pass
except: pass
else: pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError): pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError) as msg: pass
try: pass
finally: pass
def testSuite(self):
# simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
if 1: pass
if 1:
pass
if 1:
#
#
#
pass
pass
#
pass
#
def testTest(self):
### and_test ('or' and_test)*
### and_test: not_test ('and' not_test)*
### not_test: 'not' not_test | comparison
if not 1: pass
if 1 and 1: pass
if 1 or 1: pass
if not not not 1: pass
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
def testComparison(self):
### comparison: expr (comp_op expr)*
### comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is'|'is' 'not'
if 1: pass
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
if 1 >= 1: pass
if 1 is 1: pass
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 in 1 not in 1 is 1 is not 1: pass
def testBinaryMaskOps(self):
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
def testShiftOps(self):
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
def testAdditiveOps(self):
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
def testMultiplicativeOps(self):
x = 1 * 1
x = 1 / 1
x = 1 % 1
x = 1 / 1 * 1 % 1
def testUnaryOps(self):
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
x = -1*1/1 + 1*1 - ---1*1
def testSelectors(self):
### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
### subscript: expr | [expr] ':' [expr]
import sys, time
c = sys.path[0]
x = time.time()
x = sys.modules['time'].time()
a = '01234'
c = a[0]
c = a[-1]
s = a[0:5]
s = a[:5]
s = a[0:]
s = a[:]
s = a[-5:]
s = a[:-1]
s = a[-4:-3]
# A rough test of SF bug 1333982. http://python.org/sf/1333982
# The testing here is fairly incomplete.
# Test cases should include: commas with 1 and 2 colons
d = {}
d[1] = 1
d[1,] = 2
d[1,2] = 3
d[1,2,3] = 4
L = list(d)
L.sort(key=lambda x: x if isinstance(x, tuple) else ())
self.assertEqual(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
def testAtoms(self):
### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictsetmaker] '}' | NAME | NUMBER | STRING
### dictsetmaker: (test ':' test (',' test ':' test)* [',']) | (test (',' test)* [','])
x = (1)
x = (1 or 2 or 3)
x = (1 or 2 or 3, 2, 3)
x = []
x = [1]
x = [1 or 2 or 3]
x = [1 or 2 or 3, 2, 3]
x = []
x = {}
x = {'one': 1}
x = {'one': 1,}
x = {'one' or 'two': 1 or 2}
x = {'one': 1, 'two': 2}
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = {'one'}
x = {'one', 1,}
x = {'one', 'two', 'three'}
x = {2, 3, 4,}
x = x
x = 'x'
x = 123
### exprlist: expr (',' expr)* [',']
### testlist: test (',' test)* [',']
# These have been exercised enough above
def testClassdef(self):
# 'class' NAME ['(' [testlist] ')'] ':' suite
class B: pass
class B2(): pass
class C1(B): pass
class C2(B): pass
class D(C1, C2, B): pass
class C:
def meth1(self): pass
def meth2(self, arg): pass
def meth3(self, a1, a2): pass
# decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
# decorators: decorator+
# decorated: decorators (classdef | funcdef)
def class_decorator(x): return x
@class_decorator
class G: pass
def testDictcomps(self):
# dictorsetmaker: ( (test ':' test (comp_for |
# (',' test ':' test)* [','])) |
# (test (comp_for | (',' test)* [','])) )
nums = [1, 2, 3]
self.assertEqual({i:i+1 for i in nums}, {1: 2, 2: 3, 3: 4})
def testListcomps(self):
# list comprehension tests
nums = [1, 2, 3, 4, 5]
strs = ["Apple", "Banana", "Coconut"]
spcs = [" Apple", " Banana ", "Coco nut "]
self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco nut'])
self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
self.assertEqual([(i, s) for i in nums for s in strs],
[(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
(2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
(4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
[(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
[[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
def test_in_func(l):
return [0 < x < 3 for x in l if x > 2]
self.assertEqual(test_in_func(nums), [False, False, False])
def test_nested_front():
self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
[[1, 2], [3, 4], [5, 6]])
test_nested_front()
# check_syntax_error(self, "[i, s for i in nums for s in strs]")
# Currently test.support module is not supported, so check_syntax_error is handled as the following
self.assertRaises(SyntaxError, compile, "[i, s for i in nums for s in strs]", '<test string>', 'exec')
# check_syntax_error(self, "[x if y]")
self.assertRaises(SyntaxError, compile, "[x if y]", '<test string>', 'exec')
suppliers = [
(1, "Boeing"),
(2, "Ford"),
(3, "Macdonalds")
]
parts = [
(10, "Airliner"),
(20, "Engine"),
(30, "Cheeseburger")
]
suppart = [
(1, 10), (1, 20), (2, 20), (3, 30)
]
x = [
(sname, pname)
for (sno, sname) in suppliers
for (pno, pname) in parts
for (sp_sno, sp_pno) in suppart
if sno == sp_sno and pno == sp_pno
]
self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
('Macdonalds', 'Cheeseburger')])
def testGenexps(self):
# generator expression tests
g = ([x for x in range(10)] for x in range(1))
self.assertEqual(next(g), [x for x in range(10)])
try:
next(g)
self.fail('should produce StopIteration exception')
except StopIteration:
pass
a = 1
try:
g = (a for d in a)
next(g)
self.fail('should produce TypeError')
except TypeError:
pass
self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
a = [x for x in range(10)]
b = (x for x in (y for y in a))
self.assertEqual(sum(b), sum([x for x in range(10)]))
self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
# check_syntax_error(self, "foo(x for x in range(10), 100)")
# Currently test.support module is not supported, so check_syntax_error is handled as the following
self.assertRaises(SyntaxError, compile, "foo(x for x in range(10), 100)", '<test string>', 'exec')
# check_syntax_error(self, "foo(100, x for x in range(10))")
self.assertRaises(SyntaxError, compile, "foo(100, x for x in range(10))", '<test string>', 'exec')
def testComprehensionSpecials(self):
        # test for outermost iterable precomputation
x = 10; g = (i for i in range(x)); x = 5
self.assertEqual(len(list(g)), 10)
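        # The outermost iterable (range(x) with x == 10 at creation time) is
        # evaluated when the genexp is created, so rebinding x to 5 afterwards
        # does not shrink g.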
        # This should hold, since we're only precomputing the outermost iterable.
x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
x = 5; t = True;
self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
# Grammar allows multiple adjacent 'if's in listcomps and genexps,
# even though it's silly. Make sure it works (ifelse broke this.)
self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
# verify unpacking single element tuples in listcomp/genexp.
self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
def test_with_statement(self):
class manager(object):
def __enter__(self):
return (1, 2)
def __exit__(self, *args):
pass
with manager():
pass
with manager() as x:
pass
with manager() as (x, y):
pass
with manager(), manager():
pass
with manager() as x, manager() as y:
pass
with manager() as x, manager():
pass
def testIfElseExpr(self):
# Test ifelse expressions in various cases
def _checkeval(msg, ret):
"helper to check that evaluation of expressions is done correctly"
print(x)
return ret
# the next line is not allowed anymore
#self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
self.assertEqual((5 and 6 if 0 else 1), 1)
self.assertEqual(((5 and 6) if 0 else 1), 1)
self.assertEqual((5 and (6 if 1 else 1)), 6)
self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
self.assertEqual((not 5 if 1 else 1), False)
self.assertEqual((not 5 if 0 else 1), 1)
self.assertEqual((6 + 1 if 1 else 2), 7)
self.assertEqual((6 - 1 if 1 else 2), 5)
self.assertEqual((6 * 2 if 1 else 4), 12)
self.assertEqual((6 / 2 if 1 else 3), 3)
self.assertEqual((6 < 4 if 0 else 2), 2)
def test_paren_evaluation(self):
self.assertEqual(16 // (4 // 2), 8)
self.assertEqual((16 // 4) // 2, 2)
self.assertEqual(16 // 4 // 2, 2)
self.assertTrue(False is (2 is 3))
self.assertFalse((False is 2) is 3)
self.assertFalse(False is 2 is 3)
# def test_main():
# run_unittest(TokenTests, GrammarTests)
if __name__ == '__main__':
#test_main()
unittest.main()
| bsd-3-clause | -3,071,993,888,614,629,400 | 33.129763 | 132 | 0.490616 | false | 3.379908 | true | false | false |
z01nl1o02/tests | text/mlpbase.py | 1 | 8554 | import os,sys,pdb,pickle
import numpy as np
import cv2
import theano
import theano.tensor as T
import random
# You may need to train several times to get a good model; the initial 'cost' can be quite large (e.g. 78).
class Layer(object):
"""
    A layer is a matrix whose row count equals the size of this layer's
    output and whose column count equals the size of the previous layer's
    output.
"""
def __init__(self, W_init, b_init, activation):
n_output,n_input = W_init.shape
assert b_init.shape == (n_output,)
self.W = theano.shared(value=W_init.astype(theano.config.floatX),
name="W",
borrow=True)
self.b = theano.shared(value=b_init.reshape(n_output,1).astype(theano.config.floatX),
name="b",
borrow=True,
broadcastable=(False,True))
self.activation = activation
self.params = [self.W, self.b]
def output(self,x):
lin_output = T.dot(self.W,x) + self.b
return (lin_output if self.activation is None else self.activation(lin_output))
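    # Shape convention (sketch): with W of shape (n_output, n_input), x is a
    # column-wise batch of shape (n_input, batch_size), so output(x) yields
    # (n_output, batch_size); b is broadcast across the batch dimension.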
class MLP(object):
def __init__(self, W_init, b_init, activations):
assert len(W_init) == len(b_init) == len(activations)
self.layers = []
for W,b,activation in zip(W_init, b_init, activations):
self.layers.append(Layer(W,b,activation))
self.params = []
for layer in self.layers:
self.params += layer.params
def output(self,x):
for layer in self.layers:
x = layer.output(x)
return x
def squared_error(self,x,y):
        # summed squared error over the batch (use T.mean for the average)
        return T.sum((self.output(x) - y) ** 2)
def cvt2c(self):
line = ""
for param in self.params:
parval = param.get_value()
line += "%d"%(parval.shape[0]) + ',' + "%d"%(parval.shape[1]) + ',\n'
for y in range(parval.shape[0]):
for x in range(parval.shape[1]):
line += "%lf"%(parval[y,x])+ ','
line += '\n'
return line
class MLP_PROXY(object):
def __init__(self, modelpath):
self._train = None
self._predict = None
self._cost = None
self._minmax = None
self._modelpath = modelpath
self._mlp = None
def gradient_updates_momentum(self,cost, params, learning_rate, momentum):
assert momentum < 1 and momentum >= 0
updates = []
for param in params:
param_update = theano.shared(param.get_value() * 0., broadcastable=param.broadcastable)
updates.append((param, param - learning_rate * param_update))
updates.append((param_update, momentum * param_update + (1. - momentum)*T.grad(cost, param)))
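            # Theano applies the update pairs simultaneously from old values:
            #   param <- param - learning_rate * v_old
            #   v     <- momentum * v_old + (1 - momentum) * dC/dparam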
return updates
def write_in_c_format(self,outpath):
line = ""
for m0,m1 in zip(self._minmax[0], self._minmax[1]):
line += "%lf,%lf,"%(m0,m1)
line += '\n'
line += self._mlp.cvt2c()
with open(outpath, 'w') as f:
f.writelines(line)
return
def create(self, layer_sizes, learning_rate = 0.01, momentum = 0.6):
W_init = []
b_init = []
activations = []
for n_input, n_output in zip(layer_sizes[:-1], layer_sizes[1:]):
W_init.append(np.random.randn(n_output, n_input))
b_init.append(np.random.randn(n_output))
activations.append(T.nnet.sigmoid)
mlp = MLP(W_init, b_init, activations)
mlp_input = T.matrix('mlp_input')
mlp_target = T.matrix('mlp_target')
self._cost = mlp.squared_error(mlp_input, mlp_target)
self._train = theano.function([mlp_input,mlp_target], self._cost, updates=self.gradient_updates_momentum(self._cost, mlp.params, learning_rate, momentum))
self._predict = theano.function([mlp_input], mlp.output(mlp_input))
self._mlp = mlp
return
def train(self,samples, targets, max_iteration=5000, min_cost = 0.01):
#samples and targets : (samples num) X (feature dimenstion)
iteration = 0
samplesT = np.transpose(samples) #W*x + b
targetsT = np.transpose(targets)
batchsize = 5
        echostep = max(1, max_iteration / 10)
        if echostep > 1000:
            echostep = 1000
while iteration < max_iteration:
cost = 0
total = 0
for k in range(0,samplesT.shape[1],batchsize):
kk = k
if kk + batchsize > samplesT.shape[1]:
kk = samplesT.shape[1] - batchsize
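                # clamp the final window so every batch has exactly
                # `batchsize` columns; trailing samples may be revisited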
s = np.reshape(samplesT[:,kk:kk+batchsize],(-1,batchsize))
t = np.reshape(targetsT[:,kk:kk+batchsize],(-1,batchsize))
current_cost = self._train(s,t)
cost = cost + current_cost.sum()
total += batchsize
            if (iteration + 1) % echostep == 0:
                print iteration + 1, ',', cost
if cost < min_cost:
break
iteration += 1
return
def predict(self,samples):
samplesT = np.transpose(samples) #W*x + b
output = self._predict(samplesT)
targets = np.transpose(output)
return targets
def pre_normalization(self, samples):
m0 = samples[0,:]
m1 = samples[0,:]
for k in range(1,samples.shape[0]):
m0 = np.minimum(samples[k,:],m0)
m1 = np.maximum(samples[k,:],m1)
self._minmax = (m0,m1)
return
def normalization(self, samples, u=1, l=-1):
        if self._minmax is None:
return None
m0,m1 = self._minmax
rng = m1 - m0
tmp = np.ones(rng.shape)
for k in range(len(rng)):
if rng[k] < 0.001:
rng[k] = 1
tmp[k] = 0
ratio = tmp / rng
for k in range(samples.shape[0]):
feat = samples[k,:]
feat = (feat - m0) * ratio * (u - l) + l
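            # e.g. with minmax ((0,), (10,)) and u=1, l=-1, the value 2.5 maps
            # to (2.5 - 0) * (1/10) * 2 - 1 = -0.5; out-of-range values are
            # clipped to [l, u] below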
idx = feat>u
feat[idx] = u
idx = feat<l
feat[idx] = l
samples[k,:] = feat
return samples
def shuffle(self, samples, targets):
totalnum = samples.shape[0]
idx = range(totalnum)
random.shuffle(idx)
rnd_samples = np.zeros(samples.shape)
rnd_targets = np.zeros(targets.shape)
for k in range(len(idx)):
i = idx[k]
rnd_samples[k,:] = samples[i,:]
rnd_targets[k,:] = targets[i,:]
return (rnd_samples, rnd_targets)
def target_vec2mat(self, target_list, labelnum, hvalue = 1.0, lvalue = 0.0):
#0-based
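        # Example: target_vec2mat([[0], [2]], 3) ->
        #   [[1., 0., 0.],
        #    [0., 0., 1.]]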
targetnum = len(target_list)
targets = np.zeros((targetnum, labelnum))
for k in range(targetnum):
for j in range(labelnum):
targets[k,j] = lvalue
for j in target_list[k]:
targets[k,j] = hvalue
return targets
def target_mat2vec(self, targets, labelnum, thresh = 0.5):
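        # Example: with thresh=0.5 the row [0.2, 0.7, 0.6] yields [1, 2]; with
        # thresh < -1024 it yields [(1, 0.7)] (argmax with confidence); any
        # other thresh (e.g. 0) yields [1] (argmax only).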
target_list = []
if thresh > 0:
for k in range(targets.shape[0]):
l = []
for j in range(targets.shape[1]):
if targets[k,j] >= thresh:
l.append(j)
target_list.append(l)
        elif thresh < -1024.0:
for k in range(targets.shape[0]):
l = []
m1 = targets[k,:].max()
for j in range(targets.shape[1]):
if np.abs(targets[k,j] - m1) < 0.01:
l.append((j,m1)) #label and confidence
target_list.append(l)
else: #top value
for k in range(targets.shape[0]):
l = []
m1 = targets[k,:].max()
for j in range(targets.shape[1]):
if np.abs(targets[k,j] - m1) < 0.01:
l.append(j)
target_list.append(l)
return target_list
def save(self):
        if self._modelpath is None:
return -1
with open(self._modelpath, 'wb') as f:
pickle.dump((self._cost, self._train, self._predict, self._minmax,self._mlp), f)
return 0
def load(self):
        if self._modelpath is None:
return -1
with open(self._modelpath, 'rb') as f:
self._cost, self._train, self._predict, self._minmax, self._mlp = pickle.load(f)
return 0
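# Minimal usage sketch (hypothetical data; samples is an N x D array and
# targets an N x C array with values in [0, 1]):
#   proxy = MLP_PROXY('mlp.pkl')
#   proxy.create([D, 16, C], learning_rate=0.01, momentum=0.6)
#   proxy.pre_normalization(samples)
#   samples = proxy.normalization(samples)
#   samples, targets = proxy.shuffle(samples, targets)
#   proxy.train(samples, targets, max_iteration=5000)
#   predictions = proxy.predict(samples)
#   proxy.save()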
| gpl-2.0 | 3,815,222,287,935,987,700 | 35.4 | 162 | 0.513561 | false | 3.653994 | false | false | false |