repo_name | path | copies | size | content | license
---|---|---|---|---|---|
webmull/phantomjs | src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/internal/message_listener.py | 590 | 3354 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Defines a listener interface for observing certain
state transitions on Message objects.
Also defines a null implementation of this interface.
"""
__author__ = '[email protected] (Will Robinson)'
class MessageListener(object):
"""Listens for modifications made to a message. Meant to be registered via
Message._SetListener().
Attributes:
dirty: If True, then calling Modified() would be a no-op. This can be
used to avoid these calls entirely in the common case.
"""
def Modified(self):
"""Called every time the message is modified in such a way that the parent
message may need to be updated. This currently means either:
(a) The message was modified for the first time, so the parent message
should henceforth mark the message as present.
(b) The message's cached byte size became dirty -- i.e. the message was
modified for the first time after a previous call to ByteSize().
Therefore the parent should also mark its byte size as dirty.
Note that (a) implies (b), since new objects start out with a client cached
size (zero). However, we document (a) explicitly because it is important.
Modified() will *only* be called in response to one of these two events --
not every time the sub-message is modified.
Note that if the listener's |dirty| attribute is true, then calling
Modified at the moment would be a no-op, so it can be skipped. Performance-
sensitive callers should check this attribute directly before calling since
it will be true most of the time.
"""
raise NotImplementedError
class NullMessageListener(object):
"""No-op MessageListener implementation."""
def Modified(self):
pass
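

# Illustrative sketch only -- not part of the original protobuf module.  It
# shows one way the MessageListener interface described above could be
# implemented; the class name is hypothetical.
class _ExampleDirtyFlagListener(object):

  """Example listener that records whether its message has been modified."""

  def __init__(self):
    self.dirty = False

  def Modified(self):
    # Once dirty, further Modified() calls may be skipped by the caller
    # (see the |dirty| attribute documented in MessageListener).
    self.dirty = True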
| bsd-3-clause |
AdamPI314/SOHR | include/tools/check_available_hosts.py | 2 | 2084 | #!/usr/bin/env python
import subprocess as sp
import numpy as np
import re
import signal
def check_host():
cmd= "echo `hostname`"
pid=sp.Popen(cmd, shell= True, stdout=sp.PIPE, stderr= sp.PIPE)
out, err= pid.communicate()
if "linus" in out:
host= "linus"
elif "pople" in out:
host= "pople"
pid.wait()
return host
def check_nodes():
#cmd= "python sr_check_nodes.py"
cmd= "/tcghome/sbai/sr_tools/my_bin/check-nodes-timeout.sh -t 300"
pid=sp.Popen(cmd, shell= True, stdout=sp.PIPE, stderr= sp.PIPE)
out, err= pid.communicate()
return out
def parse_and_w2f(filename, check_nodes_out, criteria):
#with open("check_nodes_out.log", 'w') as f:
# f.write(check_nodes_out)
#f.close()
#with open("check_nodes_out.log", 'r') as f:
# check_nodes_out=f.read()
#f.close()
#re.match matches from the start of the string (as if anchored at ^)
#re.search returns the first match
#re.findall returns all matches
#m = re.search("(compute-\d+-\d+|pop\d+):\s+.+load average:\s+(\d+.\d+),\s+(\d+.\d+),\s+(\d+.\d+)",
# check_nodes_out)
m = re.findall("(compute-\d+-\d+|pop\d+):\s+.+load average:\s+(\d+.\d+),\s+(\d+.\d+),\s+(\d+.\d+)",
check_nodes_out)
if m:
#print m.groups()
avail_nodes= [node for node in m if float(node[-3])<=criteria and float(node[-2])<=criteria and float(node[-1])<=criteria]
#sort nodes by the sum of their three load averages: node[-1]+node[-2]+node[-3]
avail_nodes= sorted(avail_nodes, key= lambda x:float(x[-1])+float(x[-2])+float(x[-3]))
with open(filename, 'w') as f_host:
f_host.write("# This is a sample host file\n")
for node in avail_nodes:
f_host.write(node[0]+":4 # The next 4 procs run on this host, "+node[-3]+" "+node[-2]+" "+node[-1]+"\n")
f_host.close()
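

# Illustrative sketch, not part of the original script: how the load-average
# regex used in parse_and_w2f() behaves on a single line of check-nodes
# output.  The sample line format below is an assumption.
def _demo_regex_parse():
    sample = "compute-0-12:  up 3 days,  load average: 0.52, 0.48, 0.50"
    m = re.findall("(compute-\d+-\d+|pop\d+):\s+.+load average:\s+(\d+.\d+),\s+(\d+.\d+),\s+(\d+.\d+)",
                   sample)
    # m == [('compute-0-12', '0.52', '0.48', '0.50')]
    return m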
if __name__== "__main__":
#print check_host()
print "check nodes (walltime=300 seconds)..."
print "might take longer on pople...\n"
check_nodes_out=[]
check_nodes_out= check_nodes()
criteria=15.0; filename= "hosts"
print "search nodes with current load less than ", criteria, "..."
print "write to file ", filename, "...\n"
parse_and_w2f(filename, check_nodes_out, criteria)
| mit |
pyjs/pyjs | pyjswidgets/pyjamas/ui/FormPanel.ie6.py | 7 | 1124 | class FormPanel:
def getTextContents(self, iframe):
JS("""
try {
if (!@{{iframe}}['contentWindow']['document'])
return null;
return @{{iframe}}['contentWindow']['document']['body']['innerText'];
} catch (e) {
return null;
}
""")
def hookEvents(self, iframe, form, listener):
JS("""
if (@{{iframe}}) {
@{{iframe}}['onreadystatechange'] = function() {
if (!@{{iframe}}['__formAction'])
return;
if (@{{iframe}}['readyState'] == 'complete') {
@{{listener}}['onFrameLoad']();
}
};
}
@{{form}}['onsubmit'] = function() {
if (@{{iframe}})
@{{iframe}}['__formAction'] = @{{form}}['action'];
return @{{listener}}['onFormSubmit']();
};
""")
def unhookEvents(self, iframe, form):
JS("""
if (@{{iframe}})
@{{iframe}}['onreadystatechange'] = null;
@{{form}}['onsubmit'] = null;
""")
| apache-2.0 |
efiring/numpy-work | numpy/testing/decorators.py | 2 | 5819 | """Decorators for labeling test objects
Decorators that merely return a modified version of the original
function object are straightforward. Decorators that return a new
function object need to use
nose.tools.make_decorator(original_function)(decorator) in returning
the decorator, in order to preserve metadata such as function name,
setup and teardown functions and so on - see nose.tools for more
information.
"""
def slow(t):
"""Labels a test as 'slow'.
The exact definition of a slow test is obviously both subjective and
hardware-dependent, but in general any individual test that requires more
than a second or two should be labeled as slow (the whole suite consists of
thousands of tests, so even a second is significant)."""
t.slow = True
return t
def setastest(tf=True):
''' Signals to nose that this function is or is not a test
Parameters
----------
tf : bool
If True, specifies this is a test; if False, specifies it is not a test.
e.g.
>>> from numpy.testing.decorators import setastest
>>> @setastest(False)
... def func_with_test_in_name(arg1, arg2): pass
...
>>>
This decorator cannot use the nose namespace, because it can be
called from a non-test module. See also istest and nottest in
nose.tools
'''
def set_test(t):
t.__test__ = tf
return t
return set_test
def skipif(skip_condition, msg=None):
''' Make function raise SkipTest exception if skip_condition is true
Parameters
----------
skip_condition : bool or callable.
Flag to determine whether to skip test. If the condition is a
callable, it is used at runtime to dynamically make the decision. This
is useful for tests that may require costly imports, to delay the cost
until the test suite is actually executed.
msg : string
Message to give on raising a SkipTest exception
Returns
-------
decorator : function
Decorator, which, when applied to a function, causes SkipTest
to be raised when the skip_condition was True, and the function
to be called normally otherwise.
Notes
-----
You will see from the code that we had to further decorate the
decorator with the nose.tools.make_decorator function in order to
transmit function name, and various other metadata.
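Examples
--------
Illustrative usage only; the test function below is hypothetical::

    from numpy.testing.decorators import skipif

    @skipif(True, 'skip this test unconditionally')
    def test_example():
        pass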
'''
def skip_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
# Allow for both boolean or callable skip conditions.
if callable(skip_condition):
skip_val = lambda : skip_condition()
else:
skip_val = lambda : skip_condition
def get_msg(func,msg=None):
"""Skip message with information about function being skipped."""
if msg is None:
out = 'Test skipped due to test condition'
else:
out = '\n'+msg
return "Skipping test: %s%s" % (func.__name__,out)
# We need to define *two* skippers because Python doesn't allow both
# return with value and yield inside the same function.
def skipper_func(*args, **kwargs):
"""Skipper for normal test functions."""
if skip_val():
raise nose.SkipTest(get_msg(f,msg))
else:
return f(*args, **kwargs)
def skipper_gen(*args, **kwargs):
"""Skipper for test generators."""
if skip_val():
raise nose.SkipTest(get_msg(f,msg))
else:
for x in f(*args, **kwargs):
yield x
# Choose the right skipper to use when building the actual decorator.
if nose.util.isgenerator(f):
skipper = skipper_gen
else:
skipper = skipper_func
return nose.tools.make_decorator(f)(skipper)
return skip_decorator
def knownfailureif(fail_condition, msg=None):
''' Make function raise KnownFailureTest exception if fail_condition is true
Parameters
----------
fail_condition : bool or callable.
Flag to determine whether to mark test as known failure (True)
or not (False). If the condition is a callable, it is used at
runtime to dynamically make the decision. This is useful for
tests that may require costly imports, to delay the cost
until the test suite is actually executed.
msg : string
Message to give on raising a KnownFailureTest exception
Returns
-------
decorator : function
Decorator, which, when applied to a function, causes KnownFailureTest
to be raised when fail_condition is True, and the function
to be called normally otherwise.
Notes
-----
You will see from the code that we had to further decorate the
decorator with the nose.tools.make_decorator function in order to
transmit function name, and various other metadata.
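Examples
--------
Illustrative usage only; the test function below is hypothetical::

    @knownfailureif(True, 'this test is expected to fail')
    def test_known_broken():
        assert False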
'''
if msg is None:
msg = 'Test skipped due to known failure'
# Allow for both boolean or callable known failure conditions.
if callable(fail_condition):
fail_val = lambda : fail_condition()
else:
fail_val = lambda : fail_condition
def knownfail_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
from noseclasses import KnownFailureTest
def knownfailer(*args, **kwargs):
if fail_val():
raise KnownFailureTest, msg
else:
return f(*args, **kwargs)
return nose.tools.make_decorator(f)(knownfailer)
return knownfail_decorator
| bsd-3-clause |
anupam2221/isprime | isprime.py | 1 | 1522 | ##written 9-6-2017
##by Anupam KP (c)
##[email protected]
##
##
##preprocessing time- O(n loglogn)
##Tells if a prime number in O(1)
dexterprime=[0]*500
def akpprocess(n):
n=int(n)
global dexterprime
dexterprime=[0]*(n+1)
dexterprime[0]=dexterprime[1]=1
for i in range(2,n+1):
for j in range(2,i+1):
if (i*j)>(n):
break
if dexterprime[j]==0:
dexterprime[i*j]=1
#print primetell.d
def isprime(x):
global dexterprime
try:
if dexterprime[x]==0:
return True
else:
return False
except:
print "you haven't made the sieve long enough for the number you have provided.\n Please preprocess with akpprocess("+str(x)+")"
class primetell:
d=[]
def __init__(self,n):
self.d=[]
self.preprocess(n)
def preprocess(self,n):
n=int(n)
self.d=[0]*(n+1)
self.d[0]=self.d[1]=1
for i in range(2,n+1):
for j in range(2,i+1):
if (i*j)>n+1:
break
if self.d[j]==0:
self.d[i*j]=1
#print primetell.d
def isprime(self,x):
if self.d[x]==0:
return True
else:
return False
if __name__=="__main__":
akpprocess(20)
print dexterprime
print isprime(11)
| mit |
mumble-voip/libmumble-gyp | test/generator-output/gyptest-relocate.py | 216 | 1670 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a project hierarchy created with the --generator-output=
option can be built even when it's relocated to a different path.
"""
import TestGyp
# Android doesn't support --generator-output.
test = TestGyp.TestGyp(formats=['!android'])
test.writable(test.workpath('src'), False)
test.run_gyp('prog1.gyp',
'-Dset_symroot=1',
'--generator-output=' + test.workpath('gypfiles'),
chdir='src')
test.writable(test.workpath('src'), True)
test.relocate('src', 'relocate/src')
test.relocate('gypfiles', 'relocate/gypfiles')
test.writable(test.workpath('relocate/src'), False)
test.writable(test.workpath('relocate/src/build'), True)
test.writable(test.workpath('relocate/src/subdir2/build'), True)
test.writable(test.workpath('relocate/src/subdir3/build'), True)
test.build('prog1.gyp', test.ALL, chdir='relocate/gypfiles')
chdir = 'relocate/gypfiles'
expect = """\
Hello from %s
Hello from inc.h
Hello from inc1/include1.h
Hello from inc2/include2.h
Hello from inc3/include3.h
Hello from subdir2/deeper/deeper.h
"""
if test.format == 'xcode':
chdir = 'relocate/src'
test.run_built_executable('prog1', chdir=chdir, stdout=expect % 'prog1.c')
if test.format == 'xcode':
chdir = 'relocate/src/subdir2'
test.run_built_executable('prog2', chdir=chdir, stdout=expect % 'prog2.c')
if test.format == 'xcode':
chdir = 'relocate/src/subdir3'
test.run_built_executable('prog3', chdir=chdir, stdout=expect % 'prog3.c')
test.pass_test()
| bsd-3-clause |
ninemoreminutes/lmiapi | lmiapi/central.py | 1 | 7685 | # Python
import json
import logging
import re
import urllib
import urlparse
import warnings
import webbrowser
# Requests
import requests
# BeautifulSoup4
from bs4 import BeautifulSoup
__all__ = ['LogMeInCentralAPI']
logger = logging.getLogger('lmiapi.central')
class LogMeInCentralAPI(object): # pragma: no cover
API_ROOT = 'https://secure.logmein.com/api/'
def __init__(self, email, password):
warnings.warn('The LogMeInCentralAPI class is no longer maintained or '
'supported; use LogMeInPublicAPI instead.',
DeprecationWarning)
self.email = email
self.password = password
self.session = requests.Session()
self.session.headers.update({'Accept': 'application/JSON'})
self.current_profile_id = None
def _post(self, path, data=None):
url = '%s%s' % (self.API_ROOT, path.lstrip('/'))
headers = {'Content-Type': 'application/JSON'}
data = json.dumps(data or {})
response = self.session.post(url, data=data, headers=headers)
if response.status_code == 401 and self.login():
response = self.session.post(url, data=data, headers=headers)
response.raise_for_status()
logger.debug('POST %s -> %d', url, response.status_code)
return response.json()
def _update_current_profile_id(self, soup):
profile_id = None
alt_link = soup.find('link', rel='alternate', href=re.compile(r'^.*?profileid=\d+?.*?$'))
if alt_link:
alt_parts = urlparse.urlsplit(alt_link['href'])
alt_query = urlparse.parse_qs(alt_parts.query)
profile_id = int(alt_query.get('profileid', ['0'])[0])
self.current_profile_id = profile_id or None
logger.debug('current profile id: %s', str(self.current_profile_id))
return self.current_profile_id
def login(self):
# Read main LogMeIn page at secure.logmein.com.
url = urlparse.urljoin(self.API_ROOT, '/')
response = self.session.get(url)
response.raise_for_status()
# Find login button link.
soup = BeautifulSoup(response.text)
btn_login = soup.find('a', attrs={'class': 'btn-login', 'href': True})
if not btn_login:
raise RuntimeError('Unable to find login button link!')
login_url = urlparse.urljoin(response.url, btn_login['href'])
# Follow the login link.
response = self.session.get(login_url)
response.raise_for_status()
# Try to find the current profile ID in the response. If found, we're
# already logged in.
soup = BeautifulSoup(response.text)
profile_id = self._update_current_profile_id(soup)
if profile_id:
return profile_id
# Otherwise, we were redirected to the login page, so find the login
# form and build up the auth data to send.
form = soup.find('form', id='form', action=True)
if not form:
raise RuntimeError('No login form could be found!')
auth_url = urlparse.urljoin(response.url, form['action'])
auth_method = form.attrs.get('method', 'POST').lower()
fields = form.find_all('input', attrs={'name': True})
auth_data = {}
for field in fields:
name = field['name']
if name == 'email':
value = self.email
elif name == 'password':
value = self.password
else:
value = field.attrs.get('value', '')
auth_data[name] = value
# Now submit the login form with the auth data filled in.
logger.debug('auth url: %s %s', auth_method.upper(), auth_url)
logger.debug('auth data: %r', auth_data)
response = getattr(self.session, auth_method)(auth_url, auth_data)
response.raise_for_status()
# Look for the current profile ID in the response.
soup = BeautifulSoup(response.text)
return self._update_current_profile_id(soup)
def select_profile(self, profile_id):
# Get the URL used to switch to a new profile.
url = urlparse.urljoin(self.API_ROOT, '/login/selectprofile.aspx?profile=%d' % profile_id)
response = self.session.get(url)
response.raise_for_status()
# Look for the new profile ID in the response.
soup = BeautifulSoup(response.text)
return self._update_current_profile_id(soup)
def get_user_profile_list(self):
result = self._post('ProfileList.svc/GetUserProfileList')
return dict([(x['Id'], x['Name']) for x in result['GetUserProfileListResult']['List']])
def get_all_hosts(self):
result = self._post('Computers.svc/GetAllHostsForCentral')
return result
def get_host_details(self, host_id):
url = urlparse.urljoin(self.API_ROOT, '/mycomputers_preferences.asp')
response = self.session.get(url, params={'hostid': host_id})
response.raise_for_status()
soup = BeautifulSoup(response.text)
host_details = {}
for hostid_input in soup.select('fieldset > input[name="hostid"]'):
host_details[u'hostid'] = int(hostid_input['value'])
for profileid_input in soup.select('fieldset input[name="profileid"]'):
host_details[u'profileid'] = int(profileid_input['value'])
for tr in soup.select('fieldset table tr'):
for n, td in enumerate(tr.find_all('td', recursive=False)):
if n == 0:
key_parts = td.get_text(strip=True).replace(':', '').split()
key_parts = [x.strip().title() for x in key_parts]
key_parts[0] = key_parts[0].lower()
key = u''.join(key_parts)
if key == 'status':
key = u'statusString'
elif key == 'group':
key = u'groupName'
elif n == 1:
if key == 'computerDescription':
value = td.find('input', attrs={'name': 'desc'})['value']
elif key == 'statusString':
value = td.get_text('|', strip=True).split('|')[0]
a_tag = td.find('a', href=True)
if a_tag:
host_details[u'connectUrl'] = urlparse.urljoin(response.url, a_tag['href'])
elif key == 'groupName':
selected_option = td.find('option', selected=True)
value = selected_option.get_text()
host_details[u'groupid'] = int(selected_option['value'])
elif key == 'note':
value = td.find('textarea').get_text()
else:
value = td.get_text(strip=True)
host_details[key] = value
return host_details
def get_host_av_info(self, host_id):
result = self._post('AntiVirus.svc/GetHostAVInfo', {'hostId': host_id})
return result['GetHostAVInfoResult']
def connect_to_host(self, host_id):
url = urlparse.urljoin(self.API_ROOT, '/mycomputers_connect.asp')
qs = urllib.urlencode({'hostid': host_id})
url = '%s?%s' % (url, qs)
webbrowser.open_new_tab(url)
return
response = self.session.get(url, params={'hostid': host_id})
response.raise_for_status()
soup = BeautifulSoup(response.text)
meta = soup.find('meta', attrs={'http-equiv': 'refresh', 'content': True})
url = meta['content'].split(';URL=', 1)[1]
response = self.session.get(url)
| bsd-3-clause |
jlspyaozhongkai/Uter | third_party_build/Python-2.7.9/lib/python2.7/lib-tk/tkCommonDialog.py | 193 | 1418 | # base class for tk common dialogues
#
# this module provides a base class for accessing the common
# dialogues available in Tk 4.2 and newer. use tkFileDialog,
# tkColorChooser, and tkMessageBox to access the individual
# dialogs.
#
# written by Fredrik Lundh, May 1997
#
from Tkinter import *
class Dialog:
command = None
def __init__(self, master=None, **options):
# FIXME: should this be placed on the module level instead?
if TkVersion < 4.2:
raise TclError, "this module requires Tk 4.2 or newer"
self.master = master
self.options = options
if not master and options.get('parent'):
self.master = options['parent']
def _fixoptions(self):
pass # hook
def _fixresult(self, widget, result):
return result # hook
def show(self, **options):
# update instance options
for k, v in options.items():
self.options[k] = v
self._fixoptions()
# we need a dummy widget to properly process the options
# (at least as long as we use Tkinter 1.63)
w = Frame(self.master)
try:
s = w.tk.call(self.command, *w._options(self.options))
s = self._fixresult(w, s)
finally:
try:
# get rid of the widget
w.destroy()
except:
pass
return s
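

# Illustrative sketch only, not part of this module: a minimal concrete dialog
# built on the Dialog base class.  "tk_chooseColor" is the Tk command wrapped
# by the real tkColorChooser; this simplified subclass is hypothetical.
class _ExampleColorDialog(Dialog):
    "Ask for a color (minimal subclassing example)"
    command = "tk_chooseColor"

    def _fixresult(self, widget, result):
        # Tk returns an empty string when the dialog is cancelled.
        return str(result) or None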
| gpl-3.0 |
h3biomed/ansible-modules-core | network/vyos/vyos_facts.py | 29 | 9161 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: vyos_facts
version_added: "2.2"
author: "Peter Sprygada (@privateip)"
short_description: Collect facts from remote devices running OS
description:
- Collects a base set of device facts from a remote device that
is running VyOS. This module prepends all of the
base network fact keys with U(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
extends_documentation_fragment: vyos
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: "!config"
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node
vars:
cli:
host: "{{ inventory_hostname }}"
username: vyos
password: vyos
transport: cli
- name: collect all facts from the device
vyos_facts:
gather_subset: all
- name: collect only the config and default facts
vyos_facts:
gather_subset: config
- name: collect everything exception the config
vyos_facts:
gather_subset: "!config"
"""
RETURN = """
ansible_net_config:
description: The running-config from the device
returned: when config is configured
type: str
ansible_net_commits:
description: The set of available configuration revisions
returned: when present
type: list
ansible_net_hostname:
description: The configured system hostname
returned: always
type: str
ansible_net_model:
description: The device model string
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the device
returned: always
type: str
ansible_net_version:
description: The version of the software running
returned: always
type: str
ansible_net_neighbors:
description: The set of LLDP neighbors
returned: when interface is configured
type: list
ansible_net_gather_subset:
description: The list of subsets gathered by the module
returned: always
type: list
"""
import re
import ansible.module_utils.vyos
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.netcli import CommandRunner
from ansible.module_utils.network import NetworkModule
from ansible.module_utils.six import iteritems
class FactsBase(object):
def __init__(self, runner):
self.runner = runner
self.facts = dict()
self.commands()
def commands(self):
raise NotImplementedError
class Default(FactsBase):
def commands(self):
self.runner.add_command('show version')
self.runner.add_command('show host name')
def populate(self):
data = self.runner.get_command('show version')
self.facts['version'] = self.parse_version(data)
self.facts['serialnum'] = self.parse_serialnum(data)
self.facts['model'] = self.parse_model(data)
self.facts['hostname'] = self.runner.get_command('show host name')
def parse_version(self, data):
match = re.search(r'Version:\s*(\S+)', data)
if match:
return match.group(1)
def parse_model(self, data):
match = re.search(r'HW model:\s*(\S+)', data)
if match:
return match.group(1)
def parse_serialnum(self, data):
match = re.search(r'HW S/N:\s+(\S+)', data)
if match:
return match.group(1)
class Config(FactsBase):
def commands(self):
self.runner.add_command('show configuration commands')
self.runner.add_command('show system commit')
def populate(self):
config = self.runner.get_command('show configuration commands')
self.facts['config'] = str(config).split('\n')
commits = self.runner.get_command('show system commit')
entries = list()
entry = None
for line in commits.split('\n'):
match = re.match(r'(\d+)\s+(.+)by(.+)via(.+)', line)
if match:
if entry:
entries.append(entry)
entry = dict(revision=match.group(1),
datetime=match.group(2),
by=str(match.group(3)).strip(),
via=str(match.group(4)).strip(),
comment=None)
else:
entry['comment'] = line.strip()
self.facts['commits'] = entries
class Neighbors(FactsBase):
def commands(self):
self.runner.add_command('show lldp neighbors')
self.runner.add_command('show lldp neighbors detail')
def populate(self):
all_neighbors = self.runner.get_command('show lldp neighbors')
if 'LLDP not configured' not in all_neighbors:
neighbors = self.parse(
self.runner.get_command('show lldp neighbors detail')
)
self.facts['neighbors'] = self.parse_neighbors(neighbors)
def parse(self, data):
parsed = list()
values = None
for line in data.split('\n'):
if len(line) == 0:
continue
elif line[0] == ' ':
values += '\n%s' % line
elif line.startswith('Interface'):
if values:
parsed.append(values)
values = line
return parsed
def parse_neighbors(self, data):
facts = dict()
for item in data:
interface = self.parse_interface(item)
host = self.parse_host(item)
port = self.parse_port(item)
if interface not in facts:
facts[interface] = list()
facts[interface].append(dict(host=host, port=port))
return facts
def parse_interface(self, data):
match = re.search(r'^Interface:\s+(\S+),', data)
return match.group(1)
def parse_host(self, data):
match = re.search(r'SysName:\s+(.+)$', data, re.M)
if match:
return match.group(1)
def parse_port(self, data):
match = re.search(r'PortDescr:\s+(.+)$', data, re.M)
if match:
return match.group(1)
FACT_SUBSETS = dict(
default=Default,
neighbors=Neighbors,
config=Config
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
module = NetworkModule(argument_spec=spec, supports_check_mode=True)
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Bad subset')
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
runner = CommandRunner(module)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](runner))
runner.run()
try:
for inst in instances:
inst.populate()
facts.update(inst.facts)
except Exception:
exc = get_exception()
module.fail_json(msg='unknown failure', output=runner.items, exc=str(exc))
ansible_facts = dict()
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
module.exit_json(ansible_facts=ansible_facts)
if __name__ == '__main__':
main()
| gpl-3.0 |
BT-rmartin/odoo | addons/web_diagram/controllers/main.py | 268 | 4321 | import openerp
from openerp.tools.safe_eval import safe_eval as eval
class DiagramView(openerp.http.Controller):
@openerp.http.route('/web_diagram/diagram/get_diagram_info', type='json', auth='user')
def get_diagram_info(self, req, id, model, node, connector,
src_node, des_node, label, **kw):
visible_node_fields = kw.get('visible_node_fields',[])
invisible_node_fields = kw.get('invisible_node_fields',[])
node_fields_string = kw.get('node_fields_string',[])
connector_fields = kw.get('connector_fields',[])
connector_fields_string = kw.get('connector_fields_string',[])
bgcolors = {}
shapes = {}
bgcolor = kw.get('bgcolor','')
shape = kw.get('shape','')
if bgcolor:
for color_spec in bgcolor.split(';'):
if color_spec:
colour, color_state = color_spec.split(':')
bgcolors[colour] = color_state
if shape:
for shape_spec in shape.split(';'):
if shape_spec:
shape_colour, shape_color_state = shape_spec.split(':')
shapes[shape_colour] = shape_color_state
ir_view = req.session.model('ir.ui.view')
graphs = ir_view.graph_get(
int(id), model, node, connector, src_node, des_node, label,
(140, 180), req.session.context)
nodes = graphs['nodes']
transitions = graphs['transitions']
isolate_nodes = {}
for blnk_node in graphs['blank_nodes']:
isolate_nodes[blnk_node['id']] = blnk_node
else:
y = map(lambda t: t['y'],filter(lambda x: x['y'] if x['x']==20 else None, nodes.values()))
y_max = (y and max(y)) or 120
connectors = {}
list_tr = []
for tr in transitions:
list_tr.append(tr)
connectors.setdefault(tr, {
'id': int(tr),
's_id': transitions[tr][0],
'd_id': transitions[tr][1]
})
connector_tr = req.session.model(connector)
connector_ids = connector_tr.search([('id', 'in', list_tr)], 0, 0, 0, req.session.context)
data_connectors =connector_tr.read(connector_ids, connector_fields, req.session.context)
for tr in data_connectors:
transition_id = str(tr['id'])
_sourceid, label = graphs['label'][transition_id]
t = connectors[transition_id]
t.update(
source=tr[src_node][1],
destination=tr[des_node][1],
options={},
signal=label
)
for i, fld in enumerate(connector_fields):
t['options'][connector_fields_string[i]] = tr[fld]
fields = req.session.model('ir.model.fields')
field_ids = fields.search([('model', '=', model), ('relation', '=', node)], 0, 0, 0, req.session.context)
field_data = fields.read(field_ids, ['relation_field'], req.session.context)
node_act = req.session.model(node)
search_acts = node_act.search([(field_data[0]['relation_field'], '=', id)], 0, 0, 0, req.session.context)
data_acts = node_act.read(search_acts, invisible_node_fields + visible_node_fields, req.session.context)
for act in data_acts:
n = nodes.get(str(act['id']))
if not n:
n = isolate_nodes.get(act['id'], {})
y_max += 140
n.update(x=20, y=y_max)
nodes[act['id']] = n
n.update(
id=act['id'],
color='white',
options={}
)
for color, expr in bgcolors.items():
if eval(expr, act):
n['color'] = color
for shape, expr in shapes.items():
if eval(expr, act):
n['shape'] = shape
for i, fld in enumerate(visible_node_fields):
n['options'][node_fields_string[i]] = act[fld]
_id, name = req.session.model(model).name_get([id], req.session.context)[0]
return dict(nodes=nodes,
conn=connectors,
name=name,
parent_field=graphs['node_parent_field'])
| agpl-3.0 |
karolciba/playground | ds_algos/heap.py | 1 | 3241 | #!/usr/bin/env python
def heapsort(ary, strategy = 'up'):
swaps = 0
def swap(i,j):
nonlocal swaps
swaps += 1
ary[i], ary[j] = ary[j], ary[i]
lst = len(ary)
def siftup(pos):
while pos:
if ary[pos] < ary[pos//2]:
swap(pos,pos//2)
pos //= 2
else:
break
def siftdown(pos, end):
while pos < end:
left = 2*pos if 2*pos < end else pos
right = 2*pos + 1 if 2 * pos + 1 < end else pos
toswap = pos
if ary[pos] > ary[left]:
toswap = left
if ary[toswap] > ary[right]:
toswap = right
if toswap == pos:
break
swap(pos, toswap)
pos = toswap
# build heap starting from first element
# print("before", ary)
if strategy == 'down':
for i in range(lst):
siftup(i)
else:
for i in range(lst-1, -1, -1):
siftdown(i,lst)
print("swaps", swaps)
# print("heapyfied", ary)
for i in range(lst-1, 0, -1):
swap(0,i)
siftdown(0,i)
# print("sorted", ary)
# sort tree swapping element for end, and rebuilding tree
class BinaryHeap():
def __init__(self ):
self._ary = []
def push(self, item):
pos = len(self._ary)
self._ary.append(item)
self.siftup(pos)
def siftup(self, pos):
while pos:
if self._ary[pos] < self._ary[pos//2]:
self._ary[pos], self._ary[pos//2] = self._ary[pos//2], self._ary[pos]
pos //= 2
else:
break
def pop(self):
lst = len(self._ary)
item = None
print(lst, item)
if lst >= 1:
self._ary[0], self._ary[lst-1] = self._ary[lst-1],self._ary[0]
item = self._ary.pop()
print(lst, item)
self.siftdown(0)
return item
def siftdown(self, pos):
lst = len(self._ary)
if lst == 0:
return None
while pos < lst:
left = 2 * pos
right = 2 * pos + 1
left = pos if left >= lst else left
right = pos if right >= lst else right
swap = pos
print("siftdown pos {} left {} right {} swap {} of len {}".format(pos, left, right, swap, len(self._ary)))
# if self._ary[left] >= self._ary[pos] <= self._ary[right]:
# return
if self._ary[pos] > self._ary[left]:
swap = left
if self._ary[swap] > self._ary[right]:
swap = right
if swap == pos:
return
self._ary[pos], self._ary[swap] = self._ary[swap], self._ary[pos]
pos = swap
if __name__ == '__main__':
import random
ary = list(range(1,10000))
random.shuffle(ary)
heapsort(ary, 'up')
srt = []
# heap = BinaryHeap()
# for i in ary:
# heap.push(i)
#
#
# print("heap", heap._ary)
# item = heap.pop()
# while item:
# print(item, heap._ary)
# srt.append(item)
# item = heap.pop()
#
#
# print("sorted", srt)
| unlicense |
sorenk/ansible | test/units/modules/network/f5/test_bigiq_regkey_license.py | 17 | 3902 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.bigiq_regkey_license import ModuleParameters
from library.bigiq_regkey_license import ApiParameters
from library.bigiq_regkey_license import ModuleManager
from library.bigiq_regkey_license import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigiq_regkey_license import ModuleParameters
from ansible.modules.network.f5.bigiq_regkey_license import ApiParameters
from ansible.modules.network.f5.bigiq_regkey_license import ModuleManager
from ansible.modules.network.f5.bigiq_regkey_license import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
regkey_pool='foo',
license_key='XXXX-XXXX-XXXX-XXXX-XXXX',
accept_eula=True,
description='this is a description'
)
p = ModuleParameters(params=args)
assert p.regkey_pool == 'foo'
assert p.license_key == 'XXXX-XXXX-XXXX-XXXX-XXXX'
assert p.accept_eula is True
assert p.description == 'this is a description'
def test_api_parameters(self):
args = load_fixture('load_regkey_license_key.json')
p = ApiParameters(params=args)
assert p.description == 'foo bar baz'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
def tearDown(self):
self.patcher1.stop()
def test_create(self, *args):
set_module_args(dict(
regkey_pool='foo',
license_key='XXXX-XXXX-XXXX-XXXX-XXXX',
accept_eula=True,
description='this is a description',
password='passsword',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['description'] == 'this is a description'
| gpl-3.0 |
ekarlso/python-jolokiaclient | jolokiaclient/exceptions.py | 17 | 11913 | # Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 Nebula, Inc.
# Copyright 2013 Alessio Ababilov
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception definitions.
"""
import inspect
import sys
import six
class ClientException(Exception):
"""The base exception class for all exceptions this library raises.
"""
pass
class MissingArgs(ClientException):
"""Supplied arguments are not sufficient for calling a function."""
def __init__(self, missing):
self.missing = missing
msg = "Missing argument(s): %s" % ", ".join(missing)
super(MissingArgs, self).__init__(msg)
class ValidationError(ClientException):
"""Error in validation on API client side."""
pass
class UnsupportedVersion(ClientException):
"""User is trying to use an unsupported version of the API."""
pass
class CommandError(ClientException):
"""Error in CLI tool."""
pass
class AuthorizationFailure(ClientException):
"""Cannot authorize API client."""
pass
class ConnectionRefused(ClientException):
"""Cannot connect to API service."""
pass
class AuthPluginOptionsMissing(AuthorizationFailure):
"""Auth plugin misses some options."""
def __init__(self, opt_names):
super(AuthPluginOptionsMissing, self).__init__(
"Authentication failed. Missing options: %s" %
", ".join(opt_names))
self.opt_names = opt_names
class AuthSystemNotFound(AuthorizationFailure):
"""User has specified a AuthSystem that is not installed."""
def __init__(self, auth_system):
super(AuthSystemNotFound, self).__init__(
"AuthSystemNotFound: %s" % repr(auth_system))
self.auth_system = auth_system
class NoUniqueMatch(ClientException):
"""Multiple entities found instead of one."""
pass
class EndpointException(ClientException):
"""Something is rotten in Service Catalog."""
pass
class EndpointNotFound(EndpointException):
"""Could not find requested endpoint in Service Catalog."""
pass
class AmbiguousEndpoints(EndpointException):
"""Found more than one matching endpoint in Service Catalog."""
def __init__(self, endpoints=None):
super(AmbiguousEndpoints, self).__init__(
"AmbiguousEndpoints: %s" % repr(endpoints))
self.endpoints = endpoints
class HttpError(ClientException):
"""The base exception class for all HTTP exceptions.
"""
http_status = 0
message = "HTTP Error"
def __init__(self, message=None, details=None,
response=None, request_id=None,
url=None, method=None, http_status=None):
self.http_status = http_status or self.http_status
self.message = message or self.message
self.details = details
self.request_id = request_id
self.response = response
self.url = url
self.method = method
formatted_string = "%s (HTTP %s)" % (self.message, self.http_status)
if request_id:
formatted_string += " (Request-ID: %s)" % request_id
super(HttpError, self).__init__(formatted_string)
class HTTPClientError(HttpError):
"""Client-side HTTP error.
Exception for cases in which the client seems to have erred.
"""
message = "HTTP Client Error"
class HttpServerError(HttpError):
"""Server-side HTTP error.
Exception for cases in which the server is aware that it has
erred or is incapable of performing the request.
"""
message = "HTTP Server Error"
class BadRequest(HTTPClientError):
"""HTTP 400 - Bad Request.
The request cannot be fulfilled due to bad syntax.
"""
http_status = 400
message = "Bad Request"
class Unauthorized(HTTPClientError):
"""HTTP 401 - Unauthorized.
Similar to 403 Forbidden, but specifically for use when authentication
is required and has failed or has not yet been provided.
"""
http_status = 401
message = "Unauthorized"
class PaymentRequired(HTTPClientError):
"""HTTP 402 - Payment Required.
Reserved for future use.
"""
http_status = 402
message = "Payment Required"
class Forbidden(HTTPClientError):
"""HTTP 403 - Forbidden.
The request was a valid request, but the server is refusing to respond
to it.
"""
http_status = 403
message = "Forbidden"
class NotFound(HTTPClientError):
"""HTTP 404 - Not Found.
The requested resource could not be found but may be available again
in the future.
"""
http_status = 404
message = "Not Found"
class MethodNotAllowed(HTTPClientError):
"""HTTP 405 - Method Not Allowed.
A request was made of a resource using a request method not supported
by that resource.
"""
http_status = 405
message = "Method Not Allowed"
class NotAcceptable(HTTPClientError):
"""HTTP 406 - Not Acceptable.
The requested resource is only capable of generating content not
acceptable according to the Accept headers sent in the request.
"""
http_status = 406
message = "Not Acceptable"
class ProxyAuthenticationRequired(HTTPClientError):
"""HTTP 407 - Proxy Authentication Required.
The client must first authenticate itself with the proxy.
"""
http_status = 407
message = "Proxy Authentication Required"
class RequestTimeout(HTTPClientError):
"""HTTP 408 - Request Timeout.
The server timed out waiting for the request.
"""
http_status = 408
message = "Request Timeout"
class Conflict(HTTPClientError):
"""HTTP 409 - Conflict.
Indicates that the request could not be processed because of conflict
in the request, such as an edit conflict.
"""
http_status = 409
message = "Conflict"
class Gone(HTTPClientError):
"""HTTP 410 - Gone.
Indicates that the resource requested is no longer available and will
not be available again.
"""
http_status = 410
message = "Gone"
class LengthRequired(HTTPClientError):
"""HTTP 411 - Length Required.
The request did not specify the length of its content, which is
required by the requested resource.
"""
http_status = 411
message = "Length Required"
class PreconditionFailed(HTTPClientError):
"""HTTP 412 - Precondition Failed.
The server does not meet one of the preconditions that the requester
put on the request.
"""
http_status = 412
message = "Precondition Failed"
class RequestEntityTooLarge(HTTPClientError):
"""HTTP 413 - Request Entity Too Large.
The request is larger than the server is willing or able to process.
"""
http_status = 413
message = "Request Entity Too Large"
def __init__(self, *args, **kwargs):
try:
self.retry_after = int(kwargs.pop('retry_after'))
except (KeyError, ValueError):
self.retry_after = 0
super(RequestEntityTooLarge, self).__init__(*args, **kwargs)
class RequestUriTooLong(HTTPClientError):
"""HTTP 414 - Request-URI Too Long.
The URI provided was too long for the server to process.
"""
http_status = 414
message = "Request-URI Too Long"
class UnsupportedMediaType(HTTPClientError):
"""HTTP 415 - Unsupported Media Type.
The request entity has a media type which the server or resource does
not support.
"""
http_status = 415
message = "Unsupported Media Type"
class RequestedRangeNotSatisfiable(HTTPClientError):
"""HTTP 416 - Requested Range Not Satisfiable.
The client has asked for a portion of the file, but the server cannot
supply that portion.
"""
http_status = 416
message = "Requested Range Not Satisfiable"
class ExpectationFailed(HTTPClientError):
"""HTTP 417 - Expectation Failed.
The server cannot meet the requirements of the Expect request-header field.
"""
http_status = 417
message = "Expectation Failed"
class UnprocessableEntity(HTTPClientError):
"""HTTP 422 - Unprocessable Entity.
The request was well-formed but was unable to be followed due to semantic
errors.
"""
http_status = 422
message = "Unprocessable Entity"
class InternalServerError(HttpServerError):
"""HTTP 500 - Internal Server Error.
A generic error message, given when no more specific message is suitable.
"""
http_status = 500
message = "Internal Server Error"
# "NotImplemented" is a Python built-in constant, so this class uses a different name.
class HttpNotImplemented(HttpServerError):
"""HTTP 501 - Not Implemented.
The server either does not recognize the request method, or it lacks
the ability to fulfill the request.
"""
http_status = 501
message = "Not Implemented"
class BadGateway(HttpServerError):
"""HTTP 502 - Bad Gateway.
The server was acting as a gateway or proxy and received an invalid
response from the upstream server.
"""
http_status = 502
message = "Bad Gateway"
class ServiceUnavailable(HttpServerError):
"""HTTP 503 - Service Unavailable.
The server is currently unavailable.
"""
http_status = 503
message = "Service Unavailable"
class GatewayTimeout(HttpServerError):
"""HTTP 504 - Gateway Timeout.
The server was acting as a gateway or proxy and did not receive a timely
response from the upstream server.
"""
http_status = 504
message = "Gateway Timeout"
class HttpVersionNotSupported(HttpServerError):
"""HTTP 505 - HttpVersion Not Supported.
The server does not support the HTTP protocol version used in the request.
"""
http_status = 505
message = "HTTP Version Not Supported"
# _code_map contains all the classes that have http_status attribute.
_code_map = dict(
(getattr(obj, 'http_status', None), obj)
for name, obj in six.iteritems(vars(sys.modules[__name__]))
if inspect.isclass(obj) and getattr(obj, 'http_status', False)
)
def from_response(response, method, url):
"""Returns an instance of :class:`HttpError` or subclass based on response.
:param response: instance of `requests.Response` class
:param method: HTTP method used for request
:param url: URL used for request
"""
kwargs = {
"http_status": response.status_code,
"response": response,
"method": method,
"url": url,
"request_id": response.headers.get("x-compute-request-id"),
}
if "retry-after" in response.headers:
kwargs["retry_after"] = response.headers["retry-after"]
content_type = response.headers.get("Content-Type", "")
if content_type.startswith("application/json"):
try:
body = response.json()
except ValueError:
pass
else:
if hasattr(body, "keys"):
error = body[body.keys()[0]]
kwargs["message"] = error.get("message", None)
kwargs["details"] = error.get("details", None)
elif content_type.startswith("text/"):
kwargs["details"] = response.text
try:
cls = _code_map[response.status_code]
except KeyError:
if 500 <= response.status_code < 600:
cls = HttpServerError
elif 400 <= response.status_code < 500:
cls = HTTPClientError
else:
cls = HttpError
return cls(**kwargs)
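

# Illustrative helper, not part of the public API: one way a caller might use
# from_response() to turn a failed HTTP response into the mapped exception.
def _example_raise_for_status(response, method, url):
    """Raise the exception mapped to ``response.status_code``, if it is >= 400."""
    if response.status_code >= 400:
        raise from_response(response, method, url)
    return response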
| apache-2.0 |
SimtterCom/gyp | test/win/gyptest-link-deffile.py | 344 | 1252 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure a .def file is handled in the link.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
# Multiple .def files doesn't make any sense, should fail at generate time.
test.run_gyp('deffile-multiple.gyp', chdir=CHDIR, stderr=None, status=1)
test.run_gyp('deffile.gyp', chdir=CHDIR)
test.build('deffile.gyp', test.ALL, chdir=CHDIR)
def HasExport(binary, export):
full_path = test.built_file_path(binary, chdir=CHDIR)
output = test.run_dumpbin('/exports', full_path)
return export in output
# Make sure we only have the export when the .def file is in use.
if HasExport('test_deffile_dll_notexported.dll', 'AnExportedFunction'):
test.fail_test()
if not HasExport('test_deffile_dll_ok.dll', 'AnExportedFunction'):
test.fail_test()
if HasExport('test_deffile_exe_notexported.exe', 'AnExportedFunction'):
test.fail_test()
if not HasExport('test_deffile_exe_ok.exe', 'AnExportedFunction'):
test.fail_test()
test.pass_test()
| bsd-3-clause |
kuznetz/rabbitvcs | rabbitvcs/vcs/git/gittyup/tests/commit.py | 4 | 1027 | #
# test/stage.py
#
import os
from shutil import rmtree
from sys import argv
from optparse import OptionParser
from gittyup.client import GittyupClient
from gittyup.objects import *
from util import touch, change
parser = OptionParser()
parser.add_option("-c", "--cleanup", action="store_true", default=False)
(options, args) = parser.parse_args(argv)
DIR = "commit"
if options.cleanup:
rmtree(DIR, ignore_errors=True)
print "commit.py clean"
else:
if os.path.isdir(DIR):
raise SystemExit("This test script has already been run. Please call this script with --cleanup to start again")
os.mkdir(DIR)
g = GittyupClient()
g.initialize_repository(DIR)
touch(DIR + "/test1.txt")
touch(DIR + "/test2.txt")
g.stage([DIR+"/test1.txt", DIR+"/test2.txt"])
g.commit("First commit", commit_all=True)
change(DIR + "/test1.txt")
g.stage([DIR+"/test1.txt"])
g.commit("Second commit", author="Alex Plumb <[email protected]>")
print "commit.py pass"
| gpl-2.0 |
Endika/edx-platform | openedx/core/djangoapps/credit/migrations/0001_initial.py | 48 | 12567 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import openedx.core.djangoapps.credit.models
import model_utils.fields
import xmodule_django.models
import jsonfield.fields
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CreditCourse',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('course_key', xmodule_django.models.CourseKeyField(unique=True, max_length=255, db_index=True)),
('enabled', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='CreditEligibility',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('username', models.CharField(max_length=255, db_index=True)),
('deadline', models.DateTimeField(default=openedx.core.djangoapps.credit.models.default_deadline_for_credit_eligibility, help_text='Deadline for purchasing and requesting credit.')),
('course', models.ForeignKey(related_name='eligibilities', to='credit.CreditCourse')),
],
options={
'verbose_name_plural': 'Credit eligibilities',
},
),
migrations.CreateModel(
name='CreditProvider',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('provider_id', models.CharField(help_text='Unique identifier for this credit provider. Only alphanumeric characters and hyphens (-) are allowed. The identifier is case-sensitive.', unique=True, max_length=255, validators=[django.core.validators.RegexValidator(regex=b'[a-z,A-Z,0-9,\\-]+', message=b'Only alphanumeric characters and hyphens (-) are allowed', code=b'invalid_provider_id')])),
('active', models.BooleanField(default=True, help_text='Whether the credit provider is currently enabled.')),
('display_name', models.CharField(help_text='Name of the credit provider displayed to users', max_length=255)),
('enable_integration', models.BooleanField(default=False, help_text='When true, automatically notify the credit provider when a user requests credit. In order for this to work, a shared secret key MUST be configured for the credit provider in secure auth settings.')),
('provider_url', models.URLField(default=b'', help_text='URL of the credit provider. If automatic integration is enabled, this will the the end-point that we POST to to notify the provider of a credit request. Otherwise, the user will be shown a link to this URL, so the user can request credit from the provider directly.')),
('provider_status_url', models.URLField(default=b'', help_text='URL from the credit provider where the user can check the status of his or her request for credit. This is displayed to students *after* they have requested credit.')),
('provider_description', models.TextField(default=b'', help_text='Description for the credit provider displayed to users.')),
('fulfillment_instructions', models.TextField(help_text='Plain text or html content for displaying further steps on receipt page *after* paying for the credit to get credit for a credit course against a credit provider.', null=True, blank=True)),
('eligibility_email_message', models.TextField(default=b'', help_text='Plain text or html content for displaying custom message inside credit eligibility email content which is sent when user has met all credit eligibility requirements.')),
('receipt_email_message', models.TextField(default=b'', help_text='Plain text or html content for displaying custom message inside credit receipt email content which is sent *after* paying to get credit for a credit course.')),
('thumbnail_url', models.URLField(default=b'', help_text='Thumbnail image url of the credit provider.', max_length=255)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CreditRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('uuid', models.CharField(unique=True, max_length=32, db_index=True)),
('username', models.CharField(max_length=255, db_index=True)),
('parameters', jsonfield.fields.JSONField()),
('status', models.CharField(default=b'pending', max_length=255, choices=[(b'pending', b'Pending'), (b'approved', b'Approved'), (b'rejected', b'Rejected')])),
('course', models.ForeignKey(related_name='credit_requests', to='credit.CreditCourse')),
('provider', models.ForeignKey(related_name='credit_requests', to='credit.CreditProvider')),
],
options={
'get_latest_by': 'created',
},
),
migrations.CreateModel(
name='CreditRequirement',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('namespace', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('display_name', models.CharField(default=b'', max_length=255)),
('order', models.PositiveIntegerField(default=0)),
('criteria', jsonfield.fields.JSONField()),
('active', models.BooleanField(default=True)),
('course', models.ForeignKey(related_name='credit_requirements', to='credit.CreditCourse')),
],
options={
'ordering': ['order'],
},
),
migrations.CreateModel(
name='CreditRequirementStatus',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('username', models.CharField(max_length=255, db_index=True)),
('status', models.CharField(max_length=32, choices=[(b'satisfied', b'satisfied'), (b'failed', b'failed'), (b'declined', b'declined')])),
('reason', jsonfield.fields.JSONField(default={})),
('requirement', models.ForeignKey(related_name='statuses', to='credit.CreditRequirement')),
],
),
migrations.CreateModel(
name='HistoricalCreditRequest',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('uuid', models.CharField(max_length=32, db_index=True)),
('username', models.CharField(max_length=255, db_index=True)),
('parameters', jsonfield.fields.JSONField()),
('status', models.CharField(default=b'pending', max_length=255, choices=[(b'pending', b'Pending'), (b'approved', b'Approved'), (b'rejected', b'Rejected')])),
('history_id', models.AutoField(serialize=False, primary_key=True)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
('course', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='credit.CreditCourse', null=True)),
('history_user', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
('provider', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='credit.CreditProvider', null=True)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical credit request',
},
),
migrations.CreateModel(
name='HistoricalCreditRequirementStatus',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('username', models.CharField(max_length=255, db_index=True)),
('status', models.CharField(max_length=32, choices=[(b'satisfied', b'satisfied'), (b'failed', b'failed'), (b'declined', b'declined')])),
('reason', jsonfield.fields.JSONField(default={})),
('history_id', models.AutoField(serialize=False, primary_key=True)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
('history_user', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
('requirement', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='credit.CreditRequirement', null=True)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical credit requirement status',
},
),
migrations.AlterUniqueTogether(
name='creditrequirementstatus',
unique_together=set([('username', 'requirement')]),
),
migrations.AlterUniqueTogether(
name='creditrequirement',
unique_together=set([('namespace', 'name', 'course')]),
),
migrations.AlterUniqueTogether(
name='creditrequest',
unique_together=set([('username', 'course', 'provider')]),
),
migrations.AlterUniqueTogether(
name='crediteligibility',
unique_together=set([('username', 'course')]),
),
]
| agpl-3.0 |
jclakkis/discus-inferno | flaskenv/lib/python2.7/site-packages/flask/testsuite/testing.py | 561 | 7411 | # -*- coding: utf-8 -*-
"""
flask.testsuite.testing
~~~~~~~~~~~~~~~~~~~~~~~
Test client and more.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
from flask.testsuite import FlaskTestCase
from flask._compat import text_type
class TestToolsTestCase(FlaskTestCase):
def test_environ_defaults_from_config(self):
app = flask.Flask(__name__)
app.testing = True
app.config['SERVER_NAME'] = 'example.com:1234'
app.config['APPLICATION_ROOT'] = '/foo'
@app.route('/')
def index():
return flask.request.url
ctx = app.test_request_context()
self.assert_equal(ctx.request.url, 'http://example.com:1234/foo/')
with app.test_client() as c:
rv = c.get('/')
self.assert_equal(rv.data, b'http://example.com:1234/foo/')
def test_environ_defaults(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
return flask.request.url
ctx = app.test_request_context()
self.assert_equal(ctx.request.url, 'http://localhost/')
with app.test_client() as c:
rv = c.get('/')
self.assert_equal(rv.data, b'http://localhost/')
def test_redirect_keep_session(self):
app = flask.Flask(__name__)
app.secret_key = 'testing'
@app.route('/', methods=['GET', 'POST'])
def index():
if flask.request.method == 'POST':
return flask.redirect('/getsession')
flask.session['data'] = 'foo'
return 'index'
@app.route('/getsession')
def get_session():
return flask.session.get('data', '<missing>')
with app.test_client() as c:
rv = c.get('/getsession')
assert rv.data == b'<missing>'
rv = c.get('/')
assert rv.data == b'index'
assert flask.session.get('data') == 'foo'
rv = c.post('/', data={}, follow_redirects=True)
assert rv.data == b'foo'
# This support requires a new Werkzeug version
if not hasattr(c, 'redirect_client'):
assert flask.session.get('data') == 'foo'
rv = c.get('/getsession')
assert rv.data == b'foo'
def test_session_transactions(self):
app = flask.Flask(__name__)
app.testing = True
app.secret_key = 'testing'
@app.route('/')
def index():
return text_type(flask.session['foo'])
with app.test_client() as c:
with c.session_transaction() as sess:
self.assert_equal(len(sess), 0)
sess['foo'] = [42]
self.assert_equal(len(sess), 1)
rv = c.get('/')
self.assert_equal(rv.data, b'[42]')
with c.session_transaction() as sess:
self.assert_equal(len(sess), 1)
self.assert_equal(sess['foo'], [42])
def test_session_transactions_no_null_sessions(self):
app = flask.Flask(__name__)
app.testing = True
with app.test_client() as c:
try:
with c.session_transaction() as sess:
pass
except RuntimeError as e:
self.assert_in('Session backend did not open a session', str(e))
else:
self.fail('Expected runtime error')
def test_session_transactions_keep_context(self):
app = flask.Flask(__name__)
app.testing = True
app.secret_key = 'testing'
with app.test_client() as c:
rv = c.get('/')
req = flask.request._get_current_object()
self.assert_true(req is not None)
with c.session_transaction():
self.assert_true(req is flask.request._get_current_object())
def test_session_transaction_needs_cookies(self):
app = flask.Flask(__name__)
app.testing = True
c = app.test_client(use_cookies=False)
try:
with c.session_transaction() as s:
pass
except RuntimeError as e:
self.assert_in('cookies', str(e))
else:
self.fail('Expected runtime error')
def test_test_client_context_binding(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
flask.g.value = 42
return 'Hello World!'
@app.route('/other')
def other():
1 // 0
with app.test_client() as c:
resp = c.get('/')
self.assert_equal(flask.g.value, 42)
self.assert_equal(resp.data, b'Hello World!')
self.assert_equal(resp.status_code, 200)
resp = c.get('/other')
self.assert_false(hasattr(flask.g, 'value'))
self.assert_in(b'Internal Server Error', resp.data)
self.assert_equal(resp.status_code, 500)
flask.g.value = 23
try:
flask.g.value
except (AttributeError, RuntimeError):
pass
else:
raise AssertionError('some kind of exception expected')
def test_reuse_client(self):
app = flask.Flask(__name__)
c = app.test_client()
with c:
self.assert_equal(c.get('/').status_code, 404)
with c:
self.assert_equal(c.get('/').status_code, 404)
def test_test_client_calls_teardown_handlers(self):
app = flask.Flask(__name__)
called = []
@app.teardown_request
def remember(error):
called.append(error)
with app.test_client() as c:
self.assert_equal(called, [])
c.get('/')
self.assert_equal(called, [])
self.assert_equal(called, [None])
del called[:]
with app.test_client() as c:
self.assert_equal(called, [])
c.get('/')
self.assert_equal(called, [])
c.get('/')
self.assert_equal(called, [None])
self.assert_equal(called, [None, None])
class SubdomainTestCase(FlaskTestCase):
def setUp(self):
self.app = flask.Flask(__name__)
self.app.config['SERVER_NAME'] = 'example.com'
self.client = self.app.test_client()
self._ctx = self.app.test_request_context()
self._ctx.push()
def tearDown(self):
if self._ctx is not None:
self._ctx.pop()
def test_subdomain(self):
@self.app.route('/', subdomain='<company_id>')
def view(company_id):
return company_id
url = flask.url_for('view', company_id='xxx')
response = self.client.get(url)
self.assert_equal(200, response.status_code)
self.assert_equal(b'xxx', response.data)
def test_nosubdomain(self):
@self.app.route('/<company_id>')
def view(company_id):
return company_id
url = flask.url_for('view', company_id='xxx')
response = self.client.get(url)
self.assert_equal(200, response.status_code)
self.assert_equal(b'xxx', response.data)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestToolsTestCase))
suite.addTest(unittest.makeSuite(SubdomainTestCase))
return suite
| mit |
3dfxsoftware/cbss-addons | npg_bank_account_reconciliation/npg_bank_account_reconciliation.py | 1 | 21839 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 NovaPoint Group LLC (<http://www.novapointgroup.com>)
# Copyright (C) 2004-2010 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
import decimal_precision as dp
class bank_acc_rec_statement(osv.osv):
def check_group(self, cr, uid, ids, context=None):
"""Check if following security constraints are implemented for groups:
Bank Statement Preparer– they can create, view and delete any of the Bank Statements provided the Bank Statement is not in the DONE state,
or the Ready for Review state.
Bank Statement Verifier – they can create, view, edit, and delete any of the Bank Statements information at any time.
NOTE: DONE Bank Statements are only allowed to be deleted by a Bank Statement Verifier."""
model_data_obj = self.pool.get('ir.model.data')
res_groups_obj = self.pool.get('res.groups')
group_verifier_id = model_data_obj._get_id(cr, uid, 'npg_bank_account_reconciliation', 'group_bank_stmt_verifier')
for statement in self.browse(cr, uid, ids, context=context):
if group_verifier_id:
res_id = model_data_obj.read(cr, uid, [group_verifier_id], ['res_id'])[0]['res_id']
group_verifier = res_groups_obj.browse(cr, uid, res_id, context=context)
group_user_ids = [user.id for user in group_verifier.users]
if statement.state!='draft' and uid not in group_user_ids:
raise osv.except_osv(_('User Error !'),
_("Only a member of '%s' group may delete/edit bank statements when not in draft state!" %(group_verifier.name)))
return True
def copy(self, cr, uid, id, default={}, context=None):
default.update({
'credit_move_line_ids': [],
'debit_move_line_ids': [],
'name': '',
})
return super(bank_acc_rec_statement, self).copy(cr, uid, id, default=default, context=context)
def write(self, cr, uid, ids, vals, context=None):
self.check_group(cr, uid, ids, context) # Check if the user is allowed to perform the action
return super(bank_acc_rec_statement, self).write(cr, uid, ids, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
"Reset the related account.move.line to be re-assigned later to statement."
statement_line_obj = self.pool.get('bank.acc.rec.statement.line')
self.check_group(cr, uid, ids, context) # Check if the user is allowed to perform the action
for statement in self.browse(cr, uid, ids, context=context):
statement_lines = statement.credit_move_line_ids + statement.debit_move_line_ids
statement_line_ids = map(lambda x: x.id, statement_lines)
statement_line_obj.unlink(cr, uid, statement_line_ids, context=context) # call unlink method to reset
return super(bank_acc_rec_statement, self).unlink(cr, uid, ids, context=context)
def check_difference_balance(self, cr, uid, ids, context=None):
"Check if difference balance is zero or not."
for statement in self.browse(cr, uid, ids, context=context):
if statement.difference != 0.0:
raise osv.except_osv(_('Warning!'),
_("Prior to reconciling a statement, all differences must be accounted for and the Difference balance must be zero." \
" Please review and make necessary changes."))
return True
def action_cancel(self, cr, uid, ids, context=None):
"Cancel the the statement."
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
return True
def action_review(self, cr, uid, ids, context=None):
"Change the status of statement from 'draft' to 'to_be_reviewed'."
# If difference balance not zero prevent further processing
self.check_difference_balance(cr, uid, ids, context=context)
self.write(cr, uid, ids, {'state': 'to_be_reviewed'}, context=context)
return True
def action_process(self, cr, uid, ids, context=None):
"""Set the account move lines as 'Cleared' and Assign 'Bank Acc Rec Statement ID'
for the statement lines which are marked as 'Cleared'."""
account_move_line_obj = self.pool.get('account.move.line')
statement_line_obj = self.pool.get('bank.acc.rec.statement.line')
# If difference balance not zero prevent further processing
self.check_difference_balance(cr, uid, ids, context=context)
for statement in self.browse(cr, uid, ids, context=context):
statement_lines = statement.credit_move_line_ids + statement.debit_move_line_ids
for statement_line in statement_lines:
# Mark the move lines as 'Cleared' and assign the 'Bank Acc Rec Statement ID'
account_move_line_obj.write(cr, uid, [statement_line.move_line_id.id],
{'cleared_bank_account': statement_line.cleared_bank_account,
'bank_acc_rec_statement_id': statement_line.cleared_bank_account and statement.id or False
}, context=context)
self.write(cr, uid, [statement.id], {'state': 'done',
'verified_by_user_id': uid,
'verified_date': time.strftime('%Y-%m-%d')
}, context=context)
return True
def action_cancel_draft(self, cr, uid, ids, context=None):
"""Reset the statement to draft and perform resetting operations."""
account_move_line_obj = self.pool.get('account.move.line')
statement_line_obj = self.pool.get('bank.acc.rec.statement.line')
for statement in self.browse(cr, uid, ids, context=context):
statement_lines = statement.credit_move_line_ids + statement.debit_move_line_ids
line_ids = []
statement_line_ids = []
for statement_line in statement_lines:
statement_line_ids.append(statement_line.id)
line_ids.append(statement_line.move_line_id.id) # Find move lines related to statement lines
# Reset 'Cleared' and 'Bank Acc Rec Statement ID' to False
account_move_line_obj.write(cr, uid, line_ids, {'cleared_bank_account': False,
'bank_acc_rec_statement_id': False,
}, context=context)
# Reset 'Cleared' in statement lines
statement_line_obj.write(cr, uid, statement_line_ids, {'cleared_bank_account': False,
'research_required': False
}, context=context)
# Reset statement
self.write(cr, uid, [statement.id], {'state': 'draft',
'verified_by_user_id': False,
'verified_date': False
}, context=context)
return True
def action_select_all(self, cr, uid, ids, context=None):
"""Mark all the statement lines as 'Cleared'."""
statement_line_obj = self.pool.get('bank.acc.rec.statement.line')
for statement in self.browse(cr, uid, ids, context=context):
statement_lines = statement.credit_move_line_ids + statement.debit_move_line_ids
statement_line_ids = map(lambda x: x.id, statement_lines)
statement_line_obj.write(cr, uid, statement_line_ids, {'cleared_bank_account': True}, context=context)
return True
def action_unselect_all(self, cr, uid, ids, context=None):
"""Reset 'Cleared' in all the statement lines."""
statement_line_obj = self.pool.get('bank.acc.rec.statement.line')
for statement in self.browse(cr, uid, ids, context=context):
statement_lines = statement.credit_move_line_ids + statement.debit_move_line_ids
statement_line_ids = map(lambda x: x.id, statement_lines)
statement_line_obj.write(cr, uid, statement_line_ids, {'cleared_bank_account': False}, context=context)
return True
def _get_balance(self, cr, uid, ids, name, args, context=None):
"""Computed as following:
A) Deposits, Credits, and Interest Amount: Total SUM of Amts of lines with Cleared = True
Deposits, Credits, and Interest # of Items: Total of number of lines with Cleared = True
B) Checks, Withdrawals, Debits, and Service Charges Amount: Total SUM of Amts of lines with Cleared = True
Checks, Withdrawals, Debits, and Service Charges # of Items: Total of number of lines with Cleared = True
Cleared Balance: Total Sum of the Deposit Amount Cleared (A) - Total Sum of Checks Amount Cleared (B)
Difference = (Ending Balance - Beginning Balance) - Cleared Balance; this should be zero.
"""
res = {}
account_precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
for statement in self.browse(cr, uid, ids, context=context):
res[statement.id] = {
'sum_of_credits': 0.0,
'sum_of_debits': 0.0,
'cleared_balance': 0.0,
'difference': 0.0,
'sum_of_credits_lines': 0.0,
'sum_of_debits_lines': 0.0
}
for line in statement.credit_move_line_ids:
res[statement.id]['sum_of_credits'] += line.cleared_bank_account and round(line.amount, account_precision) or 0.0
res[statement.id]['sum_of_credits_lines'] += line.cleared_bank_account and 1.0 or 0.0
for line in statement.debit_move_line_ids:
res[statement.id]['sum_of_debits'] += line.cleared_bank_account and round(line.amount, account_precision) or 0.0
res[statement.id]['sum_of_debits_lines'] += line.cleared_bank_account and 1.0 or 0.0
res[statement.id]['cleared_balance'] = round(res[statement.id]['sum_of_debits'] - res[statement.id]['sum_of_credits'], account_precision)
res[statement.id]['difference'] = round((statement.ending_balance - statement.starting_balance) - res[statement.id]['cleared_balance'], account_precision)
return res
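# Worked example with illustrative numbers (not taken from the source): two cleared
# debit lines of 700.0 and 300.0 give sum_of_debits = 1000.0; one cleared credit line
# of 250.0 gives sum_of_credits = 250.0; cleared_balance = 1000.0 - 250.0 = 750.0.
# With starting_balance = 100.0 and ending_balance = 850.0, difference =
# (850.0 - 100.0) - 750.0 = 0.0, which is what check_difference_balance requires
# before the statement can be reviewed or processed.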
def refresh_record(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {}, context=context)
def onchange_account_id(self, cr, uid, ids, account_id, ending_date, suppress_ending_date_filter, context=None):
account_move_line_obj = self.pool.get('account.move.line')
statement_line_obj = self.pool.get('bank.acc.rec.statement.line')
val = {'value': {'credit_move_line_ids': [], 'debit_move_line_ids': []}}
if account_id:
for statement in self.browse(cr, uid, ids, context=context):
statement_line_ids = statement_line_obj.search(cr, uid, [('statement_id', '=', statement.id)], context=context)
# call unlink method to reset and remove existing statement lines and
# mark reset field values in related move lines
statement_line_obj.unlink(cr, uid, statement_line_ids, context=context)
# Apply filter on move lines to allow
#1. credit and debit side journal items in posted state of the selected GL account
#2. Journal items which are not assigned to previous bank statements
#3. Date less than or equal to the ending date, provided the 'Suppress Ending Date Filter' is not checked
domain = [('account_id', '=', account_id), ('move_id.state', '=', 'posted'), ('cleared_bank_account', '=', False), ('draft_assigned_to_statement', '=', False)]
if not suppress_ending_date_filter:
domain += [('date', '<=', ending_date)]
line_ids = account_move_line_obj.search(cr, uid, domain, context=context)
for line in account_move_line_obj.browse(cr, uid, line_ids, context=context):
res = {
'ref': line.ref,
'date': line.date,
'partner_id': line.partner_id.id,
'currency_id': line.currency_id.id,
'amount': line.credit or line.debit,
'name': line.name,
'move_line_id': line.id,
'type': line.credit and 'cr' or 'dr'
}
if res['type'] == 'cr':
val['value']['credit_move_line_ids'].append(res)
else:
val['value']['debit_move_line_ids'].append(res)
return val
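# Example of the domain built above (illustrative values, not from the source): with
# account_id=42, ending_date='2014-06-30' and the suppress flag unchecked, the search
# filter is [('account_id', '=', 42), ('move_id.state', '=', 'posted'),
# ('cleared_bank_account', '=', False), ('draft_assigned_to_statement', '=', False),
# ('date', '<=', '2014-06-30')].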
_name = "bank.acc.rec.statement"
_columns = {
'name': fields.char('Name', required=True, size=64, states={'done':[('readonly', True)]}, help="This is a unique name identifying the statement (e.g. Bank X January 2012)."),
'account_id': fields.many2one('account.account', 'Account', required=True,
states={'done':[('readonly', True)]}, domain="[('company_id', '=', company_id), ('type', '!=', 'view')]",
help="The Bank/Gl Account that is being reconciled."),
'ending_date': fields.date('Ending Date', required=True, states={'done':[('readonly', True)]}, help="The ending date of your bank statement."),
'starting_balance': fields.float('Starting Balance', required=True, digits_compute=dp.get_precision('Account'), help="The Starting Balance on your bank statement.", states={'done':[('readonly', True)]}),
'ending_balance': fields.float('Ending Balance', required=True, digits_compute=dp.get_precision('Account'), help="The Ending Balance on your bank statement.", states={'done':[('readonly', True)]}),
'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True,
help="The Company for which the deposit ticket is made to"),
'notes': fields.text('Notes'),
'verified_date': fields.date('Verified Date', states={'done':[('readonly', True)]},
help="Date in which Deposit Ticket was verified."),
'verified_by_user_id': fields.many2one('res.users', 'Verified By', states={'done':[('readonly', True)]},
help="Entered automatically by the “last user” who saved it. System generated."),
'credit_move_line_ids': fields.one2many('bank.acc.rec.statement.line', 'statement_id', 'Credits',
domain=[('type','=','cr')], context={'default_type':'cr'}, states={'done':[('readonly', True)]}),
'debit_move_line_ids': fields.one2many('bank.acc.rec.statement.line', 'statement_id', 'Debits',
domain=[('type','=','dr')], context={'default_type':'dr'}, states={'done':[('readonly', True)]}),
'cleared_balance': fields.function(_get_balance, method=True, string='Cleared Balance', digits_compute=dp.get_precision('Account'),
type='float', help="Total Sum of the Deposit Amount Cleared – Total Sum of Checks, Withdrawals, Debits, and Service Charges Amount Cleared",
multi="balance"),
'difference': fields.function(_get_balance, method=True, type='float', string='Difference', digits_compute=dp.get_precision('Account'),
help="(Ending Balance – Beginning Balance) - Cleared Balance.", multi="balance"),
'sum_of_credits': fields.function(_get_balance, method=True, string='Checks, Withdrawals, Debits, and Service Charges Amount', digits_compute=dp.get_precision('Account'),
type='float', help="Total SUM of Amts of lines with Cleared = True",
multi="balance"),
'sum_of_debits': fields.function(_get_balance, method=True, type='float', string='Deposits, Credits, and Interest Amount', digits_compute=dp.get_precision('Account'),
help="Total SUM of Amts of lines with Cleared = True", multi="balance"),
'sum_of_credits_lines': fields.function(_get_balance, method=True, string='Checks, Withdrawals, Debits, and Service Charges # of Items',
type='float', help="Total of number of lines with Cleared = True",
multi="balance"),
'sum_of_debits_lines': fields.function(_get_balance, method=True, type='float', string='Deposits, Credits, and Interest # of Items',
help="Total of number of lines with Cleared = True", multi="balance"),
'suppress_ending_date_filter': fields.boolean('Remove Ending Date Filter', help="If this is checked then the Statement End Date filter on the transactions below will not occur. All transactions would come over."),
'state': fields.selection([
('draft','Draft'),
('to_be_reviewed','Ready for Review'),
('done','Done'),
('cancel', 'Cancel')
],'State', select=True, readonly=True),
}
_defaults = {
'state': 'draft',
'company_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
'ending_date': time.strftime('%Y-%m-%d'),
}
_order = "ending_date desc"
_sql_constraints = [
('name_company_uniq', 'unique (name, company_id, account_id)', 'The name of the statement must be unique per company and G/L account!')
]
bank_acc_rec_statement()
class bank_acc_rec_statement_line(osv.osv):
_name = "bank.acc.rec.statement.line"
_description = "Statement Line"
_columns = {
'name': fields.char('Name', size=64, help="Derived from the related Journal Item.", required=True),
'ref': fields.char('Reference', size=64, help="Derived from related Journal Item."),
'partner_id': fields.many2one('res.partner', string='Partner', help="Derived from related Journal Item."),
'amount': fields.float('Amount', digits_compute=dp.get_precision('Account'),
help="Derived from the 'debit' amount from related Journal Item."),
'date': fields.date('Date', required=True, help="Derived from related Journal Item."),
'statement_id': fields.many2one('bank.acc.rec.statement', 'Statement', required=True, ondelete='cascade'),
'move_line_id': fields.many2one('account.move.line', 'Journal Item', help="Related Journal Item."),
'cleared_bank_account': fields.boolean('Cleared? ', help='Check if the transaction has cleared from the bank'),
'research_required': fields.boolean('Research Required? ', help='Check if the transaction should be researched by Accounting personnel'),
'currency_id': fields.many2one('res.currency', 'Currency', help="The optional other currency if it is a multi-currency entry."),
'type':fields.selection([('dr','Debit'),('cr','Credit')], 'Cr/Dr'),
}
def create(self, cr, uid, vals, context=None):
account_move_line_obj = self.pool.get('account.move.line')
# Prevent manually adding a new statement line.
# Only the onchange method is allowed to pre-populate statement lines based on the filter rules.
if not vals.get('move_line_id', False):
raise osv.except_osv(_('Processing Error'),_('You cannot add any new bank statement line manually as of this revision!'))
account_move_line_obj.write(cr, uid, [vals['move_line_id']], {'draft_assigned_to_statement': True}, context=context)
return super(bank_acc_rec_statement_line, self).create(cr, uid, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
account_move_line_obj = self.pool.get('account.move.line')
move_line_ids = map(lambda x: x.move_line_id.id, self.browse(cr, uid, ids, context=context))
# Reset field values in move lines to be added later
account_move_line_obj.write(cr, uid, move_line_ids, {'draft_assigned_to_statement': False,
'cleared_bank_account': False,
'bank_acc_rec_statement_id': False,
}, context=context)
return super(bank_acc_rec_statement_line, self).unlink(cr, uid, ids, context=context)
bank_acc_rec_statement_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | gpl-2.0 |
robobrobro/ballin-octo-shame | lib/Python-3.4.3/Lib/idlelib/idle_test/test_config_name.py | 93 | 2399 | """Unit tests for idlelib.configSectionNameDialog"""
import unittest
from idlelib.idle_test.mock_tk import Var, Mbox
from idlelib import configSectionNameDialog as name_dialog_module
name_dialog = name_dialog_module.GetCfgSectionNameDialog
class Dummy_name_dialog:
# Mock for testing the following methods of name_dialog
name_ok = name_dialog.name_ok
Ok = name_dialog.Ok
Cancel = name_dialog.Cancel
# Attributes, constant or variable, needed for tests
used_names = ['used']
name = Var()
result = None
destroyed = False
def destroy(self):
self.destroyed = True
# name_ok calls Mbox.showerror if name is not ok
orig_mbox = name_dialog_module.tkMessageBox
showerror = Mbox.showerror
class ConfigNameTest(unittest.TestCase):
dialog = Dummy_name_dialog()
@classmethod
def setUpClass(cls):
name_dialog_module.tkMessageBox = Mbox
@classmethod
def tearDownClass(cls):
name_dialog_module.tkMessageBox = orig_mbox
def test_blank_name(self):
self.dialog.name.set(' ')
self.assertEqual(self.dialog.name_ok(), '')
self.assertEqual(showerror.title, 'Name Error')
self.assertIn('No', showerror.message)
def test_used_name(self):
self.dialog.name.set('used')
self.assertEqual(self.dialog.name_ok(), '')
self.assertEqual(showerror.title, 'Name Error')
self.assertIn('use', showerror.message)
def test_long_name(self):
self.dialog.name.set('good'*8)
self.assertEqual(self.dialog.name_ok(), '')
self.assertEqual(showerror.title, 'Name Error')
self.assertIn('too long', showerror.message)
def test_good_name(self):
self.dialog.name.set(' good ')
showerror.title = 'No Error' # should not be called
self.assertEqual(self.dialog.name_ok(), 'good')
self.assertEqual(showerror.title, 'No Error')
def test_ok(self):
self.dialog.destroyed = False
self.dialog.name.set('good')
self.dialog.Ok()
self.assertEqual(self.dialog.result, 'good')
self.assertTrue(self.dialog.destroyed)
def test_cancel(self):
self.dialog.destroyed = False
self.dialog.Cancel()
self.assertEqual(self.dialog.result, '')
self.assertTrue(self.dialog.destroyed)
if __name__ == '__main__':
unittest.main(verbosity=2, exit=False)
| mit |
geraldarthur/qgis-openlayers-plugin | openlayers/weblayers/weblayer_registry.py | 5 | 2552 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
OpenLayers Plugin
A QGIS plugin
-------------------
begin : 2009-11-30
copyright : (C) 2009 by Pirmin Kalberer, Sourcepole
email : pka at sourcepole.ch
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from weblayer import WebLayerGroup
class WebLayerTypeRegistry:
"""Registry of OL web Layers"""
def __init__(self, plugin):
self._plugin = plugin
self._groups = {}
self._olLayerTypes = {}
self._layerTypeId = 0 # Sequence for ID
self._olLayerTypeNames = {}
def group(self, name, icon):
"""Create group and register in registry"""
if name not in self._groups:
self._groups[name] = WebLayerGroup(name, icon)
return self._groups[name]
def groups(self):
return self._groups.values()
def register(self, layerType):
layerType.group = self.group(layerType.groupName, layerType.groupIcon)
layerType.setAddLayerCallback(self._plugin.addLayer)
layerType.layerTypeId = self._layerTypeId
self._olLayerTypes[self._layerTypeId] = layerType
self._layerTypeId += 1
self._olLayerTypeNames[layerType.layerTypeName] = layerType
def types(self):
return self._olLayerTypes.values()
def getById(self, id):
if self._olLayerTypes.has_key(id):
return self._olLayerTypes[id]
else:
return None
def getByName(self, name):
if self._olLayerTypeNames.has_key(name):
return self._olLayerTypeNames[name]
else:
return None
def groupLayerTypes(self, group):
lst = []
for lyr in self.types():
if lyr.group == group:
lst.append(lyr)
return lst
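# Hedged usage sketch (OsmLayerType and the plugin object are placeholders that do not
# exist in this module; shown only to illustrate the registry API defined above):
#   registry = WebLayerTypeRegistry(plugin)
#   registry.register(OsmLayerType())
#   for group in registry.groups():
#       print registry.groupLayerTypes(group)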
| gpl-2.0 |
johngian/remo | remo/profiles/migrations/0050_auto__add_field_userprofile_is_rotm_nominee.py | 3 | 11048 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProfile.is_rotm_nominee'
db.add_column(u'profiles_userprofile', 'is_rotm_nominee',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProfile.is_rotm_nominee'
db.delete_column(u'profiles_userprofile', 'is_rotm_nominee')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'profiles.functionalarea': {
'Meta': {'ordering': "['name']", 'object_name': 'FunctionalArea'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'blank': 'True'})
},
u'profiles.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '400'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 11, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'profiles.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'users_added'", 'null': 'True', 'to': u"orm['auth.User']"}),
'bio': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'current_streak_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_joined_program': ('django.db.models.fields.DateField', [], {'blank': 'True'}),
'diaspora_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '50', 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'first_report_notification': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'functional_areas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users_matching'", 'symmetrical': 'False', 'to': u"orm['profiles.FunctionalArea']"}),
'gender': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc_channels': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'irc_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'is_rotm_nominee': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_unavailable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'jabber_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'linkedin_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'local_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'lon': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'longest_streak_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'longest_streak_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'mentor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mentees'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'mozillian_username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'mozillians_profile_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'personal_blog_feed': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'personal_website_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'private_email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'null': 'True'}),
'receive_email_on_add_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'receive_email_on_add_event_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'receive_email_on_add_voting_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'registration_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'second_report_notification': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'tracked_functional_areas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users_tracking'", 'symmetrical': 'False', 'to': u"orm['profiles.FunctionalArea']"}),
'twitter_account': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
'unavailability_task_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'wiki_profile_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
u'profiles.userstatus': {
'Meta': {'ordering': "['-expected_date', '-created_on']", 'object_name': 'UserStatus'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'expected_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'replacement_rep': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'replaced_rep'", 'null': 'True', 'to': u"orm['auth.User']"}),
'return_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'status'", 'to': u"orm['auth.User']"})
}
}
complete_apps = ['profiles'] | bsd-3-clause |
jianhuashao/WebDownloadJobsManage | dbs/google_drive/oauth2client/keyring_storage.py | 273 | 3227 | # Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A keyring based Storage.
A Storage for Credentials that uses the keyring module.
"""
__author__ = '[email protected] (Joe Gregorio)'
import keyring
import threading
from client import Storage as BaseStorage
from client import Credentials
class Storage(BaseStorage):
"""Store and retrieve a single credential to and from the keyring.
To use this module you must have the keyring module installed. See
<http://pypi.python.org/pypi/keyring/>. This is an optional module and is not
installed with oauth2client by default because it does not work on all the
platforms that oauth2client supports, such as Google App Engine.
The keyring module <http://pypi.python.org/pypi/keyring/> is a cross-platform
library for accessing the keyring capabilities of the local system. The user will
be prompted for their keyring password when this module is used, and the
manner in which the user is prompted will vary per platform.
Usage:
from oauth2client.keyring_storage import Storage
s = Storage('name_of_application', 'user1')
credentials = s.get()
"""
def __init__(self, service_name, user_name):
"""Constructor.
Args:
service_name: string, The name of the service under which the credentials
are stored.
user_name: string, The name of the user to store credentials for.
"""
self._service_name = service_name
self._user_name = user_name
self._lock = threading.Lock()
def acquire_lock(self):
"""Acquires any lock necessary to access this Storage.
This lock is not reentrant."""
self._lock.acquire()
def release_lock(self):
"""Release the Storage lock.
Trying to release a lock that isn't held will result in a
RuntimeError.
"""
self._lock.release()
def locked_get(self):
"""Retrieve Credential from file.
Returns:
oauth2client.client.Credentials
"""
credentials = None
content = keyring.get_password(self._service_name, self._user_name)
if content is not None:
try:
credentials = Credentials.new_from_json(content)
credentials.set_store(self)
except ValueError:
pass
return credentials
def locked_put(self, credentials):
"""Write Credentials to file.
Args:
credentials: Credentials, the credentials to store.
"""
keyring.set_password(self._service_name, self._user_name,
credentials.to_json())
def locked_delete(self):
"""Delete Credentials file.
Args:
credentials: Credentials, the credentials to store.
"""
keyring.set_password(self._service_name, self._user_name, '')
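# Hedged sketch of the locking protocol described in the docstrings above (the service
# and user names are placeholders; only methods defined in this class are used):
#   s = Storage('name_of_application', 'user1')
#   s.acquire_lock()
#   try:
#       credentials = s.locked_get()
#   finally:
#       s.release_lock()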
| apache-2.0 |
gbaty/shiboken2 | tests/samplebinding/ownership_delete_child_in_python_test.py | 6 | 1815 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the Shiboken Python Bindings Generator project.
#
# Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
#
# Contact: PySide team <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# version 2.1 as published by the Free Software Foundation. Please
# review the following information to ensure the GNU Lesser General
# Public License version 2.1 requirements will be met:
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
# #
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
'''Tests for deleting a child object in python'''
import unittest
import random
import string
from sample import ObjectType
from py3kcompat import IS_PY3K
if IS_PY3K:
string.letters = string.ascii_letters
class DeleteChildInPython(unittest.TestCase):
'''Test case for deleting (unref) a child in python'''
def testDeleteChild(self):
'''Delete child in python should not invalidate child'''
parent = ObjectType()
child = ObjectType(parent)
name = ''.join(random.sample(string.letters, 5))
child.setObjectName(name)
del child
new_child = parent.children()[0]
self.assertEqual(new_child.objectName(), name)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
KitKatXperience/platform_external_chromium_org | chrome/test/webdriver/test/chromedriver.py | 41 | 7950 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chrome WebDriver that implements extra Chrome-specific functionality.
This module is experimental and will change and break without warning.
Use at your own risk.
Style Note: Because this is an extension to the WebDriver python API and
since this module will eventually be moved into the webdriver codebase, the
code follows WebDriver naming conventions for functions.
"""
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
class _ViewType(object):
"""Constants representing different web view types in Chrome.
They mirror the enum AutomationId::Type in chrome/common/automation_id.h.
"""
TAB = 1
EXTENSION_POPUP = 2
EXTENSION_BG_PAGE = 3
EXTENSION_INFOBAR = 4
APP_SHELL = 6
class WebDriver(RemoteWebDriver):
"""
Controls Chrome and provides additional Chrome-specific functionality not in
the WebDriver standard.
This class is experimental and subject to change and break without warning.
Use at your own risk.
"""
_CHROME_GET_EXTENSIONS = "chrome.getExtensions"
_CHROME_INSTALL_EXTENSION = "chrome.installExtension"
_CHROME_GET_EXTENSION_INFO = "chrome.getExtensionInfo"
_CHROME_MODIFY_EXTENSION = "chrome.setExtensionState"
_CHROME_UNINSTALL_EXTENSION = "chrome.uninstallExtension"
_CHROME_GET_VIEW_HANDLES = "chrome.getViewHandles"
_CHROME_DUMP_HEAP_PROFILE = "chrome.dumpHeapProfile"
def __init__(self, url, desired_capabilities={}):
"""Creates a WebDriver that controls Chrome via ChromeDriver.
Args:
url: The URL of a running ChromeDriver server.
desired_capabilities: Requested capabilities for the new WebDriver
session.
"""
RemoteWebDriver.__init__(self,
command_executor=url,
desired_capabilities=desired_capabilities)
# Add custom commands.
custom_commands = {
WebDriver._CHROME_GET_EXTENSIONS:
('GET', '/session/$sessionId/chrome/extensions'),
WebDriver._CHROME_INSTALL_EXTENSION:
('POST', '/session/$sessionId/chrome/extensions'),
WebDriver._CHROME_GET_EXTENSION_INFO:
('GET', '/session/$sessionId/chrome/extension/$id'),
WebDriver._CHROME_MODIFY_EXTENSION:
('POST', '/session/$sessionId/chrome/extension/$id'),
WebDriver._CHROME_UNINSTALL_EXTENSION:
('DELETE', '/session/$sessionId/chrome/extension/$id'),
WebDriver._CHROME_GET_VIEW_HANDLES:
('GET', '/session/$sessionId/chrome/views'),
WebDriver._CHROME_DUMP_HEAP_PROFILE:
('POST', '/session/$sessionId/chrome/heapprofilerdump')
}
self.command_executor._commands.update(custom_commands)
def get_installed_extensions(self):
"""Returns a list of installed extensions."""
ids = RemoteWebDriver.execute(
self, WebDriver._CHROME_GET_EXTENSIONS)['value']
return map(lambda id: Extension(self, id), ids)
def install_extension(self, path):
"""Install the extension at the given path.
Args:
path: Path to packed or unpacked extension to install.
Returns:
The installed extension.
"""
params = {'path': path}
id = RemoteWebDriver.execute(
self, WebDriver._CHROME_INSTALL_EXTENSION, params)['value']
return Extension(self, id)
def dump_heap_profile(self, reason):
"""Dumps a heap profile. It works only on Linux and ChromeOS.
We need an environment variable "HEAPPROFILE" set to a directory and a
filename prefix, for example, "/tmp/prof". In a case of this example,
heap profiles will be dumped into "/tmp/prof.(pid).0002.heap",
"/tmp/prof.(pid).0003.heap", and so on. Nothing happens when this
function is called without the env.
Args:
reason: A string which describes the reason for dumping a heap profile.
The reason will be included in the logged message.
Examples:
'To check memory leaking'
'For WebDriver tests'
"""
if self.IsLinux(): # IsLinux() also implies IsChromeOS().
params = {'reason': reason}
RemoteWebDriver.execute(self, WebDriver._CHROME_DUMP_HEAP_PROFILE, params)
else:
raise WebDriverException('Heap-profiling is not supported in this OS.')
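# Hedged usage sketch for the HEAPPROFILE protocol described above (the server URL and
# profile prefix are assumptions, not values defined by this module):
#   os.environ['HEAPPROFILE'] = '/tmp/prof'   # must be set before Chrome starts
#   driver = WebDriver('http://localhost:9515', {})
#   driver.dump_heap_profile('For WebDriver tests')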
class Extension(object):
"""Represents a Chrome extension/app."""
def __init__(self, parent, id):
self._parent = parent
self._id = id
@property
def id(self):
return self._id
def get_name(self):
return self._get_info()['name']
def get_version(self):
return self._get_info()['version']
def is_enabled(self):
return self._get_info()['is_enabled']
def set_enabled(self, value):
self._execute(WebDriver._CHROME_MODIFY_EXTENSION, {'enable': value})
def is_page_action_visible(self):
"""Returns whether the page action is visible in the currently targeted tab.
This will fail if the current target is not a tab.
"""
return self._get_info()['is_page_action_visible']
def uninstall(self):
self._execute(WebDriver._CHROME_UNINSTALL_EXTENSION)
def click_browser_action(self):
"""Clicks the browser action in the currently targeted tab.
This will fail if the current target is not a tab.
"""
self._execute(WebDriver._CHROME_MODIFY_EXTENSION,
{'click_button': 'browser_action'})
def click_page_action(self):
"""Clicks the page action in the currently targeted tab.
This will fail if the current target is not a tab.
"""
self._execute(WebDriver._CHROME_MODIFY_EXTENSION,
{'click_button': 'page_action'})
def get_app_shell_handle(self):
"""Returns the window handle for the app shell."""
return self._get_handle(_ViewType.APP_SHELL)
def get_bg_page_handle(self):
"""Returns the window handle for the background page."""
return self._get_handle(_ViewType.EXTENSION_BG_PAGE)
def get_popup_handle(self):
"""Returns the window handle for the open browser/page action popup."""
return self._get_handle(_ViewType.EXTENSION_POPUP)
def get_infobar_handles(self):
"""Returns a list of window handles for all open infobars of this extension.
This handle can be used with |WebDriver.switch_to_window|.
"""
infobars = filter(lambda view: view['type'] == _ViewType.EXTENSION_INFOBAR,
self._get_views())
return map(lambda view: view['handle'], infobars)
def _get_handle(self, type):
"""Returns the window handle for the page of given type.
This handle can be used with |WebDriver.switch_to_window|.
Args:
type: The type of the window as defined in _ViewType.
Returns:
The window handle, or None if there is no page with the given type.
"""
pages = filter(lambda view: view['type'] == type, self._get_views())
if len(pages) > 0:
return pages[0]['handle']
return None
def _get_info(self):
"""Returns a dictionary of all this extension's info."""
return self._execute(WebDriver._CHROME_GET_EXTENSION_INFO)['value']
def _get_views(self):
"""Returns a list of view information for this extension."""
views = self._parent.execute(WebDriver._CHROME_GET_VIEW_HANDLES)['value']
ext_views = []
for view in views:
if 'extension_id' in view and view['extension_id'] == self._id:
ext_views += [view]
return ext_views
def _execute(self, command, params=None):
"""Executes a command against the underlying extension.
Args:
command: The name of the command to execute.
params: A dictionary of named parameters to send with the command.
Returns:
The command's JSON response loaded into a dictionary object.
"""
if not params:
params = {}
params['id'] = self._id
return self._parent.execute(command, params)
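# Hedged end-to-end sketch, not part of the original module: the server URL and the
# extension path are placeholders, and only methods defined above (plus the inherited
# switch_to_window/quit from RemoteWebDriver) are exercised.
def _example_usage():
  """Illustrative only; assumes a ChromeDriver server at http://localhost:9515."""
  driver = WebDriver('http://localhost:9515', {})
  ext = driver.install_extension('/tmp/example.crx')  # placeholder .crx path
  print ext.get_name(), ext.get_version()
  bg_handle = ext.get_bg_page_handle()
  if bg_handle:
    driver.switch_to_window(bg_handle)
  ext.uninstall()
  driver.quit()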
| bsd-3-clause |
porjo/docker | docs/docs-update.py | 12 | 8230 | #!/usr/bin/env python
#
# Sven's quick hack script to update the documentation
#
# call with:
# ./docs/update.py /usr/bin/docker
#
import datetime
import re
from sys import argv
import subprocess
import os
import os.path
script, docker_cmd = argv
date_string = datetime.date.today().strftime('%B %Y')
def print_usage(outtext, docker_cmd, command):
try:
help_string = subprocess.check_output(
"".join((docker_cmd, " ", command, " --help")),
stderr=subprocess.STDOUT,
shell=True
)
except subprocess.CalledProcessError, e:
help_string = e.output
for l in str(help_string).strip().split("\n"):
l = l.rstrip()
if l == '':
outtext.write("\n")
else:
# `docker --help` tells the user the path they called it with
l = re.sub(docker_cmd, "docker", l)
outtext.write(" {}\n".format(l))
outtext.write("\n")
# TODO: look for and complain about any missing commands
def update_cli_reference():
originalFile = "docs/sources/reference/commandline/cli.md"
os.rename(originalFile, originalFile+".bak")
intext = open("{}.bak".format(originalFile), "r")
outtext = open(originalFile, "w")
mode = 'p'
space = " "
command = ""
# Two-mode, line-by-line parser
for line in intext:
if mode == 'p':
# Prose
match = re.match("( \s*)Usage: docker ([a-z]+)", line)
if match:
# the beginning of a Docker command usage block
space = match.group(1)
command = match.group(2)
mode = 'c'
else:
match = re.match("( \s*)Usage of .*docker.*:", line)
if match:
# the beginning of the Docker --help usage block
space = match.group(1)
command = ""
mode = 'c'
else:
outtext.write(line)
else:
# command usage block
match = re.match("("+space+")(.*)|^$", line)
if not match:
# The end of the current usage block
# Shell out to run docker to see the new output
print_usage(outtext, docker_cmd, command)
outtext.write(line)
mode = 'p'
if mode == 'c':
print_usage(outtext, docker_cmd, command)
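# Illustrative note (the markdown content below is made up): a stale block such as
#     Usage: docker attach [OPTIONS] CONTAINER
#       --old-flag=false    Out-of-date description
# is detected by the "Usage: docker ([a-z]+)" pattern above; lines are consumed while
# they keep the captured indentation (or are blank), and print_usage() then writes the
# live output of `docker attach --help` in their place.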
def update_man_pages():
cmds = []
try:
help_string = subprocess.check_output(
"".join((docker_cmd)),
stderr=subprocess.STDOUT,
shell=True
)
except subprocess.CalledProcessError, e:
help_string = e.output
for l in str(help_string).strip().split("\n"):
l = l.rstrip()
if l != "":
match = re.match(" (.*?) .*", l)
if match:
cmds.append(match.group(1))
desc_re = re.compile(
r".*# DESCRIPTION(.*?)# (OPTIONS|EXAMPLES?).*",
re.MULTILINE | re.DOTALL
)
example_re = re.compile(
r".*# EXAMPLES?(.*)# HISTORY.*",
re.MULTILINE | re.DOTALL
)
history_re = re.compile(
r".*# HISTORY(.*)",
re.MULTILINE | re.DOTALL
)
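    # These patterns carve the existing man page into its hand-written
    # DESCRIPTION, EXAMPLES and HISTORY sections so they can be re-emitted
    # around the regenerated SYNOPSIS and OPTIONS below.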
for command in cmds:
print "COMMAND: "+command
history = ""
description = ""
examples = ""
if os.path.isfile("docs/man/docker-"+command+".1.md"):
intext = open("docs/man/docker-"+command+".1.md", "r")
txt = intext.read()
intext.close()
match = desc_re.match(txt)
if match:
description = match.group(1)
match = example_re.match(txt)
if match:
examples = match.group(1)
match = history_re.match(txt)
if match:
history = match.group(1).strip()
usage = ""
usage_description = ""
params = {}
key_params = {}
try:
help_string = subprocess.check_output(
"".join((docker_cmd, " ", command, " --help")),
stderr=subprocess.STDOUT,
shell=True
)
except subprocess.CalledProcessError, e:
help_string = e.output
last_key = ""
        for l in str(help_string).split("\n"):
l = l.rstrip()
if l != "":
match = re.match("Usage: docker {}(.*)".format(command), l)
if match:
usage = match.group(1).strip()
else:
match = re.match(" (-+)(.*) \s+(.*)", l)
if match:
last_key = match.group(2).rstrip()
key_params[last_key] = match.group(1)+last_key
params[last_key] = match.group(3)
else:
if last_key != "":
params[last_key] = "{}\n{}".format(params[last_key], l)
else:
if usage_description != "":
usage_description = usage_description + "\n"
usage_description = usage_description + l
# replace [OPTIONS] with the list of params
options = ""
match = re.match("\[OPTIONS\](.*)", usage)
if match:
usage = match.group(1)
new_usage = ""
# TODO: sort without the `-`'s
for key in sorted(params.keys(), key=lambda s: s.lower()):
# split on commas, remove --?.*=.*, put in *'s mumble
ps = []
opts = []
for k in key_params[key].split(","):
match = re.match("(-+)([A-Za-z-0-9]*)(?:=(.*))?", k.lstrip())
if match:
p = "**{}{}**".format(match.group(1), match.group(2))
o = "**{}{}**".format(match.group(1), match.group(2))
if match.group(3):
val = match.group(3)
if val == "\"\"":
val = match.group(2).upper()
p = "{}[=*{}*]".format(p, val)
val = match.group(3)
if val in ("true", "false"):
params[key] = params[key].rstrip()
if not params[key].endswith('.'):
params[key] = params[key]+ "."
params[key] = "{} The default is *{}*.".format(params[key], val)
val = "*true*|*false*"
o = "{}={}".format(o, val)
ps.append(p)
opts.append(o)
else:
print "nomatch:{}".format(k)
new_usage = "{}\n[{}]".format(new_usage, "|".join(ps))
options = "{}{}\n {}\n\n".format(options, ", ".join(opts), params[key])
if new_usage != "":
new_usage = "{}\n".format(new_usage.strip())
usage = new_usage + usage
outtext = open("docs/man/docker-{}.1.md".format(command), "w")
outtext.write("""% DOCKER(1) Docker User Manuals
% Docker Community
% JUNE 2014
# NAME
""")
outtext.write("docker-{} - {}\n\n".format(command, usage_description))
outtext.write("# SYNOPSIS\n**docker {}**\n{}\n\n".format(command, usage))
if description != "":
outtext.write("# DESCRIPTION{}".format(description))
if options == "":
options = "There are no available options.\n\n"
outtext.write("# OPTIONS\n{}".format(options))
if examples != "":
outtext.write("# EXAMPLES{}".format(examples))
outtext.write("# HISTORY\n")
if history != "":
outtext.write("{}\n".format(history))
recent_history_re = re.compile(
".*{}.*".format(date_string),
re.MULTILINE | re.DOTALL
)
if not recent_history_re.match(history):
outtext.write("{}, updated by Sven Dowideit <[email protected]>\n".format(date_string))
outtext.close()
# main
update_cli_reference()
update_man_pages()
| apache-2.0 |
bclau/nova | nova/availability_zones.py | 10 | 4790 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Availability zone helper functions."""
from oslo.config import cfg
from nova import db
from nova.openstack.common import memorycache
# NOTE(vish): azs don't change that often, so cache them for an hour to
# avoid hitting the db multiple times on every request.
AZ_CACHE_SECONDS = 60 * 60
MC = None
availability_zone_opts = [
cfg.StrOpt('internal_service_availability_zone',
default='internal',
help='availability_zone to show internal services under'),
cfg.StrOpt('default_availability_zone',
default='nova',
help='default compute node availability_zone'),
]
CONF = cfg.CONF
CONF.register_opts(availability_zone_opts)
def _get_cache():
global MC
if MC is None:
MC = memorycache.get_client()
return MC
def reset_cache():
"""Reset the cache, mainly for testing purposes and update
availability_zone for host aggregate
"""
global MC
MC = None
def _make_cache_key(host):
return "azcache-%s" % host.encode('utf-8')
def set_availability_zones(context, services):
# Makes sure services isn't a sqlalchemy object
services = [dict(service.iteritems()) for service in services]
metadata = db.aggregate_host_get_by_metadata_key(context,
key='availability_zone')
for service in services:
az = CONF.internal_service_availability_zone
if service['topic'] == "compute":
if metadata.get(service['host']):
az = u','.join(list(metadata[service['host']]))
else:
az = CONF.default_availability_zone
# update the cache
cache = _get_cache()
cache_key = _make_cache_key(service['host'])
cache.delete(cache_key)
cache.set(cache_key, az, AZ_CACHE_SECONDS)
service['availability_zone'] = az
return services
def get_host_availability_zone(context, host, conductor_api=None):
if conductor_api:
metadata = conductor_api.aggregate_metadata_get_by_host(
context, host, key='availability_zone')
else:
metadata = db.aggregate_metadata_get_by_host(
context, host, key='availability_zone')
if 'availability_zone' in metadata:
az = list(metadata['availability_zone'])[0]
else:
az = CONF.default_availability_zone
return az
def get_availability_zones(context, get_only_available=False):
"""Return available and unavailable zones on demands.
:param get_only_available: flag to determine whether to return
available zones only, default False indicates return both
available zones and not available zones, True indicates return
available zones only
"""
enabled_services = db.service_get_all(context, False)
enabled_services = set_availability_zones(context, enabled_services)
available_zones = []
for zone in [service['availability_zone'] for service
in enabled_services]:
if zone not in available_zones:
available_zones.append(zone)
if not get_only_available:
disabled_services = db.service_get_all(context, True)
disabled_services = set_availability_zones(context, disabled_services)
not_available_zones = []
zones = [service['availability_zone'] for service in disabled_services
if service['availability_zone'] not in available_zones]
for zone in zones:
if zone not in not_available_zones:
not_available_zones.append(zone)
return (available_zones, not_available_zones)
else:
return available_zones
def get_instance_availability_zone(context, instance):
"""Return availability zone of specified instance."""
host = str(instance.get('host'))
if not host:
return None
cache_key = _make_cache_key(host)
cache = _get_cache()
az = cache.get(cache_key)
if not az:
elevated = context.elevated()
az = get_host_availability_zone(elevated, host)
cache.set(cache_key, az, AZ_CACHE_SECONDS)
return az
| apache-2.0 |
alyosha1879/ryu | ryu/utils.py | 6 | 4894 | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import logging
import os
import sys
import re
LOG = logging.getLogger('ryu.utils')
def chop_py_suffix(p):
for suf in ['.py', '.pyc', '.pyo']:
if p.endswith(suf):
return p[:-len(suf)]
return p
def _likely_same(a, b):
try:
        # os.path.samefile is not available on Windows
if sys.platform == 'win32':
if os.stat(a) == os.stat(b):
return True
else:
if os.path.samefile(a, b):
return True
except OSError:
# m.__file__ is not always accessible. eg. egg
return False
if chop_py_suffix(a) == chop_py_suffix(b):
return True
return False
def _find_loaded_module(modpath):
# copy() to avoid RuntimeError: dictionary changed size during iteration
for k, m in sys.modules.copy().iteritems():
if k == '__main__':
continue
if not hasattr(m, '__file__'):
continue
if _likely_same(m.__file__, modpath):
return m
return None
def import_module(modname):
try:
__import__(modname)
except:
abspath = os.path.abspath(modname)
mod = _find_loaded_module(abspath)
if mod:
return mod
opath = sys.path
sys.path.append(os.path.dirname(abspath))
name = os.path.basename(modname)
if name.endswith('.py'):
name = name[:-3]
__import__(name)
sys.path = opath
return sys.modules[name]
return sys.modules[modname]
def round_up(x, y):
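    # Round x up to the next multiple of y (Python 2 integer division),
    # e.g. round_up(5, 4) == 8.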
return ((x + y - 1) / y) * y
def _str_to_hex(data):
"""Convert string into array of hexes to be printed."""
return ' '.join(hex(ord(char)) for char in data)
def _bytearray_to_hex(data):
"""Convert bytearray into array of hexes to be printed."""
return ' '.join(hex(byte) for byte in data)
def hex_array(data):
"""Convert string or bytearray into array of hexes to be printed."""
to_hex = {str: _str_to_hex,
bytearray: _bytearray_to_hex}
try:
return to_hex[type(data)](data)
except KeyError:
LOG.exception('%s is invalid data type' % type(data))
return None
# the following functions are taken from OpenStack
#
# Get requirements from the first file that exists
def get_reqs_from_files(requirements_files):
for requirements_file in requirements_files:
if os.path.exists(requirements_file):
with open(requirements_file, 'r') as fil:
return fil.read().split('\n')
return []
def parse_requirements(requirements_files=['requirements.txt',
'tools/pip-requires']):
requirements = []
for line in get_reqs_from_files(requirements_files):
# For the requirements list, we need to inject only the portion
# after egg= so that distutils knows the package it's looking for
# such as:
# -e git://github.com/openstack/nova/master#egg=nova
if re.match(r'\s*-e\s+', line):
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
line))
# such as:
# http://github.com/openstack/nova/zipball/master#egg=nova
elif re.match(r'\s*https?:', line):
requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
line))
# -f lines are for index locations, and don't get used here
elif re.match(r'\s*-f\s+', line):
pass
else:
requirements.append(line)
return requirements
| apache-2.0 |
caldwell/servo | tests/wpt/css-tests/tools/wptserve/tests/functional/base.py | 293 | 1831 | import base64
import logging
import os
import unittest
import urllib
import urllib2
import urlparse
import wptserve
logging.basicConfig()
here = os.path.split(__file__)[0]
doc_root = os.path.join(here, "docroot")
class Request(urllib2.Request):
def __init__(self, *args, **kwargs):
urllib2.Request.__init__(self, *args, **kwargs)
self.method = "GET"
def get_method(self):
return self.method
def add_data(self, data):
if hasattr(data, "iteritems"):
data = urllib.urlencode(data)
print data
self.add_header("Content-Length", str(len(data)))
urllib2.Request.add_data(self, data)
class TestUsingServer(unittest.TestCase):
def setUp(self):
self.server = wptserve.server.WebTestHttpd(host="localhost",
port=0,
use_ssl=False,
certificate=None,
doc_root=doc_root)
self.server.start(False)
def tearDown(self):
self.server.stop()
def abs_url(self, path, query=None):
return urlparse.urlunsplit(("http", "%s:%i" % (self.server.host, self.server.port), path, query, None))
def request(self, path, query=None, method="GET", headers=None, body=None, auth=None):
req = Request(self.abs_url(path, query))
req.method = method
if headers is None:
headers = {}
for name, value in headers.iteritems():
req.add_header(name, value)
if body is not None:
req.add_data(body)
if auth is not None:
req.add_header("Authorization", "Basic %s" % base64.encodestring('%s:%s' % auth))
return urllib2.urlopen(req)
| mpl-2.0 |
akash1808/nova_test_latest | nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py | 33 | 1894 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import platform
import mock
from os_brick.initiator import connector
from nova.compute import arch
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import fibrechannel
class LibvirtFibreChannelVolumeDriverTestCase(
test_volume.LibvirtVolumeBaseTestCase):
def test_libvirt_fibrechan_driver(self):
libvirt_driver = fibrechannel.LibvirtFibreChannelVolumeDriver(
self.fake_conn)
self.assertIsInstance(libvirt_driver.connector,
connector.FibreChannelConnector)
def _test_libvirt_fibrechan_driver_s390(self):
libvirt_driver = fibrechannel.LibvirtFibreChannelVolumeDriver(
self.fake_conn)
self.assertIsInstance(libvirt_driver.connector,
connector.FibreChannelConnectorS390X)
@mock.patch.object(platform, 'machine', return_value=arch.S390)
def test_libvirt_fibrechan_driver_s390(self, mock_machine):
self._test_libvirt_fibrechan_driver_s390()
@mock.patch.object(platform, 'machine', return_value=arch.S390X)
def test_libvirt_fibrechan_driver_s390x(self, mock_machine):
self._test_libvirt_fibrechan_driver_s390()
| apache-2.0 |
storm-computers/odoo | addons/account_budget/wizard/account_budget_report.py | 47 | 1121 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from openerp.osv import fields, osv
class account_budget_report(osv.osv_memory):
_name = "account.budget.report"
_description = "Account Budget report for analytic account"
_columns = {
'date_from': fields.date('Start of period', required=True),
'date_to': fields.date('End of period', required=True),
}
_defaults = {
'date_from': lambda *a: time.strftime('%Y-01-01'),
'date_to': lambda *a: time.strftime('%Y-%m-%d'),
}
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
datas = {
'ids': context.get('active_ids', []),
'model': 'account.budget.post',
'form': data
}
datas['form']['ids'] = datas['ids']
datas['form']['report'] = 'analytic-full'
return self.pool['report'].get_action(cr, uid, [], 'account_budget.report_budget', data=datas, context=context)
| agpl-3.0 |
venkey-ariv/fullerite | src/diamond/collectors/nginx/nginx.py | 6 | 3290 | # coding=utf-8
"""
Collect statistics from Nginx
#### Dependencies
* urllib2
#### Usage
To enable the nginx status page to work with defaults,
add a file to /etc/nginx/sites-enabled/ (on Ubuntu) with the
following content:
<pre>
server {
listen 127.0.0.1:8080;
server_name localhost;
location /nginx_status {
stub_status on;
access_log /data/server/shared/log/access.log;
allow 127.0.0.1;
deny all;
}
}
</pre>
"""
import urllib2
import re
import diamond.collector
class NginxCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(NginxCollector, self).get_default_config_help()
config_help.update({
'req_host': 'Hostname',
'req_port': 'Port',
'req_path': 'Path',
})
return config_help
def get_default_config(self):
default_config = super(NginxCollector, self).get_default_config()
default_config['req_host'] = 'localhost'
default_config['req_port'] = 8080
default_config['req_path'] = '/nginx_status'
default_config['path'] = 'nginx'
return default_config
def collect(self):
url = 'http://%s:%i%s' % (self.config['req_host'],
int(self.config['req_port']),
self.config['req_path'])
activeConnectionsRE = re.compile(r'Active connections: (?P<conn>\d+)')
totalConnectionsRE = re.compile('^\s+(?P<conn>\d+)\s+'
+ '(?P<acc>\d+)\s+(?P<req>\d+)')
connectionStatusRE = re.compile('Reading: (?P<reading>\d+) '
+ 'Writing: (?P<writing>\d+) '
+ 'Waiting: (?P<waiting>\d+)')
req = urllib2.Request(url)
try:
handle = urllib2.urlopen(req)
for l in handle.readlines():
l = l.rstrip('\r\n')
if activeConnectionsRE.match(l):
self.publish_gauge(
'nginx.active_connections',
int(activeConnectionsRE.match(l).group('conn')))
elif totalConnectionsRE.match(l):
m = totalConnectionsRE.match(l)
req_per_conn = float(m.group('req')) / float(m.group('acc'))
self.publish_cumulative_counter('nginx.conn_accepted', int(m.group('conn')))
self.publish_cumulative_counter('nginx.conn_handled', int(m.group('acc')))
self.publish_cumulative_counter('nginx.req_handled', int(m.group('req')))
self.publish_gauge('nginx.req_per_conn', float(req_per_conn))
elif connectionStatusRE.match(l):
m = connectionStatusRE.match(l)
self.publish_gauge('nginx.act_reads', int(m.group('reading')))
self.publish_gauge('nginx.act_writes', int(m.group('writing')))
self.publish_gauge('nginx.act_waits', int(m.group('waiting')))
except IOError, e:
self.log.error("Unable to open %s" % url)
except Exception, e:
self.log.error("Unknown error opening url: %s", e)
| apache-2.0 |
wschenck/nest-simulator | extras/ConnPlotter/examples/non_dale.py | 20 | 2836 | # -*- coding: utf-8 -*-
#
# non_dale.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# ConnPlotter --- A Tool to Generate Connectivity Pattern Matrices
"""
Non-Dale example model.
Two layers, A and B, each with a single population.
Both layers make excitatory and inhibitory projections
to each other, violating Dale's law.
Build with
ConnectionPattern(..., ..., synTypes=(((SynType('exc', 1.0, 'b'),
SynType('inh', -1.0, 'r')),)))
"""
def non_dale():
"""
Build lists representing non-Dale network model.
Returns:
layerList, connectList, modelList
"""
def modCopy(orig, diff):
"""Create copy of dict orig, update with diff, return."""
assert (isinstance(orig, dict))
assert (isinstance(diff, dict))
tmp = orig.copy()
tmp.update(diff)
return tmp
N = 40
modelList = []
layerList = [('A', 'iaf_psc_alpha', [N, N], [1., 1.]),
('B', 'iaf_psc_alpha', [N, N], [1., 1.])]
common_connspec = {'rule': 'pairwise_bernoulli'}
common_synspec = {'synapse_model': 'static_synapse',
'delay': 1.0}
connectList = [
('A', 'B',
modCopy(common_connspec, {'mask': {'circular': {'radius': 0.2}}, 'p': 0.8}),
modCopy(common_synspec, {'weight': 2.0})),
('A', 'B',
modCopy(common_connspec, {'mask': {'circular': {'radius': 0.3}}, 'p': 0.4}),
modCopy(common_synspec, {'weight': -2.0})),
('B', 'A',
modCopy(common_connspec, {'mask': {'rectangular':
{'lower_left': [-0.4, -0.2],
'upper_right': [0.4, 0.2]}},
'p': 1.0}),
modCopy(common_synspec, {'weight': 2.0})),
('B', 'A',
modCopy(common_connspec, {'mask': {'rectangular':
{'lower_left': [-0.2, -0.4],
'upper_right': [0.2, 0.4]}},
'p': 1.0}),
modCopy(common_synspec, {'weight': -2.0})),
]
return layerList, connectList, modelList
| gpl-2.0 |
bentilly/heroes | lib/flask/testsuite/helpers.py | 405 | 21973 | # -*- coding: utf-8 -*-
"""
flask.testsuite.helpers
~~~~~~~~~~~~~~~~~~~~~~~
Various helpers.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import flask
import unittest
from logging import StreamHandler
from flask.testsuite import FlaskTestCase, catch_warnings, catch_stderr
from werkzeug.http import parse_cache_control_header, parse_options_header
from flask._compat import StringIO, text_type
def has_encoding(name):
try:
import codecs
codecs.lookup(name)
return True
except LookupError:
return False
class JSONTestCase(FlaskTestCase):
def test_json_bad_requests(self):
app = flask.Flask(__name__)
@app.route('/json', methods=['POST'])
def return_json():
return flask.jsonify(foo=text_type(flask.request.get_json()))
c = app.test_client()
rv = c.post('/json', data='malformed', content_type='application/json')
self.assert_equal(rv.status_code, 400)
def test_json_body_encoding(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
return flask.request.get_json()
c = app.test_client()
resp = c.get('/', data=u'"Hällo Wörld"'.encode('iso-8859-15'),
content_type='application/json; charset=iso-8859-15')
self.assert_equal(resp.data, u'Hällo Wörld'.encode('utf-8'))
def test_jsonify(self):
d = dict(a=23, b=42, c=[1, 2, 3])
app = flask.Flask(__name__)
@app.route('/kw')
def return_kwargs():
return flask.jsonify(**d)
@app.route('/dict')
def return_dict():
return flask.jsonify(d)
c = app.test_client()
for url in '/kw', '/dict':
rv = c.get(url)
self.assert_equal(rv.mimetype, 'application/json')
self.assert_equal(flask.json.loads(rv.data), d)
def test_json_as_unicode(self):
app = flask.Flask(__name__)
app.config['JSON_AS_ASCII'] = True
with app.app_context():
rv = flask.json.dumps(u'\N{SNOWMAN}')
self.assert_equal(rv, '"\\u2603"')
app.config['JSON_AS_ASCII'] = False
with app.app_context():
rv = flask.json.dumps(u'\N{SNOWMAN}')
self.assert_equal(rv, u'"\u2603"')
def test_json_attr(self):
app = flask.Flask(__name__)
@app.route('/add', methods=['POST'])
def add():
json = flask.request.get_json()
return text_type(json['a'] + json['b'])
c = app.test_client()
rv = c.post('/add', data=flask.json.dumps({'a': 1, 'b': 2}),
content_type='application/json')
self.assert_equal(rv.data, b'3')
def test_template_escaping(self):
app = flask.Flask(__name__)
render = flask.render_template_string
with app.test_request_context():
rv = flask.json.htmlsafe_dumps('</script>')
self.assert_equal(rv, u'"\\u003c/script\\u003e"')
self.assert_equal(type(rv), text_type)
rv = render('{{ "</script>"|tojson }}')
self.assert_equal(rv, '"\\u003c/script\\u003e"')
rv = render('{{ "<\0/script>"|tojson }}')
self.assert_equal(rv, '"\\u003c\\u0000/script\\u003e"')
rv = render('{{ "<!--<script>"|tojson }}')
self.assert_equal(rv, '"\\u003c!--\\u003cscript\\u003e"')
rv = render('{{ "&"|tojson }}')
self.assert_equal(rv, '"\\u0026"')
rv = render('{{ "\'"|tojson }}')
self.assert_equal(rv, '"\\u0027"')
rv = render("<a ng-data='{{ data|tojson }}'></a>",
data={'x': ["foo", "bar", "baz'"]})
self.assert_equal(rv,
'<a ng-data=\'{"x": ["foo", "bar", "baz\\u0027"]}\'></a>')
def test_json_customization(self):
class X(object):
def __init__(self, val):
self.val = val
class MyEncoder(flask.json.JSONEncoder):
def default(self, o):
if isinstance(o, X):
return '<%d>' % o.val
return flask.json.JSONEncoder.default(self, o)
class MyDecoder(flask.json.JSONDecoder):
def __init__(self, *args, **kwargs):
kwargs.setdefault('object_hook', self.object_hook)
flask.json.JSONDecoder.__init__(self, *args, **kwargs)
def object_hook(self, obj):
if len(obj) == 1 and '_foo' in obj:
return X(obj['_foo'])
return obj
app = flask.Flask(__name__)
app.testing = True
app.json_encoder = MyEncoder
app.json_decoder = MyDecoder
@app.route('/', methods=['POST'])
def index():
return flask.json.dumps(flask.request.get_json()['x'])
c = app.test_client()
rv = c.post('/', data=flask.json.dumps({
'x': {'_foo': 42}
}), content_type='application/json')
self.assertEqual(rv.data, b'"<42>"')
def test_modified_url_encoding(self):
class ModifiedRequest(flask.Request):
url_charset = 'euc-kr'
app = flask.Flask(__name__)
app.testing = True
app.request_class = ModifiedRequest
app.url_map.charset = 'euc-kr'
@app.route('/')
def index():
return flask.request.args['foo']
rv = app.test_client().get(u'/?foo=정상처리'.encode('euc-kr'))
self.assert_equal(rv.status_code, 200)
self.assert_equal(rv.data, u'정상처리'.encode('utf-8'))
if not has_encoding('euc-kr'):
test_modified_url_encoding = None
def test_json_key_sorting(self):
app = flask.Flask(__name__)
app.testing = True
self.assert_equal(app.config['JSON_SORT_KEYS'], True)
d = dict.fromkeys(range(20), 'foo')
@app.route('/')
def index():
return flask.jsonify(values=d)
c = app.test_client()
rv = c.get('/')
lines = [x.strip() for x in rv.data.strip().decode('utf-8').splitlines()]
self.assert_equal(lines, [
'{',
'"values": {',
'"0": "foo",',
'"1": "foo",',
'"2": "foo",',
'"3": "foo",',
'"4": "foo",',
'"5": "foo",',
'"6": "foo",',
'"7": "foo",',
'"8": "foo",',
'"9": "foo",',
'"10": "foo",',
'"11": "foo",',
'"12": "foo",',
'"13": "foo",',
'"14": "foo",',
'"15": "foo",',
'"16": "foo",',
'"17": "foo",',
'"18": "foo",',
'"19": "foo"',
'}',
'}'
])
class SendfileTestCase(FlaskTestCase):
def test_send_file_regular(self):
app = flask.Flask(__name__)
with app.test_request_context():
rv = flask.send_file('static/index.html')
self.assert_true(rv.direct_passthrough)
self.assert_equal(rv.mimetype, 'text/html')
with app.open_resource('static/index.html') as f:
rv.direct_passthrough = False
self.assert_equal(rv.data, f.read())
rv.close()
def test_send_file_xsendfile(self):
app = flask.Flask(__name__)
app.use_x_sendfile = True
with app.test_request_context():
rv = flask.send_file('static/index.html')
self.assert_true(rv.direct_passthrough)
self.assert_in('x-sendfile', rv.headers)
self.assert_equal(rv.headers['x-sendfile'],
os.path.join(app.root_path, 'static/index.html'))
self.assert_equal(rv.mimetype, 'text/html')
rv.close()
def test_send_file_object(self):
app = flask.Flask(__name__)
with catch_warnings() as captured:
with app.test_request_context():
f = open(os.path.join(app.root_path, 'static/index.html'))
rv = flask.send_file(f)
rv.direct_passthrough = False
with app.open_resource('static/index.html') as f:
self.assert_equal(rv.data, f.read())
self.assert_equal(rv.mimetype, 'text/html')
rv.close()
# mimetypes + etag
self.assert_equal(len(captured), 2)
app.use_x_sendfile = True
with catch_warnings() as captured:
with app.test_request_context():
f = open(os.path.join(app.root_path, 'static/index.html'))
rv = flask.send_file(f)
self.assert_equal(rv.mimetype, 'text/html')
self.assert_in('x-sendfile', rv.headers)
self.assert_equal(rv.headers['x-sendfile'],
os.path.join(app.root_path, 'static/index.html'))
rv.close()
# mimetypes + etag
self.assert_equal(len(captured), 2)
app.use_x_sendfile = False
with app.test_request_context():
with catch_warnings() as captured:
f = StringIO('Test')
rv = flask.send_file(f)
rv.direct_passthrough = False
self.assert_equal(rv.data, b'Test')
self.assert_equal(rv.mimetype, 'application/octet-stream')
rv.close()
# etags
self.assert_equal(len(captured), 1)
with catch_warnings() as captured:
f = StringIO('Test')
rv = flask.send_file(f, mimetype='text/plain')
rv.direct_passthrough = False
self.assert_equal(rv.data, b'Test')
self.assert_equal(rv.mimetype, 'text/plain')
rv.close()
# etags
self.assert_equal(len(captured), 1)
app.use_x_sendfile = True
with catch_warnings() as captured:
with app.test_request_context():
f = StringIO('Test')
rv = flask.send_file(f)
self.assert_not_in('x-sendfile', rv.headers)
rv.close()
# etags
self.assert_equal(len(captured), 1)
def test_attachment(self):
app = flask.Flask(__name__)
with catch_warnings() as captured:
with app.test_request_context():
f = open(os.path.join(app.root_path, 'static/index.html'))
rv = flask.send_file(f, as_attachment=True)
value, options = parse_options_header(rv.headers['Content-Disposition'])
self.assert_equal(value, 'attachment')
rv.close()
# mimetypes + etag
self.assert_equal(len(captured), 2)
with app.test_request_context():
self.assert_equal(options['filename'], 'index.html')
rv = flask.send_file('static/index.html', as_attachment=True)
value, options = parse_options_header(rv.headers['Content-Disposition'])
self.assert_equal(value, 'attachment')
self.assert_equal(options['filename'], 'index.html')
rv.close()
with app.test_request_context():
rv = flask.send_file(StringIO('Test'), as_attachment=True,
attachment_filename='index.txt',
add_etags=False)
self.assert_equal(rv.mimetype, 'text/plain')
value, options = parse_options_header(rv.headers['Content-Disposition'])
self.assert_equal(value, 'attachment')
self.assert_equal(options['filename'], 'index.txt')
rv.close()
def test_static_file(self):
app = flask.Flask(__name__)
# default cache timeout is 12 hours
with app.test_request_context():
# Test with static file handler.
rv = app.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 12 * 60 * 60)
rv.close()
# Test again with direct use of send_file utility.
rv = flask.send_file('static/index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 12 * 60 * 60)
rv.close()
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 3600
with app.test_request_context():
# Test with static file handler.
rv = app.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 3600)
rv.close()
# Test again with direct use of send_file utility.
rv = flask.send_file('static/index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 3600)
rv.close()
class StaticFileApp(flask.Flask):
def get_send_file_max_age(self, filename):
return 10
app = StaticFileApp(__name__)
with app.test_request_context():
# Test with static file handler.
rv = app.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 10)
rv.close()
# Test again with direct use of send_file utility.
rv = flask.send_file('static/index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 10)
rv.close()
class LoggingTestCase(FlaskTestCase):
def test_logger_cache(self):
app = flask.Flask(__name__)
logger1 = app.logger
self.assert_true(app.logger is logger1)
self.assert_equal(logger1.name, __name__)
app.logger_name = __name__ + '/test_logger_cache'
self.assert_true(app.logger is not logger1)
def test_debug_log(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/')
def index():
app.logger.warning('the standard library is dead')
app.logger.debug('this is a debug statement')
return ''
@app.route('/exc')
def exc():
1 // 0
with app.test_client() as c:
with catch_stderr() as err:
c.get('/')
out = err.getvalue()
self.assert_in('WARNING in helpers [', out)
self.assert_in(os.path.basename(__file__.rsplit('.', 1)[0] + '.py'), out)
self.assert_in('the standard library is dead', out)
self.assert_in('this is a debug statement', out)
with catch_stderr() as err:
try:
c.get('/exc')
except ZeroDivisionError:
pass
else:
self.assert_true(False, 'debug log ate the exception')
def test_debug_log_override(self):
app = flask.Flask(__name__)
app.debug = True
app.logger_name = 'flask_tests/test_debug_log_override'
app.logger.level = 10
self.assert_equal(app.logger.level, 10)
def test_exception_logging(self):
out = StringIO()
app = flask.Flask(__name__)
app.logger_name = 'flask_tests/test_exception_logging'
app.logger.addHandler(StreamHandler(out))
@app.route('/')
def index():
1 // 0
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 500)
self.assert_in(b'Internal Server Error', rv.data)
err = out.getvalue()
self.assert_in('Exception on / [GET]', err)
self.assert_in('Traceback (most recent call last):', err)
self.assert_in('1 // 0', err)
self.assert_in('ZeroDivisionError:', err)
def test_processor_exceptions(self):
app = flask.Flask(__name__)
@app.before_request
def before_request():
if trigger == 'before':
1 // 0
@app.after_request
def after_request(response):
if trigger == 'after':
1 // 0
return response
@app.route('/')
def index():
return 'Foo'
@app.errorhandler(500)
def internal_server_error(e):
return 'Hello Server Error', 500
for trigger in 'before', 'after':
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 500)
self.assert_equal(rv.data, b'Hello Server Error')
def test_url_for_with_anchor(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
self.assert_equal(flask.url_for('index', _anchor='x y'),
'/#x%20y')
def test_url_for_with_scheme(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
self.assert_equal(flask.url_for('index',
_external=True,
_scheme='https'),
'https://localhost/')
def test_url_for_with_scheme_not_external(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
self.assert_raises(ValueError,
flask.url_for,
'index',
_scheme='https')
def test_url_with_method(self):
from flask.views import MethodView
app = flask.Flask(__name__)
class MyView(MethodView):
def get(self, id=None):
if id is None:
return 'List'
return 'Get %d' % id
def post(self):
return 'Create'
myview = MyView.as_view('myview')
app.add_url_rule('/myview/', methods=['GET'],
view_func=myview)
app.add_url_rule('/myview/<int:id>', methods=['GET'],
view_func=myview)
app.add_url_rule('/myview/create', methods=['POST'],
view_func=myview)
with app.test_request_context():
self.assert_equal(flask.url_for('myview', _method='GET'),
'/myview/')
self.assert_equal(flask.url_for('myview', id=42, _method='GET'),
'/myview/42')
self.assert_equal(flask.url_for('myview', _method='POST'),
'/myview/create')
class NoImportsTestCase(FlaskTestCase):
"""Test Flasks are created without import.
Avoiding ``__import__`` helps create Flask instances where there are errors
at import time. Those runtime errors will be apparent to the user soon
enough, but tools which build Flask instances meta-programmatically benefit
from a Flask which does not ``__import__``. Instead of importing to
retrieve file paths or metadata on a module or package, use the pkgutil and
imp modules in the Python standard library.
"""
def test_name_with_import_error(self):
try:
flask.Flask('importerror')
except NotImplementedError:
self.fail('Flask(import_name) is importing import_name.')
class StreamingTestCase(FlaskTestCase):
def test_streaming_with_context(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
def generate():
yield 'Hello '
yield flask.request.args['name']
yield '!'
return flask.Response(flask.stream_with_context(generate()))
c = app.test_client()
rv = c.get('/?name=World')
self.assertEqual(rv.data, b'Hello World!')
def test_streaming_with_context_as_decorator(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
@flask.stream_with_context
def generate():
yield 'Hello '
yield flask.request.args['name']
yield '!'
return flask.Response(generate())
c = app.test_client()
rv = c.get('/?name=World')
self.assertEqual(rv.data, b'Hello World!')
def test_streaming_with_context_and_custom_close(self):
app = flask.Flask(__name__)
app.testing = True
called = []
class Wrapper(object):
def __init__(self, gen):
self._gen = gen
def __iter__(self):
return self
def close(self):
called.append(42)
def __next__(self):
return next(self._gen)
next = __next__
@app.route('/')
def index():
def generate():
yield 'Hello '
yield flask.request.args['name']
yield '!'
return flask.Response(flask.stream_with_context(
Wrapper(generate())))
c = app.test_client()
rv = c.get('/?name=World')
self.assertEqual(rv.data, b'Hello World!')
self.assertEqual(called, [42])
def suite():
suite = unittest.TestSuite()
if flask.json_available:
suite.addTest(unittest.makeSuite(JSONTestCase))
suite.addTest(unittest.makeSuite(SendfileTestCase))
suite.addTest(unittest.makeSuite(LoggingTestCase))
suite.addTest(unittest.makeSuite(NoImportsTestCase))
suite.addTest(unittest.makeSuite(StreamingTestCase))
return suite
| apache-2.0 |
newemailjdm/pybrain | examples/rl/environments/ode/ccrl_glass_pgpe.py | 30 | 2812 | #!/usr/bin/env python
#########################################################################
# Reinforcement Learning with PGPE on the CCRL ODE Environment
#
# The CCRL robot is a body structure with 2x 7 DoF Arms.
# Complex grasping tasks can be learned with this environment.
#
# Control/Actions:
# The agent can control all 14 DOF of the robot arms plus the 2 hands.
#
# A wide variety of sensors are available for observation and reward:
# - 16 angles of joints
# - 16 angle velocitys of joints
# - Number of hand parts that have contact to target object
# - collision with table
# - distance of hand to target
# - angle of hand to horizontal and vertical plane
#
# Task available are:
# - Grasp Task, agent has to get hold of the object with avoiding collision with table
#
# Requirements: pylab (for plotting only). If not available, comment the
# last 3 lines out
# Author: Frank Sehnke, [email protected]
#########################################################################
__author__ = "Frank Sehnke"
__version__ = '$Id$'
from pybrain.tools.example_tools import ExTools
from pybrain.rl.environments.ode import CCRLEnvironment
from pybrain.rl.environments.ode.tasks import CCRLGlasTask
from pybrain.structure.modules.tanhlayer import TanhLayer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.rl.agents import OptimizationAgent
from pybrain.optimization import PGPE
from pybrain.rl.experiments import EpisodicExperiment
hiddenUnits = 4
batch=1 #number of samples per learning step
prnts=1 #number of learning steps after results are printed
epis=2000/batch/prnts #number of roleouts
numbExp=10 #number of experiments
et = ExTools(batch, prnts) #tool for printing and plotting
env = None
for runs in range(numbExp):
# create environment
#Options: XML-Model, Bool(OpenGL), Bool(Realtime simu. while client is connected), ServerIP(default:localhost), Port(default:21560)
if env != None: env.closeSocket()
env = CCRLEnvironment()
# create task
task = CCRLGlasTask(env)
# create controller network
net = buildNetwork(len(task.getObservation()), hiddenUnits, env.actLen, outclass=TanhLayer) #, hiddenUnits
# create agent with controller and learner (and its options)
agent = OptimizationAgent(net, PGPE(storeAllEvaluations = True))
et.agent = agent
# create the experiment
experiment = EpisodicExperiment(task, agent)
#Do the experiment
for updates in range(epis):
for i in range(prnts):
experiment.doEpisodes(batch)
et.printResults((agent.learner._allEvaluations)[-50:-1], runs, updates)
et.addExps()
et.showExps()
# To view what the simulation is doing at the moment, go to pybrain/rl/environments/ode/ and start viewer.py (python-openGL must be installed, see the PyBrain documentation)
| bsd-3-clause |
Omegaphora/external_chromium_org | tools/memory_inspector/memory_inspector/classification/native_heap_classifier_unittest.py | 89 | 5824 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from memory_inspector.classification import native_heap_classifier
from memory_inspector.core import native_heap
from memory_inspector.core import stacktrace
from memory_inspector.core import symbol
_TEST_RULES = """
[
{
'name': 'content',
'source_path': r'content/',
'children': [
{
'name': 'browser',
'stacktrace': r'content::browser',
'source_path': r'content/browser',
},
{
'name': 'renderer',
'stacktrace': r'content::renderer',
},
],
},
{
'name': 'ashmem_in_skia',
'stacktrace': [r'sk::', r'ashmem::'],
},
]
"""
_TEST_STACK_TRACES = [
(3, [('stack_frame_0::foo()', '/ignored.c'),
('this_goes_under_totals_other', '/ignored.c')]),
(5, [('foo', '/ignored.c'),
('content::browser::something()', '/content/browser/something.cc'),
('bar', '/ignored.c')]),
(7, [('content::browser::something_else()', '/content/browser/else.cc')]),
(11, [('content::browser::not_really()', '/content/subtle/something.cc'),
('foo', '/ignored.c')]),
(13, [('foo', '/ignored.c'),
('content::renderer::something()', '/content/renderer/foo.c'),
('bar', '/ignored.c')]),
(17, [('content::renderer::something_else()', '/content/renderer/foo.c')]),
(19, [('content::renderer::something_else_2()', '/content/renderer/bar.c'),
('foo', '/ignored.c')]),
(23, [('content::something_different', '/content/foo.c')]),
(29, [('foo', '/ignored.c'),
('sk::something', '/skia/something.c'),
('not_ashsmem_goes_into_totals_other', '/ignored.c')]),
(31, [('foo', '/ignored.c'),
('sk::something', '/skia/something.c'),
('foo::bar', '/ignored.c'),
('sk::foo::ashmem::alloc()', '/skia/ashmem.c')]),
(37, [('foo', '/ignored.c'),
('sk::something', '/ignored.c'),
('sk::foo::ashmem::alloc()', '/ignored.c')]),
(43, [('foo::ashmem::alloc()', '/ignored.c'),
('sk::foo', '/ignored.c'),
('wrong_order_goes_into_totals', '/ignored.c')])
]
_EXPECTED_RESULTS = {
'Total': [238, 0],
'Total::content': [95, 0],
'Total::content::browser': [12, 0], # 5 + 7.
'Total::content::renderer': [49, 0], # 13 + 17 + 19.
'Total::content::content-other': [34, 0],
'Total::ashmem_in_skia': [68, 0], # 31 + 37.
'Total::Total-other': [75, 0], # 3 + 29 + 43.
}
_HEURISTIC_TEST_STACK_TRACES = [
(10, '/root/base1/foo/bar/file.cc'), # Contrib: 0.13
(20, '/root/base1/foo/baz/file.cc'), # Contrib: 0.26
(1, '/root/base1/foo/nah/file.cc'), # Contrib: 0.01
(3, '/root/base2/file.cc'), # Contrib: 0.03
(22, '/root/base2/subpath/file.cc'), # Contrib: 0.28
(18, '/root/base2/subpath2/file.cc'), # Contrib: 0.23
(2, '/root/whatever/file.cc'), # Contrib: 0.02
]
_HEURISTIC_EXPECTED_RESULTS = {
'Total': [76, 0],
'Total::/root/': [76, 0],
'Total::/root/::base1/foo/': [31, 0], # 10 + 20 +1
'Total::/root/::base1/foo/::bar/': [10, 0],
'Total::/root/::base1/foo/::baz/': [20, 0],
'Total::/root/::base1/foo/::base1/foo/-other': [1, 0],
'Total::/root/::base2/': [43, 0], # 3 + 22 + 18
'Total::/root/::base2/::subpath/': [22, 0],
'Total::/root/::base2/::subpath2/': [18, 0],
'Total::/root/::base2/::base2/-other': [3, 0],
'Total::/root/::/root/-other': [2, 0],
'Total::Total-other': [0, 0],
}
class NativeHeapClassifierTest(unittest.TestCase):
def testStandardRuleParsingAndProcessing(self):
rule_tree = native_heap_classifier.LoadRules(_TEST_RULES)
nheap = native_heap.NativeHeap()
mock_addr = 0
for test_entry in _TEST_STACK_TRACES:
mock_strace = stacktrace.Stacktrace()
for (mock_btstr, mock_source_path) in test_entry[1]:
mock_addr += 4 # Addr is irrelevant, just keep it distinct.
mock_frame = stacktrace.Frame(mock_addr)
mock_frame.SetSymbolInfo(symbol.Symbol(mock_btstr, mock_source_path))
mock_strace.Add(mock_frame)
nheap.Add(native_heap.Allocation(
size=test_entry[0], stack_trace=mock_strace))
res = native_heap_classifier.Classify(nheap, rule_tree)
self._CheckResult(res.total, '', _EXPECTED_RESULTS)
def testInferHeuristicRules(self):
nheap = native_heap.NativeHeap()
mock_addr = 0
for (mock_alloc_size, mock_source_path) in _HEURISTIC_TEST_STACK_TRACES:
mock_strace = stacktrace.Stacktrace()
mock_addr += 4 # Addr is irrelevant, just keep it distinct.
mock_frame = stacktrace.Frame(mock_addr)
mock_frame.SetSymbolInfo(symbol.Symbol(str(mock_addr), mock_source_path))
for _ in xrange(10): # Just repeat the same stack frame 10 times
mock_strace.Add(mock_frame)
nheap.Add(native_heap.Allocation(
size=mock_alloc_size, stack_trace=mock_strace))
rule_tree = native_heap_classifier.InferHeuristicRulesFromHeap(
nheap, threshold=0.05)
res = native_heap_classifier.Classify(nheap, rule_tree)
self._CheckResult(res.total, '', _HEURISTIC_EXPECTED_RESULTS)
def _CheckResult(self, node, prefix, expected_results):
node_name = prefix + node.name
self.assertIn(node_name, expected_results)
self.assertEqual(node.values, expected_results[node_name])
for child in node.children:
self._CheckResult(child, node_name + '::', expected_results) | bsd-3-clause |
skbkontur/Diamond | src/collectors/mesos_cgroup/test/testmesos_cgroup.py | 16 | 6777 | #!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import MagicMock, Mock, mock_open
from mock import patch
from diamond.collector import Collector
from mesos_cgroup import MesosCGroupCollector
##########################################################################
class TestMesosCGroupCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('MesosCGroupCollector', {})
self.collector = MesosCGroupCollector(config, None)
def test_import(self):
self.assertTrue(MesosCGroupCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
task_id = 'b0d5971e-915c-414b-aa25-0da46e64ff4e'
def urlopen_se(url):
if url == 'http://localhost:5051/state.json':
return self.getFixture('state.json')
else:
print url
raise NotImplementedError()
def listdir_se(directory):
cgroup_directories = [
'/sys/fs/cgroup/cpuacct/mesos',
'/sys/fs/cgroup/cpu/mesos',
'/sys/fs/cgroup/memory/mesos'
]
if directory in cgroup_directories:
return ["b0d5971e-915c-414b-aa25-0da46e64ff4e"]
else:
print directory
raise NotImplementedError()
def isdir_se(directory):
task_directories = [
'/sys/fs/cgroup/cpuacct/mesos/%s' % task_id,
'/sys/fs/cgroup/cpu/mesos/%s' % task_id,
'/sys/fs/cgroup/memory/mesos/%s' % task_id
]
if directory in task_directories:
return True
else:
print directory
raise NotImplementedError()
def open_se(path, mode='r', create=True):
if path.endswith('cpuacct/mesos/%s/cpuacct.usage' % task_id):
fixture = self.getFixture('cpuacct.usage')
m = mock_open(read_data=fixture.getvalue())
m.__enter__.return_value = fixture
return m
elif path.endswith('cpuacct/mesos/%s/cpuacct.stat' % task_id):
fixture = self.getFixture('cpuacct.stat')
m = mock_open(read_data=fixture.getvalue())
m.__enter__.return_value = fixture
return m
elif path.endswith('cpu/mesos/%s/cpu.stat' % task_id):
fixture = self.getFixture('cpu.stat')
m = mock_open(read_data=fixture.getvalue())
m.__enter__.return_value = fixture
return m
elif path.endswith('memory/mesos/%s/memory.stat' % task_id):
fixture = self.getFixture('memory.stat')
m = mock_open(read_data=fixture.getvalue())
m.__enter__.return_value = fixture
return m
else:
patch_open.stop()
o = open(path, mode, create)
patch_open.start()
return o
patch_urlopen = patch('urllib2.urlopen', Mock(side_effect=urlopen_se))
patch_listdir = patch('os.listdir', Mock(side_effect=listdir_se))
patch_isdir = patch('os.path.isdir', Mock(side_effect=isdir_se))
patch_open = patch('__builtin__.open', MagicMock(spec=file,
side_effect=open_se))
patch_urlopen.start()
patch_listdir.start()
patch_isdir.start()
patch_open.start()
self.collector.collect()
patch_open.stop()
patch_isdir.stop()
patch_listdir.stop()
patch_urlopen.stop()
metrics = self.get_metrics()
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
def get_metrics(self):
return {
'ENVIRONMENT.ROLE.TASK.0.cpuacct.usage': '170379797227518',
'ENVIRONMENT.ROLE.TASK.0.cpuacct.user': '9333852',
'ENVIRONMENT.ROLE.TASK.0.cpuacct.system': '2774846',
'ENVIRONMENT.ROLE.TASK.0.cpu.nr_periods': '26848849',
'ENVIRONMENT.ROLE.TASK.0.cpu.nr_throttled': '85144',
'ENVIRONMENT.ROLE.TASK.0.cpu.throttled_time': '34709931864651',
'ENVIRONMENT.ROLE.TASK.0.memory.cache': '233398272',
'ENVIRONMENT.ROLE.TASK.0.memory.rss': '1789911040',
'ENVIRONMENT.ROLE.TASK.0.memory.rss_huge': '1642070016',
'ENVIRONMENT.ROLE.TASK.0.memory.mapped_file': '1118208',
'ENVIRONMENT.ROLE.TASK.0.memory.writeback': '0',
'ENVIRONMENT.ROLE.TASK.0.memory.pgpgin': '375953210',
'ENVIRONMENT.ROLE.TASK.0.memory.pgpgout': '385688436',
'ENVIRONMENT.ROLE.TASK.0.memory.pgfault': '353980394',
'ENVIRONMENT.ROLE.TASK.0.memory.pgmajfault': '157',
'ENVIRONMENT.ROLE.TASK.0.memory.inactive_anon': '0',
'ENVIRONMENT.ROLE.TASK.0.memory.active_anon': '1789911040',
'ENVIRONMENT.ROLE.TASK.0.memory.inactive_file': '52654080',
'ENVIRONMENT.ROLE.TASK.0.memory.active_file': '180727808',
'ENVIRONMENT.ROLE.TASK.0.memory.unevictable': '0',
'ENVIRONMENT.ROLE.TASK.0.memory.hierarchical_memory_limit': '3355443200', # noqa
'ENVIRONMENT.ROLE.TASK.0.memory.total_cache': '233398272',
'ENVIRONMENT.ROLE.TASK.0.memory.total_rss': '1789911040',
'ENVIRONMENT.ROLE.TASK.0.memory.total_rss_huge': '1642070016',
'ENVIRONMENT.ROLE.TASK.0.memory.total_mapped_file': '1118208',
'ENVIRONMENT.ROLE.TASK.0.memory.total_writeback': '0',
'ENVIRONMENT.ROLE.TASK.0.memory.total_pgpgin': '375953210',
'ENVIRONMENT.ROLE.TASK.0.memory.total_pgpgout': '385688436',
'ENVIRONMENT.ROLE.TASK.0.memory.total_pgfault': '353980394',
'ENVIRONMENT.ROLE.TASK.0.memory.total_pgmajfault': '157',
'ENVIRONMENT.ROLE.TASK.0.memory.total_inactive_anon': '0',
'ENVIRONMENT.ROLE.TASK.0.memory.total_active_anon': '1789911040',
'ENVIRONMENT.ROLE.TASK.0.memory.total_inactive_file': '52654080',
'ENVIRONMENT.ROLE.TASK.0.memory.total_active_file': '180727808',
'ENVIRONMENT.ROLE.TASK.0.memory.total_unevictable': '0'
}
##########################################################################
if __name__ == "__main__":
unittest.main()
| mit |
usingnamespace/pyramid_authsanity | src/pyramid_authsanity/__init__.py | 1 | 2689 | from pyramid.settings import asbool, aslist
from .interfaces import IAuthSourceService
from .policy import AuthServicePolicy
from .sources import (
CookieAuthSourceInitializer,
HeaderAuthSourceInitializer,
SessionAuthSourceInitializer,
)
from .util import int_or_none, kw_from_settings
default_settings = (
("source", str, ""),
("debug", asbool, False),
("cookie.cookie_name", str, "auth"),
("cookie.max_age", int_or_none, None),
("cookie.httponly", asbool, True),
("cookie.path", str, "/"),
("cookie.domains", aslist, []),
("cookie.debug", asbool, False),
("session.value_key", str, "sanity."),
)
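# A minimal sketch of the corresponding .ini settings; the values are
# illustrative, only the keys come from the defaults above and the checks
# in the source initializers below:
#
#   authsanity.source = cookie
#   authsanity.secret = REPLACE_WITH_A_REAL_SECRET
#   authsanity.cookie.max_age = 3600
#   authsanity.debug = false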
def init_cookie_source(config, settings):
if "authsanity.secret" not in settings:
raise RuntimeError("authsanity.secret is required for cookie based storage")
kw = kw_from_settings(settings, "authsanity.cookie.")
config.register_service_factory(
CookieAuthSourceInitializer(settings["authsanity.secret"], **kw),
iface=IAuthSourceService,
)
def init_session_source(config, settings):
kw = kw_from_settings(settings, "authsanity.session.")
config.register_service_factory(
SessionAuthSourceInitializer(**kw), iface=IAuthSourceService
)
def init_authorization_header_source(config, settings):
if "authsanity.secret" not in settings:
raise RuntimeError(
"authsanity.secret is required for Authorization header source"
)
kw = kw_from_settings(settings, "authsanity.header.")
config.register_service_factory(
HeaderAuthSourceInitializer(settings["authsanity.secret"], **kw),
iface=IAuthSourceService,
)
default_sources = {
"cookie": init_cookie_source,
"session": init_session_source,
"header": init_authorization_header_source,
}
# Stolen from pyramid_debugtoolbar
def parse_settings(settings):
parsed = {}
def populate(name, convert, default):
name = "%s%s" % ("authsanity.", name)
value = convert(settings.get(name, default))
parsed[name] = value
for name, convert, default in default_settings:
populate(name, convert, default)
return parsed
def includeme(config):
# Go parse the settings
settings = parse_settings(config.registry.settings)
# Update the config
config.registry.settings.update(settings)
# include pyramid_services
config.include("pyramid_services")
if settings["authsanity.source"] in default_sources:
default_sources[settings["authsanity.source"]](config, config.registry.settings)
config.set_authentication_policy(
AuthServicePolicy(debug=settings["authsanity.debug"])
)
| isc |
Rickyfox/MLMA2 | core/DataHandler.py | 1 | 1770 | '''
Created on Dec 17, 2014
@author: Dominik Lang
'''
import csv
import os.path
from random import shuffle
import collections
import numpy
from sklearn.preprocessing import Imputer
class DataHandler(object):
def __init__(self):
pass
'''
@summary: A method to handle reading the data in from the csv file
@return: List containing the rows of the dataset as seperate lists
'''
def readData(self):
# We get the path to the current file, then go one directory up to find the data file
basepath = os.path.dirname(__file__)
filepath = os.path.abspath(os.path.join(basepath, "..","data.csv"))
output=[]
with open(filepath, 'rb') as csvfile:
i=0
linereader = csv.reader(csvfile, delimiter=',')
for row in linereader:
if i==0:
i+=1
continue
output.append(row)
return output
'''
@summary: A method that splits the dataset into a training and a test set
'''
def splitData(self,dataset):
sets = collections.namedtuple('Sets', ['train', 'test'])
third=len(dataset)/3
shuffle(dataset)
testset=dataset[0:third]
trainset=dataset[third:-1]
s=sets(trainset,testset)
return s
def vectorizeData(self,dataset):
vectors = collections.namedtuple('vectors', ['X', 'Y'])
x=[]
y=[]
for i in dataset:
atts=i[0:-2]
c=i[-1]
x.append(atts)
y.append(c)
x=numpy.asarray(x)
y=numpy.asarray(y)
output=vectors(x,y)
return output
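# Illustrative usage sketch (assumes data.csv sits one directory above this
# file, as readData() expects):
#   handler = DataHandler()
#   rows = handler.readData()
#   sets = handler.splitData(rows)
#   vectors = handler.vectorizeData(sets.train)
#   X, Y = vectors.X, vectors.Y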
| gpl-2.0 |
shakamunyi/tensorflow | tensorflow/contrib/layers/python/layers/feature_column.py | 19 | 105773 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction.
FeatureColumns provide a high level abstraction for ingesting and representing
features in `Estimator` models.
FeatureColumns are the primary way of encoding features for pre-canned
`Estimator` models.
When using FeatureColumns with `Estimator` models, the type of feature column
you should choose depends on (1) the feature type and (2) the model type.
(1) Feature type:
* Continuous features can be represented by `real_valued_column`.
* Categorical features can be represented by any `sparse_column_with_*`
column (`sparse_column_with_keys`, `sparse_column_with_vocabulary_file`,
`sparse_column_with_hash_bucket`, `sparse_column_with_integerized_feature`).
(2) Model type:
* Deep neural network models (`DNNClassifier`, `DNNRegressor`).
Continuous features can be directly fed into deep neural network models.
age_column = real_valued_column("age")
To feed sparse features into DNN models, wrap the column with
`embedding_column` or `one_hot_column`. `one_hot_column` will create a dense
boolean tensor with an entry for each possible value, and thus the
computation cost is linear in the number of possible values versus the number
of values that occur in the sparse tensor. Thus using a "one_hot_column" is
only recommended for features with only a few possible values. For features
with many possible values or for very sparse features, `embedding_column` is
recommended.
embedded_dept_column = embedding_column(
sparse_column_with_keys("department", ["math", "philosphy", ...]),
dimension=10)
* Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).
    Sparse features can be fed directly into linear models. When doing so,
    embedding lookups are used to efficiently perform the sparse matrix
    multiplication.
dept_column = sparse_column_with_keys("department",
["math", "philosophy", "english"])
It is recommended that continuous features be bucketized before being
fed into linear models.
bucketized_age_column = bucketized_column(
source_column=age_column,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
Sparse features can be crossed (also known as conjuncted or combined) in
order to form non-linearities, and then fed into linear models.
cross_dept_age_column = crossed_column(
columns=[department_column, bucketized_age_column],
hash_bucket_size=1000)
Example of building an `Estimator` model using FeatureColumns:
# Define features and transformations
deep_feature_columns = [age_column, embedded_dept_column]
wide_feature_columns = [dept_column, bucketized_age_column,
cross_dept_age_column]
# Build deep model
estimator = DNNClassifier(
feature_columns=deep_feature_columns,
hidden_units=[500, 250, 50])
estimator.train(...)
# Or build a wide model
estimator = LinearClassifier(
feature_columns=wide_feature_columns)
estimator.train(...)
# Or build a wide and deep model!
estimator = DNNLinearCombinedClassifier(
linear_feature_columns=wide_feature_columns,
dnn_feature_columns=deep_feature_columns,
dnn_hidden_units=[500, 250, 50])
estimator.train(...)
FeatureColumns can also be transformed into a generic input layer for
custom models using `input_from_feature_columns` within
`feature_column_ops.py`.
Example of building a non-`Estimator` model using FeatureColumns:
# Building model via layers
deep_feature_columns = [age_column, embedded_dept_column]
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=deep_feature_columns)
first_layer = input_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=deep_feature_columns)
second_layer = fully_connected(first_layer, ...)
See feature_column_ops_test for more examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import math
import six
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import embedding_ops
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.ops import bucketization_op
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.contrib.layers.python.ops import sparse_ops as contrib_sparse_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_py
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
class _LinearEmbeddingLookupArguments(
collections.namedtuple("_LinearEmbeddingLookupArguments",
["input_tensor",
"weight_tensor",
"vocab_size",
"initializer",
"combiner"])):
"""Represents the information needed from a column for embedding lookup.
Used to compute DNN inputs and weighted sum.
"""
pass
class _DeepEmbeddingLookupArguments(
collections.namedtuple("_DeepEmbeddingLookupArguments",
["input_tensor",
"weight_tensor",
"vocab_size",
"initializer",
"combiner",
"dimension",
"shared_embedding_name",
"hash_key",
"max_norm",
"trainable"])):
"""Represents the information needed from a column for embedding lookup.
Used to compute DNN inputs and weighted sum.
"""
pass
class _FeatureColumn(object):
"""Represents a feature column abstraction.
  To distinguish the concept of a feature family from a specific binary feature
  within a family, we refer to a feature family like "country" as a feature
  column. For example, "country:US" is a feature which is in the "country"
  feature column and has a feature value ("US").
  This class is abstract. Users should not instantiate it directly. The
  following classes (_SparseColumn, _RealValuedColumn, ...) are its concrete
  implementations.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
@deprecation.deprecated(
"2016-09-25",
"Should be private.")
def name(self):
"""Returns the name of column or transformed column."""
pass
@abc.abstractproperty
@deprecation.deprecated(
"2016-09-25",
"Should be private.")
def config(self):
"""Returns configuration of the base feature for `tf.parse_example`."""
pass
@abc.abstractproperty
@deprecation.deprecated(
"2016-09-25",
"Should be private.")
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
pass
@abc.abstractmethod
@deprecation.deprecated(
"2016-09-25",
"Should be private.")
def insert_transformed_feature(self, columns_to_tensors):
"""Apply transformation and inserts it into columns_to_tensors.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have _FeatureColumn
as a key too. That means that _FeatureColumn is already transformed.
"""
raise NotImplementedError("Transform is not implemented for {}.".format(
self))
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collection=None,
trainable=True,
output_rank=2):
"""Returns a Tensor as an input to the first layer of neural network."""
raise ValueError("Calling an abstract method.")
def _deep_embedding_lookup_arguments(self, input_tensor):
"""Returns arguments to embedding lookup to build an input layer."""
raise NotImplementedError(
"No deep embedding lookup arguments for column {}.".format(self))
# It is expected that classes implement either wide_embedding_lookup_arguments
# or to_dense_tensor to be used in linear models.
# pylint: disable=unused-argument
def _wide_embedding_lookup_arguments(self, input_tensor):
"""Returns arguments to look up embeddings for this column."""
raise NotImplementedError(
"No wide embedding lookup arguments for column {}.".format(self))
# pylint: disable=unused-argument
def _to_dense_tensor(self, input_tensor):
"""Returns a dense tensor representing this column's values."""
raise NotImplementedError(
"No dense tensor representation for column {}.".format(self))
def _checkpoint_path(self):
"""Returns None, or a (path,tensor_name) to load a checkpoint from."""
return None
def _key_without_properties(self, properties):
"""Helper method for self.key() that omits particular properties."""
fields_values = []
# pylint: disable=protected-access
for i, k in enumerate(self._fields):
if k in properties:
# Excludes a property from the key.
# For instance, exclude `initializer` from the key of EmbeddingColumn
# since we don't support users specifying different initializers for
# the same embedding column. Ditto for `normalizer` and
# RealValuedColumn.
# Special treatment is needed since the default str form of a
# function contains its address, which could introduce non-determinism
# in sorting.
continue
fields_values.append("{}={}".format(k, self[i]))
# pylint: enable=protected-access
# This is effectively the same format as str(self), except with our special
# treatment.
return "{}({})".format(type(self).__name__, ", ".join(fields_values))
# TODO(b/30410315): Support warm starting in all feature columns.
class _SparseColumn(
_FeatureColumn,
fc_core._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple("_SparseColumn", [
"column_name", "is_integerized", "bucket_size", "lookup_config",
"combiner", "dtype"
])):
"""Represents a sparse feature column also known as categorical features.
Instances of this class are immutable. A sparse column means features are
  sparse and the dictionary returned by InputBuilder contains a
("column_name", SparseTensor) pair.
One and only one of bucket_size or lookup_config should be set. If
is_integerized is True then bucket_size should be set.
Attributes:
column_name: A string defining sparse column name.
is_integerized: A bool if True means type of feature is an integer.
Integerized means we can use the feature itself as id.
bucket_size: An int that is > 0. The number of buckets.
lookup_config: A _SparseIdLookupConfig defining feature-to-id lookup
configuration
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: Type of features, either `tf.string` or `tf.int64`.
Raises:
TypeError: if lookup_config is not a _SparseIdLookupConfig.
ValueError: if above expectations about input fails.
"""
def __new__(cls,
column_name,
is_integerized=False,
bucket_size=None,
lookup_config=None,
combiner="sum",
dtype=dtypes.string):
if is_integerized and bucket_size is None:
raise ValueError("bucket_size must be set if is_integerized is True. "
"column_name: {}".format(column_name))
if is_integerized and not dtype.is_integer:
raise ValueError("dtype must be an integer if is_integerized is True. "
"dtype: {}, column_name: {}.".format(dtype, column_name))
if dtype != dtypes.string and not dtype.is_integer:
raise ValueError("dtype must be string or integer. "
"dtype: {}, column_name: {}".format(dtype, column_name))
if bucket_size is None and lookup_config is None:
raise ValueError("one of bucket_size or lookup_config must be set. "
"column_name: {}".format(column_name))
if bucket_size is not None and lookup_config:
raise ValueError("one and only one of bucket_size or lookup_config "
"must be set. column_name: {}".format(column_name))
if bucket_size is not None and bucket_size < 1:
raise ValueError("bucket_size must be at least 1. "
"bucket_size: {}, column_name: {}".format(bucket_size,
column_name))
if ((lookup_config) and
(not isinstance(lookup_config, _SparseIdLookupConfig))):
raise TypeError(
"lookup_config must be an instance of _SparseIdLookupConfig. "
"Given one is in type {} for column_name {}".format(
type(lookup_config), column_name))
if (lookup_config and lookup_config.vocabulary_file and
lookup_config.vocab_size is None):
raise ValueError("vocab_size must be defined. "
"column_name: {}".format(column_name))
return super(_SparseColumn, cls).__new__(
cls,
column_name,
is_integerized=is_integerized,
bucket_size=bucket_size,
lookup_config=lookup_config,
combiner=combiner,
dtype=dtype)
@property
def name(self):
return self.column_name
@property
def length(self):
"""Returns vocabulary or hash_bucket size."""
if self.bucket_size is not None:
return self.bucket_size
return self.lookup_config.vocab_size + self.lookup_config.num_oov_buckets
@property
def config(self):
return {self.column_name: parsing_ops.VarLenFeature(self.dtype)}
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def id_tensor(self, input_tensor):
"""Returns the id tensor from the given transformed input_tensor."""
return input_tensor
# pylint: disable=unused-argument
def weight_tensor(self, input_tensor):
"""Returns the weight tensor from the given transformed input_tensor."""
return None
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
raise ValueError(
"SparseColumn is not supported in DNN. "
"Please use embedding_column or one_hot_column. column: {}".format(
self))
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=self.id_tensor(input_tensor),
weight_tensor=self.weight_tensor(input_tensor),
vocab_size=self.length,
initializer=init_ops.zeros_initializer(),
combiner=self.combiner)
def _get_input_sparse_tensor(self, input_tensor):
"""sparsify input_tensor if dense."""
if not isinstance(input_tensor, sparse_tensor_py.SparseTensor):
# To avoid making any assumptions about which values are to be ignored,
# we set ignore_value to -1 for numeric tensors to avoid excluding valid
# indices.
if input_tensor.dtype == dtypes.string:
ignore_value = ""
else:
ignore_value = -1
input_tensor = _reshape_real_valued_tensor(input_tensor, 2, self.name)
input_tensor = contrib_sparse_ops.dense_to_sparse_tensor(
input_tensor, ignore_value=ignore_value)
return input_tensor
def is_compatible(self, other_column):
"""Check compatibility of two sparse columns."""
if self.lookup_config and other_column.lookup_config:
return self.lookup_config == other_column.lookup_config
compatible = (self.length == other_column.length and
(self.dtype == other_column.dtype or
(self.dtype.is_integer and other_column.dtype.is_integer)))
if compatible:
logging.warn("Column {} and {} may not have the same vocabulary.".
format(self.name, other_column.name))
return compatible
@abc.abstractmethod
def _do_transform(self, input_tensor):
pass
def insert_transformed_feature(self, columns_to_tensors):
"""Handles sparse column to id conversion."""
input_tensor = self._get_input_sparse_tensor(columns_to_tensors[self.name])
columns_to_tensors[self] = self._do_transform(input_tensor)
def _transform_feature(self, inputs):
input_tensor = self._get_input_sparse_tensor(inputs.get(self.name))
return self._do_transform(input_tensor)
@property
def _parse_example_spec(self):
return self.config
@property
def _num_buckets(self):
return self.length
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
input_tensor = inputs.get(self)
return fc_core._CategoricalColumn.IdWeightPair( # pylint: disable=protected-access
self.id_tensor(input_tensor), self.weight_tensor(input_tensor))
class _SparseColumnIntegerized(_SparseColumn):
"""See `sparse_column_with_integerized_feature`."""
def _do_transform(self, input_tensor):
sparse_id_values = math_ops.mod(input_tensor.values, self.bucket_size,
name="mod")
return sparse_tensor_py.SparseTensor(input_tensor.indices, sparse_id_values,
input_tensor.dense_shape)
def sparse_column_with_integerized_feature(column_name,
bucket_size,
combiner="sum",
dtype=dtypes.int64):
"""Creates an integerized _SparseColumn.
  Use this when your features are already pre-integerized into int64 IDs, that
  is, when the feature values themselves are the ids you want to use. Typically
  the ids form a contiguous range of integers, but they don't have to. The
  output value is simply copied from the input feature. Be aware, however, that
  large gaps of unused integers have a downstream cost: for instance, a one-hot
  tensor built from these ids will contain an entry for every unused integer,
  and those entries will always be zero.
Args:
column_name: A string defining sparse column name.
    bucket_size: An int that is > 1. The number of buckets. It should be larger
      than the maximum feature value. In other words, features in this column
      should be int64s in the range [0, bucket_size).
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: Type of features. It should be an integer type. Default value is
dtypes.int64.
Returns:
An integerized _SparseColumn definition.
Raises:
ValueError: bucket_size is not greater than 1.
ValueError: dtype is not integer.
"""
return _SparseColumnIntegerized(
column_name, is_integerized=True, bucket_size=bucket_size,
combiner=combiner, dtype=dtype)
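# Editor's sketch (not part of the original module): a minimal use of
# sparse_column_with_integerized_feature for a hypothetical "video_id" feature
# whose values are already int64 ids in [0, 100000).
def _example_integerized_feature_column():
  # Ids are used directly (modulo bucket_size); no vocabulary lookup happens.
  return sparse_column_with_integerized_feature("video_id", bucket_size=100000)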
class _SparseColumnHashed(_SparseColumn):
"""See `sparse_column_with_hash_bucket`."""
def _do_transform(self, input_tensor):
if self.dtype.is_integer:
sparse_values = string_ops.as_string(input_tensor.values)
else:
sparse_values = input_tensor.values
sparse_id_values = string_ops.string_to_hash_bucket_fast(
sparse_values, self.bucket_size, name="lookup")
return sparse_tensor_py.SparseTensor(input_tensor.indices, sparse_id_values,
input_tensor.dense_shape)
def sparse_column_with_hash_bucket(column_name,
hash_bucket_size,
combiner="sum",
dtype=dtypes.string):
"""Creates a _SparseColumn with hashed bucket configuration.
Use this when your sparse features are in string or integer format, but you
don't have a vocab file that maps each value to an integer ID.
output_id = Hash(input_feature_string) % bucket_size
Args:
column_name: A string defining sparse column name.
hash_bucket_size: An int that is > 1. The number of buckets.
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A _SparseColumn with hashed bucket configuration
Raises:
    ValueError: hash_bucket_size is not greater than 1.
ValueError: dtype is neither string nor integer.
"""
return _SparseColumnHashed(
column_name,
bucket_size=hash_bucket_size,
combiner=combiner,
dtype=dtype)
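# Editor's sketch (not part of the original module): hashing a string-valued
# "country" feature into 1000 buckets. The feature name and bucket count are
# assumptions for illustration only.
def _example_hash_bucket_column():
  country = sparse_column_with_hash_bucket("country", hash_bucket_size=1000)
  # The resulting column can be fed directly to a linear model or wrapped in
  # embedding_column / one_hot_column for a DNN.
  return country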
class _SparseColumnKeys(_SparseColumn):
"""See `sparse_column_with_keys`."""
def _do_transform(self, input_tensor):
table = lookup.index_table_from_tensor(
mapping=tuple(self.lookup_config.keys),
default_value=self.lookup_config.default_value,
dtype=self.dtype,
name="lookup")
return table.lookup(input_tensor)
def sparse_column_with_keys(
column_name, keys, default_value=-1, combiner="sum", dtype=dtypes.string):
"""Creates a _SparseColumn with keys.
Look up logic is as follows:
lookup_id = index_of_feature_in_keys if feature in keys else default_value
Args:
column_name: A string defining sparse column name.
keys: A list or tuple defining vocabulary. Must be castable to `dtype`.
default_value: The value to use for out-of-vocabulary feature values.
Default is -1.
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: Type of features. Only integer and string are supported.
Returns:
A _SparseColumnKeys with keys configuration.
"""
keys = tuple(keys)
return _SparseColumnKeys(
column_name,
lookup_config=_SparseIdLookupConfig(
keys=keys, vocab_size=len(keys), default_value=default_value),
combiner=combiner,
dtype=dtype)
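# Editor's sketch (not part of the original module): an explicit-vocabulary
# column. Out-of-vocabulary values map to default_value (-1 here). The feature
# name and keys are assumptions.
def _example_keys_column():
  return sparse_column_with_keys(
      "language", keys=["en", "fr", "de"], default_value=-1)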
class _SparseColumnVocabulary(_SparseColumn):
"""See `sparse_column_with_vocabulary_file`."""
def _do_transform(self, st):
if self.dtype.is_integer:
sparse_string_values = string_ops.as_string(st.values)
sparse_string_tensor = sparse_tensor_py.SparseTensor(st.indices,
sparse_string_values,
st.dense_shape)
else:
sparse_string_tensor = st
table = lookup.index_table_from_file(
vocabulary_file=self.lookup_config.vocabulary_file,
num_oov_buckets=self.lookup_config.num_oov_buckets,
vocab_size=self.lookup_config.vocab_size,
default_value=self.lookup_config.default_value,
name=self.name + "_lookup")
return table.lookup(sparse_string_tensor)
def sparse_column_with_vocabulary_file(column_name,
vocabulary_file,
num_oov_buckets=0,
vocab_size=None,
default_value=-1,
combiner="sum",
dtype=dtypes.string):
"""Creates a _SparseColumn with vocabulary file configuration.
Use this when your sparse features are in string or integer format, and you
have a vocab file that maps each value to an integer ID.
output_id = LookupIdFromVocab(input_feature_string)
Args:
column_name: A string defining sparse column name.
vocabulary_file: The vocabulary filename.
    num_oov_buckets: The number of out-of-vocabulary buckets. If zero, all
      out-of-vocabulary features will be ignored.
    vocab_size: Number of elements in the vocabulary.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A _SparseColumn with vocabulary file configuration.
Raises:
ValueError: vocab_size is not defined.
ValueError: dtype is neither string nor integer.
"""
if vocab_size is None:
raise ValueError("vocab_size should be defined. "
"column_name: {}".format(column_name))
return _SparseColumnVocabulary(
column_name,
lookup_config=_SparseIdLookupConfig(
vocabulary_file=vocabulary_file,
num_oov_buckets=num_oov_buckets,
vocab_size=vocab_size,
default_value=default_value),
combiner=combiner,
dtype=dtype)
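# Editor's sketch (not part of the original module): a vocabulary-file column.
# The file path, vocab_size and bucket count below are placeholders; vocab_size
# must match the actual file.
def _example_vocabulary_file_column():
  return sparse_column_with_vocabulary_file(
      "product_id",
      vocabulary_file="/tmp/products.txt",
      vocab_size=50000,
      num_oov_buckets=10)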
class _WeightedSparseColumn(
_FeatureColumn,
fc_core._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple("_WeightedSparseColumn",
["sparse_id_column", "weight_column_name",
"dtype"])):
"""See `weighted_sparse_column`."""
def __new__(cls, sparse_id_column, weight_column_name, dtype):
return super(_WeightedSparseColumn, cls).__new__(cls, sparse_id_column,
weight_column_name, dtype)
@property
def name(self):
return "{}_weighted_by_{}".format(self.sparse_id_column.name,
self.weight_column_name)
@property
def length(self):
"""Returns id size."""
return self.sparse_id_column.length
@property
def config(self):
config = _get_feature_config(self.sparse_id_column)
config.update(
{self.weight_column_name: parsing_ops.VarLenFeature(self.dtype)})
return config
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def id_tensor(self, input_tensor):
"""Returns the id tensor from the given transformed input_tensor."""
return input_tensor[0]
def weight_tensor(self, input_tensor):
"""Returns the weight tensor from the given transformed input_tensor."""
return input_tensor[1]
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
raise ValueError(
"WeightedSparseColumn is not supported in DNN. "
"Please use embedding_column or one_hot_column. column: {}".format(
self))
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=self.id_tensor(input_tensor),
weight_tensor=self.weight_tensor(input_tensor),
vocab_size=self.length,
initializer=init_ops.zeros_initializer(),
combiner=self.sparse_id_column.combiner)
def _do_transform(self, id_tensor, weight_tensor):
if not isinstance(weight_tensor, sparse_tensor_py.SparseTensor):
# The weight tensor can be a regular Tensor. In such case, sparsify it.
weight_tensor = contrib_sparse_ops.dense_to_sparse_tensor(weight_tensor)
if not self.dtype.is_floating:
weight_tensor = math_ops.to_float(weight_tensor)
return tuple([id_tensor, weight_tensor])
def insert_transformed_feature(self, columns_to_tensors):
"""Inserts a tuple with the id and weight tensors."""
if self.sparse_id_column not in columns_to_tensors:
self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
weight_tensor = columns_to_tensors[self.weight_column_name]
columns_to_tensors[self] = self._do_transform(
columns_to_tensors[self.sparse_id_column], weight_tensor)
def _transform_feature(self, inputs):
return self._do_transform(
inputs.get(self.sparse_id_column), inputs.get(self.weight_column_name))
@property
def _parse_example_spec(self):
return self.config
@property
def _num_buckets(self):
return self.length
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
input_tensor = inputs.get(self)
return fc_core._CategoricalColumn.IdWeightPair( # pylint: disable=protected-access
self.id_tensor(input_tensor), self.weight_tensor(input_tensor))
def is_compatible(self, other_column):
"""Check compatibility with other sparse column."""
if isinstance(other_column, _WeightedSparseColumn):
return self.sparse_id_column.is_compatible(other_column.sparse_id_column)
return self.sparse_id_column.is_compatible(other_column)
def weighted_sparse_column(sparse_id_column,
weight_column_name,
dtype=dtypes.float32):
"""Creates a _SparseColumn by combining sparse_id_column with a weight column.
Example:
```python
sparse_feature = sparse_column_with_hash_bucket(column_name="sparse_col",
hash_bucket_size=1000)
weighted_feature = weighted_sparse_column(sparse_id_column=sparse_feature,
weight_column_name="weights_col")
```
  This configuration assumes that the input dictionary of the model contains
  the following two items:
* (key="sparse_col", value=sparse_tensor) where sparse_tensor is
a SparseTensor.
* (key="weights_col", value=weights_tensor) where weights_tensor
is a SparseTensor.
Following are assumed to be true:
* sparse_tensor.indices = weights_tensor.indices
* sparse_tensor.dense_shape = weights_tensor.dense_shape
Args:
sparse_id_column: A `_SparseColumn` which is created by
`sparse_column_with_*` functions.
weight_column_name: A string defining a sparse column name which represents
weight or value of the corresponding sparse id feature.
dtype: Type of weights, such as `tf.float32`. Only floating and integer
weights are supported.
Returns:
A _WeightedSparseColumn composed of two sparse features: one represents id,
the other represents weight (value) of the id feature in that example.
Raises:
ValueError: if dtype is not convertible to float.
"""
if not (dtype.is_integer or dtype.is_floating):
raise ValueError("dtype is not convertible to float. Given {}".format(
dtype))
return _WeightedSparseColumn(sparse_id_column, weight_column_name, dtype)
class _OneHotColumn(
_FeatureColumn,
fc_core._DenseColumn, # pylint: disable=protected-access
collections.namedtuple("_OneHotColumn", ["sparse_id_column"])):
"""Represents a one-hot column for use in deep networks.
Args:
sparse_id_column: A _SparseColumn which is created by `sparse_column_with_*`
function.
"""
@property
def name(self):
return "{}_one_hot".format(self.sparse_id_column.name)
@property
def length(self):
"""Returns vocabulary or hash_bucket size."""
return self.sparse_id_column.length
@property
def config(self):
"""Returns the parsing config of the origin column."""
return _get_feature_config(self.sparse_id_column)
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def insert_transformed_feature(self, columns_to_tensors):
"""Used by the Transformer to prevent double transformations."""
if self.sparse_id_column not in columns_to_tensors:
self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
columns_to_tensors[self] = columns_to_tensors[self.sparse_id_column]
def _to_dnn_input_layer(self,
transformed_input_tensor,
unused_weight_collections=None,
unused_trainable=False,
output_rank=2):
"""Returns a Tensor as an input to the first layer of neural network.
Args:
transformed_input_tensor: A tensor that has undergone the transformations
in `insert_transformed_feature`. Rank should be >= `output_rank`.
unused_weight_collections: Unused. One hot encodings are not variable.
unused_trainable: Unused. One hot encodings are not trainable.
output_rank: the desired rank of the output `Tensor`.
Returns:
A multi-hot Tensor to be fed into the first layer of neural network.
Raises:
ValueError: When using one_hot_column with weighted_sparse_column.
This is not yet supported.
"""
# Reshape ID column to `output_rank`.
sparse_id_column = self.sparse_id_column.id_tensor(transformed_input_tensor)
# pylint: disable=protected-access
sparse_id_column = layers._inner_flatten(sparse_id_column, output_rank)
weight_tensor = self.sparse_id_column.weight_tensor(
transformed_input_tensor)
if weight_tensor is not None:
weighted_column = sparse_ops.sparse_merge(sp_ids=sparse_id_column,
sp_values=weight_tensor,
vocab_size=self.length)
# Remove (?, -1) index
weighted_column = sparse_ops.sparse_slice(
weighted_column,
[0, 0],
weighted_column.dense_shape)
return sparse_ops.sparse_tensor_to_dense(weighted_column)
dense_id_tensor = sparse_ops.sparse_tensor_to_dense(sparse_id_column,
default_value=-1)
# One hot must be float for tf.concat reasons since all other inputs to
# input_layer are float32.
one_hot_id_tensor = array_ops.one_hot(
dense_id_tensor, depth=self.length, on_value=1.0, off_value=0.0)
# Reduce to get a multi-hot per example.
return math_ops.reduce_sum(
one_hot_id_tensor, reduction_indices=[output_rank - 1])
@property
def _variable_shape(self):
return tensor_shape.TensorShape([self.length])
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
return inputs.get(self)
def _transform_feature(self, inputs):
return self._to_dnn_input_layer(inputs.get(self.sparse_id_column))
@property
def _parse_example_spec(self):
return self.config
class _EmbeddingColumn(
_FeatureColumn,
fc_core._DenseColumn, # pylint: disable=protected-access
collections.namedtuple("_EmbeddingColumn", [
"sparse_id_column", "dimension", "combiner", "initializer",
"ckpt_to_load_from", "tensor_name_in_ckpt", "shared_embedding_name",
"shared_vocab_size", "max_norm", "trainable"
])):
"""Represents an embedding column.
Args:
sparse_id_column: A `_SparseColumn` which is created by
`sparse_column_with_*` or `weighted_sparse_column` functions.
dimension: An integer specifying dimension of the embedding.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column:
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean 0.0 and standard deviation
1/sqrt(sparse_id_column.length).
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
shared_embedding_name: (Optional). The common name for shared embedding.
shared_vocab_size: (Optional). The common vocab_size used for shared
embedding space.
max_norm: (Optional). If not None, embedding values are l2-normalized to
the value of max_norm.
trainable: (Optional). Should the embedding be trainable. Default is True.
Raises:
ValueError: if `initializer` is specified and is not callable. Also,
if only one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified.
"""
def __new__(cls,
sparse_id_column,
dimension,
combiner="mean",
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
shared_embedding_name=None,
shared_vocab_size=None,
max_norm=None,
trainable=True):
if initializer is not None and not callable(initializer):
raise ValueError("initializer must be callable if specified. "
"Embedding of column_name: {}".format(
sparse_id_column.name))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError("Must specify both `ckpt_to_load_from` and "
"`tensor_name_in_ckpt` or none of them.")
if initializer is None:
logging.warn("The default stddev value of initializer will change from "
"\"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" after "
"2017/02/25.")
stddev = 1 / math.sqrt(sparse_id_column.length)
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=stddev)
return super(_EmbeddingColumn, cls).__new__(cls, sparse_id_column,
dimension, combiner,
initializer, ckpt_to_load_from,
tensor_name_in_ckpt,
shared_embedding_name,
shared_vocab_size,
max_norm,
trainable)
@property
def name(self):
if self.shared_embedding_name is None:
return "{}_embedding".format(self.sparse_id_column.name)
else:
return "{}_shared_embedding".format(self.sparse_id_column.name)
@property
def length(self):
"""Returns id size."""
if self.shared_vocab_size is None:
return self.sparse_id_column.length
else:
return self.shared_vocab_size
@property
def config(self):
return _get_feature_config(self.sparse_id_column)
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self._key_without_properties(["initializer"])
def insert_transformed_feature(self, columns_to_tensors):
if self.sparse_id_column not in columns_to_tensors:
self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
columns_to_tensors[self] = columns_to_tensors[self.sparse_id_column]
def _deep_embedding_lookup_arguments(self, input_tensor):
return _DeepEmbeddingLookupArguments(
input_tensor=self.sparse_id_column.id_tensor(input_tensor),
weight_tensor=self.sparse_id_column.weight_tensor(input_tensor),
vocab_size=self.length,
dimension=self.dimension,
initializer=self.initializer,
combiner=self.combiner,
shared_embedding_name=self.shared_embedding_name,
hash_key=None,
max_norm=self.max_norm,
trainable=self.trainable)
def _checkpoint_path(self):
if self.ckpt_to_load_from is not None:
return self.ckpt_to_load_from, self.tensor_name_in_ckpt
return None
# pylint: disable=unused-argument
def _wide_embedding_lookup_arguments(self, input_tensor):
raise ValueError("Column {} is not supported in linear models. "
"Please use sparse_column.".format(self))
@property
def _variable_shape(self):
return tensor_shape.TensorShape([self.dimension])
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
return _embeddings_from_arguments(
self,
self._deep_embedding_lookup_arguments(inputs.get(self)),
weight_collections, trainable)
def _transform_feature(self, inputs):
return inputs.get(self.sparse_id_column)
@property
def _parse_example_spec(self):
return self.config
def _is_variable(v):
"""Returns true if `v` is a variable."""
return isinstance(v, (variables.Variable,
resource_variable_ops.ResourceVariable))
def _embeddings_from_arguments(column,
args,
weight_collections,
trainable,
output_rank=2):
"""Returns embeddings for a column based on the computed arguments.
Args:
column: the column name.
args: the _DeepEmbeddingLookupArguments for this column.
weight_collections: collections to store weights in.
trainable: whether these embeddings should be trainable.
output_rank: the desired rank of the returned `Tensor`. Inner dimensions will
be combined to produce the desired rank.
Returns:
the embeddings.
Raises:
ValueError: if not possible to create.
"""
# pylint: disable=protected-access
input_tensor = layers._inner_flatten(args.input_tensor, output_rank)
weight_tensor = None
if args.weight_tensor is not None:
weight_tensor = layers._inner_flatten(args.weight_tensor, output_rank)
# pylint: enable=protected-access
# This option is only enabled for scattered_embedding_column.
if args.hash_key:
embeddings = contrib_variables.model_variable(
name="weights",
shape=[args.vocab_size],
dtype=dtypes.float32,
initializer=args.initializer,
trainable=(trainable and args.trainable),
collections=weight_collections)
return embedding_ops.scattered_embedding_lookup_sparse(
embeddings,
input_tensor,
args.dimension,
hash_key=args.hash_key,
combiner=args.combiner,
name="lookup")
if args.shared_embedding_name is not None:
shared_embedding_collection_name = (
"SHARED_EMBEDDING_COLLECTION_" + args.shared_embedding_name.upper())
graph = ops.get_default_graph()
shared_embedding_collection = (
graph.get_collection_ref(shared_embedding_collection_name))
shape = [args.vocab_size, args.dimension]
if shared_embedding_collection:
if len(shared_embedding_collection) > 1:
raise ValueError(
"Collection %s can only contain one "
"(partitioned) variable." % shared_embedding_collection_name)
else:
embeddings = shared_embedding_collection[0]
if embeddings.get_shape() != shape:
raise ValueError(
"The embedding variable with name {} already "
"exists, but its shape does not match required "
"embedding shape here. Please make sure to use "
"different shared_embedding_name for different "
"shared embeddings.".format(args.shared_embedding_name))
else:
embeddings = contrib_variables.model_variable(
name=args.shared_embedding_name,
shape=shape,
dtype=dtypes.float32,
initializer=args.initializer,
trainable=(trainable and args.trainable),
collections=weight_collections)
graph.add_to_collection(shared_embedding_collection_name, embeddings)
else:
embeddings = contrib_variables.model_variable(
name="weights",
shape=[args.vocab_size, args.dimension],
dtype=dtypes.float32,
initializer=args.initializer,
trainable=(trainable and args.trainable),
collections=weight_collections)
if _is_variable(embeddings):
embeddings = [embeddings]
else:
embeddings = embeddings._get_variable_list() # pylint: disable=protected-access
# pylint: disable=protected-access
_maybe_restore_from_checkpoint(column._checkpoint_path(), embeddings)
return embedding_ops.safe_embedding_lookup_sparse(
embeddings,
input_tensor,
sparse_weights=weight_tensor,
combiner=args.combiner,
name=column.name + "weights",
max_norm=args.max_norm)
def _maybe_restore_from_checkpoint(checkpoint_path, variable):
if checkpoint_path is not None:
path, tensor_name = checkpoint_path
weights_to_restore = variable
if len(variable) == 1:
weights_to_restore = variable[0]
checkpoint_utils.init_from_checkpoint(path,
{tensor_name: weights_to_restore})
def one_hot_column(sparse_id_column):
"""Creates an `_OneHotColumn` for a one-hot or multi-hot repr in a DNN.
Args:
sparse_id_column: A _SparseColumn which is created by
`sparse_column_with_*`
or crossed_column functions. Note that `combiner` defined in
`sparse_id_column` is ignored.
Returns:
An _OneHotColumn.
"""
return _OneHotColumn(sparse_id_column)
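# Editor's sketch (not part of the original module): a one-hot (multi-hot for
# multivalent rows) encoding of a small categorical feature, suitable as DNN
# input. The "color" feature and its keys are assumptions.
def _example_one_hot_column():
  color = sparse_column_with_keys("color", keys=["red", "green", "blue"])
  # Yields a dense float tensor of shape [batch_size, 3] in the input layer.
  return one_hot_column(color)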
def embedding_column(sparse_id_column,
dimension,
combiner="mean",
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True):
"""Creates an `_EmbeddingColumn` for feeding sparse data into a DNN.
Args:
sparse_id_column: A `_SparseColumn` which is created by for example
`sparse_column_with_*` or crossed_column functions. Note that `combiner`
defined in `sparse_id_column` is ignored.
dimension: An integer specifying dimension of the embedding.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column:
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean 0.0 and standard deviation
1/sqrt(sparse_id_column.length).
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
max_norm: (Optional). If not None, embedding values are l2-normalized to
the value of max_norm.
trainable: (Optional). Should the embedding be trainable. Default is True
Returns:
An `_EmbeddingColumn`.
"""
return _EmbeddingColumn(sparse_id_column, dimension, combiner, initializer,
ckpt_to_load_from, tensor_name_in_ckpt,
max_norm=max_norm, trainable=trainable)
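# Editor's sketch (not part of the original module): embedding a categorical
# feature with many values for DNN input. Names and sizes are illustrative.
def _example_embedding_column():
  dept = sparse_column_with_hash_bucket("department", hash_bucket_size=10000)
  # A trainable [10000, 16] embedding, mean-combined over multivalent rows.
  return embedding_column(dept, dimension=16, combiner="mean")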
def shared_embedding_columns(sparse_id_columns,
dimension,
combiner="mean",
shared_embedding_name=None,
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True):
"""Creates a list of `_EmbeddingColumn` sharing the same embedding.
Args:
sparse_id_columns: An iterable of `_SparseColumn`, such as those created by
`sparse_column_with_*` or crossed_column functions. Note that `combiner`
defined in each sparse_id_column is ignored.
dimension: An integer specifying dimension of the embedding.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column:
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
shared_embedding_name: (Optional). A string specifying the name of shared
embedding weights. This will be needed if you want to reference the shared
embedding separately from the generated `_EmbeddingColumn`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean 0.0 and standard deviation
1/sqrt(sparse_id_columns[0].length).
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
max_norm: (Optional). If not None, embedding values are l2-normalized to
the value of max_norm.
trainable: (Optional). Should the embedding be trainable. Default is True
Returns:
A tuple of `_EmbeddingColumn` with shared embedding space.
Raises:
ValueError: if sparse_id_columns is empty, or its elements are not
compatible with each other.
TypeError: if `sparse_id_columns` is not a sequence or is a string. If at
least one element of `sparse_id_columns` is not a `SparseColumn` or a
`WeightedSparseColumn`.
"""
if (not isinstance(sparse_id_columns, collections.Sequence) or
isinstance(sparse_id_columns, six.string_types)):
raise TypeError(
"sparse_id_columns must be a non-string sequence (ex: list or tuple) "
"instead of type {}.".format(type(sparse_id_columns)))
if len(sparse_id_columns) < 1:
raise ValueError("The input sparse_id_columns should have at least one "
"element.")
for sparse_id_column in sparse_id_columns:
if not (isinstance(sparse_id_column, _SparseColumn) or
isinstance(sparse_id_column, _WeightedSparseColumn)):
raise TypeError("Elements of sparse_id_columns must be _SparseColumn or "
"_WeightedSparseColumn, but {} is not."
.format(sparse_id_column))
if len(sparse_id_columns) == 1:
return [
_EmbeddingColumn(sparse_id_columns[0], dimension, combiner, initializer,
ckpt_to_load_from, tensor_name_in_ckpt,
shared_embedding_name, max_norm=max_norm,
trainable=trainable)]
else:
# Check compatibility of sparse_id_columns
compatible = True
for column in sparse_id_columns[1:]:
if isinstance(sparse_id_columns[0], _WeightedSparseColumn):
compatible = compatible and sparse_id_columns[0].is_compatible(column)
else:
compatible = compatible and column.is_compatible(sparse_id_columns[0])
if not compatible:
raise ValueError("The input sparse id columns are not compatible.")
# Construct the shared name and size for shared embedding space.
if not shared_embedding_name:
# Sort the columns so that shared_embedding_name will be deterministic
# even if users pass in unsorted columns from a dict or something.
# Since they are different classes, ordering is SparseColumns first,
# then WeightedSparseColumns.
sparse_columns = []
weighted_sparse_columns = []
for column in sparse_id_columns:
if isinstance(column, _SparseColumn):
sparse_columns.append(column)
else:
weighted_sparse_columns.append(column)
sorted_columns = sorted(sparse_columns) + sorted(
weighted_sparse_columns, key=lambda x: x.name)
if len(sorted_columns) <= 3:
shared_embedding_name = "_".join([column.name
for column in sorted_columns])
else:
shared_embedding_name = "_".join([column.name
for column in sorted_columns[0:3]])
shared_embedding_name += (
"_plus_{}_others".format(len(sorted_columns) - 3))
shared_embedding_name += "_shared_embedding"
shared_vocab_size = sparse_id_columns[0].length
embedded_columns = []
for column in sparse_id_columns:
embedded_columns.append(
_EmbeddingColumn(column, dimension, combiner, initializer,
ckpt_to_load_from, tensor_name_in_ckpt,
shared_embedding_name, shared_vocab_size,
max_norm=max_norm, trainable=trainable))
return tuple(embedded_columns)
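# Editor's sketch (not part of the original module): two compatible columns
# (same bucket size and dtype) sharing one embedding matrix, e.g. query and
# title tokens drawn from the same vocabulary. Names are assumptions.
def _example_shared_embedding_columns():
  query = sparse_column_with_hash_bucket("query_tokens", hash_bucket_size=10000)
  title = sparse_column_with_hash_bucket("title_tokens", hash_bucket_size=10000)
  # Both columns look up ids in a single shared [10000, 16] embedding matrix.
  return shared_embedding_columns([query, title], dimension=16)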
class _ScatteredEmbeddingColumn(
_FeatureColumn,
fc_core._DenseColumn, # pylint: disable=protected-access
collections.namedtuple("_ScatteredEmbeddingColumn", [
"column_name", "size", "dimension", "hash_key", "combiner",
"initializer"
])):
"""See `scattered_embedding_column`."""
def __new__(cls,
column_name,
size,
dimension,
hash_key,
combiner="sqrtn",
initializer=None):
if initializer is not None and not callable(initializer):
raise ValueError("initializer must be callable if specified. "
"column_name: {}".format(column_name))
if initializer is None:
logging.warn("The default stddev value of initializer will change from "
"\"0.1\" to \"1/sqrt(dimension)\" after 2017/02/25.")
stddev = 0.1
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=stddev)
return super(_ScatteredEmbeddingColumn, cls).__new__(cls, column_name, size,
dimension, hash_key,
combiner,
initializer)
@property
def name(self):
return "{}_scattered_embedding".format(self.column_name)
@property
def config(self):
return {self.column_name: parsing_ops.VarLenFeature(dtypes.string)}
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self._key_without_properties(["initializer"])
def insert_transformed_feature(self, columns_to_tensors):
columns_to_tensors[self] = columns_to_tensors[self.column_name]
def _deep_embedding_lookup_arguments(self, input_tensor):
return _DeepEmbeddingLookupArguments(
input_tensor=input_tensor,
weight_tensor=None,
vocab_size=self.size,
initializer=self.initializer,
combiner=self.combiner,
dimension=self.dimension,
shared_embedding_name=None,
hash_key=self.hash_key,
max_norm=None,
trainable=True)
@property
def _variable_shape(self):
return tensor_shape.TensorShape([self.dimension])
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
return _embeddings_from_arguments(
self,
self._deep_embedding_lookup_arguments(inputs.get(self)),
weight_collections, trainable)
def _transform_feature(self, inputs):
return inputs.get(self.column_name)
@property
def _parse_example_spec(self):
return self.config
def scattered_embedding_column(column_name,
size,
dimension,
hash_key,
combiner="mean",
initializer=None):
"""Creates an embedding column of a sparse feature using parameter hashing.
  This is a useful shorthand when you want an embedding for a sparse feature,
  but instead of allocating a full embedding matrix you want each embedding
  dimension to be looked up in one shared parameter vector, using a different
  hash per dimension.
Specifically, the i-th embedding component of a value v is found by retrieving
an embedding weight whose index is a fingerprint of the pair (v,i).
An embedding column with sparse_column_with_hash_bucket such as
embedding_column(
sparse_column_with_hash_bucket(column_name, bucket_size),
dimension)
could be replaced by
scattered_embedding_column(
column_name,
size=bucket_size * dimension,
dimension=dimension,
hash_key=tf.contrib.layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
for the same number of embedding parameters. This should hopefully reduce the
impact of collisions, but adds the cost of slowing down training.
Args:
column_name: A string defining sparse column name.
size: An integer specifying the number of parameters in the embedding layer.
dimension: An integer specifying dimension of the embedding.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column:
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean 0 and standard deviation 0.1.
Returns:
A _ScatteredEmbeddingColumn.
Raises:
ValueError: if dimension or size is not a positive integer; or if combiner
is not supported.
"""
if (dimension < 1) or (size < 1):
raise ValueError("Dimension and size must be greater than 0. "
"dimension: {}, size: {}, column_name: {}".format(
dimension, size, column_name))
if combiner not in ("mean", "sqrtn", "sum"):
raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'. "
"combiner: {}, column_name: {}".format(combiner,
column_name))
return _ScatteredEmbeddingColumn(column_name, size, dimension, hash_key,
combiner, initializer)
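# Editor's sketch (not part of the original module): parameter-hashed embedding
# for a high-cardinality id feature. The hash_key below is an arbitrary int64
# constant chosen for the sketch; any seed accepted by FingerprintCat64 works.
def _example_scattered_embedding_column():
  return scattered_embedding_column(
      "user_id",
      size=100 * 1000,   # total number of embedding parameters
      dimension=16,
      hash_key=0xDECAFCAFFE,
      combiner="mean")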
def _reshape_real_valued_tensor(input_tensor, output_rank, column_name=None):
"""Reshaping logic for dense, numeric `Tensors`.
Follows the following rules:
1. If `output_rank > input_rank + 1` raise a `ValueError`.
2. If `output_rank == input_rank + 1`, expand `input_tensor` by one
dimension and return
3. If `output_rank == input_rank`, return `input_tensor`.
4. If `output_rank < input_rank`, flatten the inner dimensions of
`input_tensor` and return a `Tensor` with `output_rank`
Args:
input_tensor: a dense `Tensor` to be reshaped.
output_rank: the desired rank of the reshaped `Tensor`.
column_name: (optional) the name of the associated column. Used for error
messages.
Returns:
A `Tensor` with the same entries as `input_tensor` and rank `output_rank`.
Raises:
ValueError: if `output_rank > input_rank + 1`.
"""
input_rank = input_tensor.get_shape().ndims
if input_rank is not None:
if output_rank > input_rank + 1:
error_string = ("Rank of input Tensor ({}) should be the same as "
"output_rank ({}). For example, sequence data should "
"typically be 3 dimensional (rank 3) while non-sequence "
"data is typically 2 dimensional (rank 2).".format(
input_rank, output_rank))
if column_name is not None:
error_string = ("Error while processing column {}.".format(column_name)
+ error_string)
raise ValueError(error_string)
if output_rank == input_rank + 1:
logging.warning(
"Rank of input Tensor ({}) should be the same as output_rank ({}) "
"for column. Will attempt to expand dims. It is highly recommended "
"that you resize your input, as this behavior may change.".format(
input_rank, output_rank))
return array_ops.expand_dims(input_tensor, -1, name="expand_dims")
if output_rank == input_rank:
return input_tensor
# Here, either `input_rank` is unknown or it is greater than `output_rank`.
return layers._inner_flatten(input_tensor, output_rank) # pylint: disable=protected-access
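# Editor's sketch (not part of the original module) of the reshaping rules
# above: a rank-3 [batch, time, 1] tensor flattened to output_rank=2 becomes
# [batch, time], while a rank-1 tensor is expanded to [batch, 1].
def _example_reshape_real_valued_tensor():
  flattened = _reshape_real_valued_tensor(array_ops.zeros([4, 5, 1]), 2)
  expanded = _reshape_real_valued_tensor(array_ops.zeros([4]), 2)
  return flattened, expanded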
class _RealValuedVarLenColumn(_FeatureColumn, collections.namedtuple(
"_RealValuedVarLenColumn",
["column_name", "default_value", "dtype", "normalizer", "is_sparse"])):
"""Represents a real valued feature column for variable length Features.
Instances of this class are immutable.
If is_sparse=False, the dictionary returned by InputBuilder contains a
("column_name", Tensor) pair with a Tensor shape of (batch_size, dimension).
If is_sparse=True, the dictionary contains a ("column_name", SparseTensor)
pair instead with shape inferred after parsing.
"""
@property
def name(self):
return self.column_name
@property
def config(self):
if self.is_sparse:
return {self.column_name: parsing_ops.VarLenFeature(self.dtype)}
else:
return {self.column_name: parsing_ops.FixedLenSequenceFeature(
[], self.dtype, allow_missing=True,
default_value=self.default_value)}
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self._key_without_properties(["normalizer"])
@property
def normalizer_fn(self):
"""Returns the function used to normalize the column."""
return self.normalizer
def _normalized_input_tensor(self, input_tensor):
"""Returns the input tensor after custom normalization is applied."""
if self.normalizer is None:
return input_tensor
if self.is_sparse:
return sparse_tensor_py.SparseTensor(
input_tensor.indices,
self.normalizer(input_tensor.values),
input_tensor.dense_shape)
else:
return self.normalizer(input_tensor)
def insert_transformed_feature(self, columns_to_tensors):
"""Apply transformation and inserts it into columns_to_tensors.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have _FeatureColumn
as a key too. That means that _FeatureColumn is already transformed.
"""
# Transform the input tensor according to the normalizer function.
input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
columns_to_tensors[self] = math_ops.to_float(input_tensor)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
return _reshape_real_valued_tensor(
self._to_dense_tensor(input_tensor), output_rank, self.name)
def _to_dense_tensor(self, input_tensor):
if not self.is_sparse:
return input_tensor
raise ValueError("Set is_sparse to False if you want a dense Tensor for "
"column_name: {}".format(self.name))
@experimental
def _real_valued_var_len_column(column_name,
default_value=None,
dtype=dtypes.float32,
normalizer=None,
is_sparse=False):
"""Creates a `_RealValuedVarLenColumn` for variable-length numeric data.
  Note: this is not integrated with any of the DNN estimators, except the RNN
  ones (DynamicRNNEstimator and StateSavingRNNEstimator).
It can either create a parsing config for a SparseTensor (with is_sparse=True)
or a padded Tensor.
The (dense_)shape of the result will be [batch_size, None], which can be used
with is_sparse=False as input into an RNN (see DynamicRNNEstimator or
StateSavingRNNEstimator) or with is_sparse=True as input into a tree (see
gtflow).
Use real_valued_column if the Feature has a fixed length. Use some
SparseColumn for columns to be embedded / one-hot-encoded.
Args:
column_name: A string defining real valued column name.
default_value: A scalar value compatible with dtype. Needs to be specified
if is_sparse=False.
dtype: Defines the type of values. Default value is tf.float32. Needs to be
convertible to tf.float32.
normalizer: If not None, a function that can be used to normalize the value
of the real valued column after default_value is applied for parsing.
Normalizer function takes the input tensor as its argument, and returns
the output tensor. (e.g. lambda x: (x - 3.0) / 4.2). Note that for
      is_sparse=True, the normalizer will be run on the values of the
`SparseTensor`.
is_sparse: A boolean defining whether to create a SparseTensor or a Tensor.
Returns:
    A _RealValuedVarLenColumn.
Raises:
TypeError: if default_value is not a scalar value compatible with dtype.
TypeError: if dtype is not convertible to tf.float32.
ValueError: if default_value is None and is_sparse is False.
"""
if not (dtype.is_integer or dtype.is_floating):
raise TypeError("dtype must be convertible to float. "
"dtype: {}, column_name: {}".format(dtype, column_name))
if default_value is None and not is_sparse:
raise ValueError("default_value must be provided when is_sparse=False to "
"parse a padded Tensor. "
"column_name: {}".format(column_name))
if isinstance(default_value, list):
raise ValueError(
"Only scalar default value. default_value: {}, column_name: {}".format(
default_value, column_name))
if default_value is not None:
if dtype.is_integer:
default_value = int(default_value)
elif dtype.is_floating:
default_value = float(default_value)
return _RealValuedVarLenColumn(column_name, default_value, dtype, normalizer,
is_sparse)
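# Illustrative usage sketch (added for documentation; not part of the original
# module, and the feature name "measurements" is assumed): declaring a
# variable-length numeric column and inspecting its parsing config.
#
#   col = _real_valued_var_len_column(
#       "measurements", default_value=0.0, dtype=dtypes.float32,
#       is_sparse=False)
#   col.config
#   # -> {"measurements": parsing_ops.FixedLenSequenceFeature(
#   #        [], dtypes.float32, allow_missing=True, default_value=0.0)}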
class _RealValuedColumn(
_FeatureColumn,
fc_core._DenseColumn, # pylint: disable=protected-access
collections.namedtuple(
"_RealValuedColumn",
["column_name", "dimension", "default_value", "dtype", "normalizer"])):
"""Represents a real valued feature column also known as continuous features.
Instances of this class are immutable. The dictionary returned by InputBuilder
contains a ("column_name", Tensor) pair with a Tensor shape of
(batch_size, dimension).
"""
def __new__(cls, column_name, dimension, default_value,
dtype, normalizer):
if default_value is not None:
default_value = tuple(default_value)
return super(_RealValuedColumn, cls).__new__(cls, column_name, dimension,
default_value, dtype,
normalizer)
@property
def name(self):
return self.column_name
@property
def config(self):
default_value = self.default_value
if default_value is not None:
default_value = list(default_value)
return {self.column_name: parsing_ops.FixedLenFeature([self.dimension],
self.dtype,
default_value)}
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self._key_without_properties(["normalizer"])
@property
def normalizer_fn(self):
"""Returns the function used to normalize the column."""
return self.normalizer
def _normalized_input_tensor(self, input_tensor):
"""Returns the input tensor after custom normalization is applied."""
return (self.normalizer(input_tensor) if self.normalizer is not None else
input_tensor)
def insert_transformed_feature(self, columns_to_tensors):
"""Apply transformation and inserts it into columns_to_tensors.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have _FeatureColumn
as a key too. That means that _FeatureColumn is already transformed.
"""
# Transform the input tensor according to the normalizer function.
input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
columns_to_tensors[self] = math_ops.to_float(input_tensor)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
input_tensor = self._to_dense_tensor(input_tensor)
if input_tensor.dtype != dtypes.float32:
input_tensor = math_ops.to_float(input_tensor)
return _reshape_real_valued_tensor(input_tensor, output_rank, self.name)
def _to_dense_tensor(self, input_tensor):
return input_tensor
@property
def _variable_shape(self):
return tensor_shape.TensorShape([self.dimension])
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
return inputs.get(self)
def _transform_feature(self, inputs):
return math_ops.to_float(
self._normalized_input_tensor(inputs.get(self.name)))
@property
def _parse_example_spec(self):
return self.config
def real_valued_column(column_name,
dimension=1,
default_value=None,
dtype=dtypes.float32,
normalizer=None):
"""Creates a `_RealValuedColumn` for dense numeric data.
Args:
column_name: A string defining real valued column name.
dimension: An integer specifying dimension of the real valued column.
The default is 1.
default_value: A single value compatible with dtype or a list of values
compatible with dtype which the column takes on during tf.Example parsing
if data is missing. When dimension is not None, a default value of None
will cause tf.parse_example to fail if an example does not contain this
column. If a single value is provided, the same value will be applied as
the default value for every dimension. If a list of values is provided,
the length of the list should be equal to the value of `dimension`.
      For variable-length features, use _real_valued_var_len_column instead.
dtype: defines the type of values. Default value is tf.float32. Must be a
non-quantized, real integer or floating point type.
normalizer: If not None, a function that can be used to normalize the value
of the real valued column after default_value is applied for parsing.
Normalizer function takes the input tensor as its argument, and returns
the output tensor. (e.g. lambda x: (x - 3.0) / 4.2). Note that for
variable length columns, the normalizer should expect an input_tensor of
type `SparseTensor`.
Returns:
A _RealValuedColumn.
Raises:
TypeError: if dimension is not an int
ValueError: if dimension is not a positive integer
    ValueError: if default_value is a list but its length is not equal to the
      value of `dimension`.
TypeError: if default_value is not compatible with dtype.
ValueError: if dtype is not convertible to tf.float32.
"""
if dimension is None:
raise TypeError("dimension must be an integer. Use the "
"_real_valued_var_len_column for variable length features."
"dimension: {}, column_name: {}".format(dimension,
column_name))
if not isinstance(dimension, int):
raise TypeError("dimension must be an integer. "
"dimension: {}, column_name: {}".format(dimension,
column_name))
if dimension < 1:
raise ValueError("dimension must be greater than 0. "
"dimension: {}, column_name: {}".format(dimension,
column_name))
if not (dtype.is_integer or dtype.is_floating):
raise ValueError("dtype must be convertible to float. "
"dtype: {}, column_name: {}".format(dtype, column_name))
if default_value is None:
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if isinstance(default_value, int):
if dtype.is_integer:
default_value = ([default_value for _ in range(dimension)] if dimension
else [default_value])
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if dtype.is_floating:
default_value = float(default_value)
default_value = ([default_value for _ in range(dimension)] if dimension
else [default_value])
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if isinstance(default_value, float):
if dtype.is_floating and (not dtype.is_integer):
default_value = ([default_value for _ in range(dimension)] if dimension
else [default_value])
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if isinstance(default_value, list):
if len(default_value) != dimension:
raise ValueError(
"The length of default_value must be equal to dimension. "
"default_value: {}, dimension: {}, column_name: {}".format(
default_value, dimension, column_name))
# Check if the values in the list are all integers or are convertible to
# floats.
is_list_all_int = True
is_list_all_float = True
for v in default_value:
if not isinstance(v, int):
is_list_all_int = False
if not (isinstance(v, float) or isinstance(v, int)):
is_list_all_float = False
if is_list_all_int:
if dtype.is_integer:
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
elif dtype.is_floating:
default_value = [float(v) for v in default_value]
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if is_list_all_float:
if dtype.is_floating and (not dtype.is_integer):
default_value = [float(v) for v in default_value]
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
raise TypeError("default_value must be compatible with dtype. "
"default_value: {}, dtype: {}, column_name: {}".format(
default_value, dtype, column_name))
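# Illustrative usage sketch (added for documentation; not part of the original
# module, and the feature names are assumed): fixed-length dense columns.
#
#   price = real_valued_column("price")                # dimension=1, no default
#   scores = real_valued_column("scores", dimension=3, default_value=0.0)
#   scores.config
#   # -> {"scores": parsing_ops.FixedLenFeature([3], dtypes.float32,
#   #                                           [0.0, 0.0, 0.0])}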
class _BucketizedColumn(
_FeatureColumn,
fc_core._CategoricalColumn, # pylint: disable=protected-access
fc_core._DenseColumn, # pylint: disable=protected-access
collections.namedtuple("_BucketizedColumn", ["source_column",
"boundaries"])):
"""Represents a bucketization transformation also known as binning.
Instances of this class are immutable. Values in `source_column` will be
bucketized based on `boundaries`.
For example, if the inputs are:
boundaries = [0, 10, 100]
source_column = [[-5], [150], [10], [0], [4], [19]]
then the bucketized feature will be:
output = [[0], [3], [2], [1], [1], [2]]
Attributes:
source_column: A _RealValuedColumn defining dense column.
boundaries: A list or tuple of floats specifying the boundaries. It has to
      be sorted. [a, b, c] defines the following buckets: (-inf., a), [a, b),
[b, c), [c, inf.)
Raises:
ValueError: if 'boundaries' is empty or not sorted.
"""
def __new__(cls, source_column, boundaries):
if not isinstance(source_column, _RealValuedColumn):
raise TypeError("source_column must be an instance of _RealValuedColumn. "
"source_column: {}".format(source_column))
if source_column.dimension is None:
raise ValueError("source_column must have a defined dimension. "
"source_column: {}".format(source_column))
if (not isinstance(boundaries, list) and
not isinstance(boundaries, tuple)) or not boundaries:
raise ValueError("boundaries must be a non-empty list or tuple. "
"boundaries: {}".format(boundaries))
# We allow bucket boundaries to be monotonically increasing
# (ie a[i+1] >= a[i]). When two bucket boundaries are the same, we
# de-duplicate.
sanitized_boundaries = []
for i in range(len(boundaries) - 1):
if boundaries[i] == boundaries[i + 1]:
continue
elif boundaries[i] < boundaries[i + 1]:
sanitized_boundaries.append(boundaries[i])
else:
raise ValueError("boundaries must be a sorted list. "
"boundaries: {}".format(boundaries))
sanitized_boundaries.append(boundaries[len(boundaries) - 1])
return super(_BucketizedColumn, cls).__new__(cls, source_column,
tuple(sanitized_boundaries))
@property
def name(self):
return "{}_bucketized".format(self.source_column.name)
@property
def length(self):
"""Returns total number of buckets."""
return len(self.boundaries) + 1
@property
def config(self):
return self.source_column.config
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
if output_rank != 2:
raise ValueError("BucketizedColumn currently only supports output_rank=2")
return array_ops.reshape(
array_ops.one_hot(
math_ops.to_int64(input_tensor),
self.length,
1.,
0.,
name="one_hot"), [-1, self.length * self.source_column.dimension],
name="reshape")
def to_sparse_tensor(self, input_tensor):
"""Creates a SparseTensor from the bucketized Tensor."""
dimension = self.source_column.dimension
batch_size = array_ops.shape(input_tensor, name="shape")[0]
if dimension > 1:
i1 = array_ops.reshape(
array_ops.tile(
array_ops.expand_dims(
math_ops.range(0, batch_size), 1, name="expand_dims"),
[1, dimension],
name="tile"), [-1],
name="reshape")
i2 = array_ops.tile(
math_ops.range(0, dimension), [batch_size], name="tile")
# Flatten the bucket indices and unique them across dimensions
# E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
bucket_indices = array_ops.reshape(
input_tensor, [-1], name="reshape") + self.length * i2
else:
# Simpler indices when dimension=1
i1 = math_ops.range(0, batch_size)
i2 = array_ops.zeros([batch_size], dtype=dtypes.int32, name="zeros")
bucket_indices = array_ops.reshape(input_tensor, [-1], name="reshape")
indices = math_ops.to_int64(array_ops.transpose(array_ops.stack((i1, i2))))
shape = math_ops.to_int64(array_ops.stack([batch_size, dimension]))
sparse_id_values = sparse_tensor_py.SparseTensor(
indices, bucket_indices, shape)
return sparse_id_values
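  # Worked example for the multi-dimensional branch above (added comments, not
  # in the original source). With source_column.dimension = 2 and 3 boundaries
  # (so self.length = 4), a bucketized input [[0, 3], [2, 1]] flattens to
  # [0, 3, 2, 1] and i2 = [0, 1, 0, 1], giving bucket_indices =
  # [0, 3 + 4, 2, 1 + 4] = [0, 7, 2, 5]; ids for dimension d therefore occupy
  # the range [d * 4, (d + 1) * 4), matching the comment about 2nd-dimension
  # indices running from k to 2*k-1.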
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=self.to_sparse_tensor(input_tensor),
weight_tensor=None,
vocab_size=self.length * self.source_column.dimension,
initializer=init_ops.zeros_initializer(),
combiner="sum")
def _transform_feature(self, inputs):
"""Handles cross transformation."""
# Bucketize the source column.
return bucketization_op.bucketize(
inputs.get(self.source_column),
boundaries=list(self.boundaries),
name="bucketize")
def insert_transformed_feature(self, columns_to_tensors):
"""Handles sparse column to id conversion."""
columns_to_tensors[self] = self._transform_feature(
_LazyBuilderByColumnsToTensor(columns_to_tensors))
@property
def _parse_example_spec(self):
return self.config
@property
def _num_buckets(self):
return self.length * self.source_column.dimension
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return fc_core._CategoricalColumn.IdWeightPair( # pylint: disable=protected-access
self.to_sparse_tensor(inputs.get(self)), None)
@property
def _variable_shape(self):
return tensor_shape.TensorShape(
[self.length * self.source_column.dimension])
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
return self._to_dnn_input_layer(
inputs.get(self), weight_collections, trainable)
def bucketized_column(source_column, boundaries):
"""Creates a _BucketizedColumn for discretizing dense input.
Args:
source_column: A _RealValuedColumn defining dense column.
boundaries: A list or tuple of floats specifying the boundaries. It has to
be sorted.
Returns:
A _BucketizedColumn.
Raises:
ValueError: if 'boundaries' is empty or not sorted.
"""
return _BucketizedColumn(source_column, boundaries)
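# Illustrative usage sketch (added for documentation; not part of the original
# module, and the feature name "price" is assumed):
#
#   price = real_valued_column("price")
#   price_buckets = bucketized_column(price, boundaries=[0., 10., 100.])
#   # price_buckets.length == 4: (-inf, 0), [0, 10), [10, 100), [100, inf)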
class _CrossedColumn(
_FeatureColumn,
fc_core._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple("_CrossedColumn", [
"columns", "hash_bucket_size", "hash_key", "combiner",
"ckpt_to_load_from", "tensor_name_in_ckpt"
])):
"""Represents a cross transformation also known as conjunction or combination.
Instances of this class are immutable. It crosses given `columns`. Crossed
column output will be hashed to hash_bucket_size.
Conceptually, transformation can be thought as:
Hash(cartesian product of features in columns) % `hash_bucket_size`
For example, if the columns are
SparseTensor referred by first column: shape = [2, 2]
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
    SparseTensor referred by second column: shape = [2, 1]
[0, 0]: "d"
[1, 0]: "e"
then crossed feature will look like:
shape = [2, 2]
[0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
[1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
[1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
Attributes:
columns: An iterable of _FeatureColumn. Items can be an instance of
_SparseColumn, _CrossedColumn, or _BucketizedColumn.
hash_bucket_size: An int that is > 1. The number of buckets.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"sum" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an example
      level normalization on the column::
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
Raises:
TypeError: if all items in columns are not an instance of _SparseColumn,
_CrossedColumn, or _BucketizedColumn.
ValueError: if hash_bucket_size is not > 1 or len(columns) is not > 1. Also,
if only one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified.
"""
@staticmethod
def _assert_is_crossable(column):
if isinstance(column, (_SparseColumn, _CrossedColumn, _BucketizedColumn)):
return
raise TypeError("columns must be a set of _SparseColumn, "
"_CrossedColumn, or _BucketizedColumn instances. "
"(column {} is a {})".format(column,
column.__class__.__name__))
def __new__(cls,
columns,
hash_bucket_size,
hash_key,
combiner="sum",
ckpt_to_load_from=None,
tensor_name_in_ckpt=None):
for column in columns:
_CrossedColumn._assert_is_crossable(column)
if len(columns) < 2:
raise ValueError("columns must contain at least 2 elements. "
"columns: {}".format(columns))
if hash_bucket_size < 2:
raise ValueError("hash_bucket_size must be at least 2. "
"hash_bucket_size: {}".format(hash_bucket_size))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError("Must specify both `ckpt_to_load_from` and "
"`tensor_name_in_ckpt` or none of them.")
sorted_columns = sorted(
[column for column in columns], key=lambda column: column.name)
return super(_CrossedColumn, cls).__new__(cls, tuple(sorted_columns),
hash_bucket_size, hash_key,
combiner,
ckpt_to_load_from,
tensor_name_in_ckpt)
@property
def name(self):
sorted_names = sorted([column.name for column in self.columns])
return "_X_".join(sorted_names)
@property
def config(self):
config = {}
for column in self.columns:
config.update(_get_feature_config(column))
return config
@property
def length(self):
"""Returns total number of buckets."""
return self.hash_bucket_size
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def id_tensor(self, input_tensor):
"""Returns the id tensor from the given transformed input_tensor."""
return input_tensor
def weight_tensor(self, input_tensor):
"""Returns the weight tensor from the given transformed input_tensor."""
del input_tensor
return None
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
del input_tensor
del weight_collections
del trainable
del output_rank
raise ValueError("CrossedColumn is not supported in DNN. "
"Please use embedding_column. column: {}".format(self))
def _checkpoint_path(self):
if self.ckpt_to_load_from is not None:
return self.ckpt_to_load_from, self.tensor_name_in_ckpt
return None
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=input_tensor,
weight_tensor=None,
vocab_size=self.length,
initializer=init_ops.zeros_initializer(),
combiner=self.combiner)
def _transform_feature(self, inputs):
"""Handles cross transformation."""
def _collect_leaf_level_columns(cross):
"""Collects base columns contained in the cross."""
leaf_level_columns = []
for c in cross.columns:
if isinstance(c, _CrossedColumn):
leaf_level_columns.extend(_collect_leaf_level_columns(c))
else:
leaf_level_columns.append(c)
return leaf_level_columns
feature_tensors = []
for c in _collect_leaf_level_columns(self):
if isinstance(c, _SparseColumn):
feature_tensors.append(inputs.get(c.name))
else:
if isinstance(c, _BucketizedColumn):
feature_tensors.append(c.to_sparse_tensor(inputs.get(c)))
else:
feature_tensors.append(inputs.get(c))
return sparse_feature_cross_op.sparse_feature_cross(
feature_tensors,
hashed_output=True,
num_buckets=self.hash_bucket_size,
hash_key=self.hash_key,
name="cross")
def insert_transformed_feature(self, columns_to_tensors):
"""Handles sparse column to id conversion."""
columns_to_tensors[self] = self._transform_feature(
_LazyBuilderByColumnsToTensor(columns_to_tensors))
@property
def _parse_example_spec(self):
return self.config
@property
def _num_buckets(self):
return self.length
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return fc_core._CategoricalColumn.IdWeightPair(inputs.get(self), None) # pylint: disable=protected-access
class _LazyBuilderByColumnsToTensor(object):
def __init__(self, columns_to_tensors):
self._columns_to_tensors = columns_to_tensors
def get(self, key):
"""Gets the transformed feature column."""
if key in self._columns_to_tensors:
return self._columns_to_tensors[key]
if isinstance(key, str):
raise ValueError(
"features dictionary doesn't contain key ({})".format(key))
if not isinstance(key, _FeatureColumn):
raise TypeError('"key" must be either a "str" or "_FeatureColumn". '
"Provided: {}".format(key))
key.insert_transformed_feature(self._columns_to_tensors)
return self._columns_to_tensors[key]
def crossed_column(columns, hash_bucket_size, combiner="sum",
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
hash_key=None):
"""Creates a _CrossedColumn for performing feature crosses.
Args:
columns: An iterable of _FeatureColumn. Items can be an instance of
_SparseColumn, _CrossedColumn, or _BucketizedColumn.
hash_bucket_size: An int that is > 1. The number of buckets.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"sum" the default. "sqrtn" often achieves good accuracy, in particular
    with bag-of-words columns. Each of these can be thought of as an example
    level normalization on the column::
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
Returns:
A _CrossedColumn.
Raises:
TypeError: if any item in columns is not an instance of _SparseColumn,
_CrossedColumn, or _BucketizedColumn, or
hash_bucket_size is not an int.
ValueError: if hash_bucket_size is not > 1 or
len(columns) is not > 1.
"""
return _CrossedColumn(
columns,
hash_bucket_size,
hash_key,
combiner=combiner,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt)
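# Illustrative usage sketch (added for documentation; not part of the original
# module). The feature names are assumed, and sparse_column_with_hash_bucket is
# assumed to be the sparse-column constructor defined earlier in this module.
#
#   country = sparse_column_with_hash_bucket("country", hash_bucket_size=100)
#   language = sparse_column_with_hash_bucket("language", hash_bucket_size=100)
#   country_x_language = crossed_column([country, language],
#                                       hash_bucket_size=10000)
#   # country_x_language.name == "country_X_language"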
class DataFrameColumn(_FeatureColumn,
collections.namedtuple("DataFrameColumn",
["column_name", "series"])):
"""Represents a feature column produced from a `DataFrame`.
Instances of this class are immutable. A `DataFrame` column may be dense or
sparse, and may have any shape, with the constraint that dimension 0 is
batch_size.
Args:
column_name: a name for this column
series: a `Series` to be wrapped, which has already had its base features
substituted with `PredefinedSeries`.
"""
def __new__(cls, column_name, series):
return super(DataFrameColumn, cls).__new__(cls, column_name, series)
@property
def name(self):
return self.column_name
@property
def config(self):
return self.series.required_base_features()
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self.name
def insert_transformed_feature(self, columns_to_tensors):
# The cache must already contain mappings from the expected base feature
# names to Tensors.
# Passing columns_to_tensors as the cache here means that multiple outputs
# of the transform will be cached, keyed by the repr of their associated
# TransformedSeries.
# The specific requested output ends up in columns_to_tensors twice: once
# keyed by the TransformedSeries repr, and once keyed by this
# DataFrameColumn instance.
columns_to_tensors[self] = self.series.build(columns_to_tensors)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
if input_tensor.dtype != dtypes.float32:
input_tensor = math_ops.to_float(input_tensor)
return _reshape_real_valued_tensor(input_tensor, output_rank, self.name)
def _to_dense_tensor(self, input_tensor):
return self._to_dnn_input_layer(input_tensor)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def _get_feature_config(feature_column):
"""Returns configuration for the base feature defined in feature_column."""
if not isinstance(feature_column, _FeatureColumn):
raise TypeError(
"feature_columns should only contain instances of _FeatureColumn. "
"Given column is {}".format(feature_column))
if isinstance(feature_column, (_SparseColumn, _WeightedSparseColumn,
_EmbeddingColumn, _RealValuedColumn,
_RealValuedVarLenColumn,
_BucketizedColumn, _CrossedColumn,
_OneHotColumn, _ScatteredEmbeddingColumn)):
return feature_column.config
raise TypeError("Not supported _FeatureColumn type. "
"Given column is {}".format(feature_column))
def create_feature_spec_for_parsing(feature_columns):
"""Helper that prepares features config from input feature_columns.
The returned feature config can be used as arg 'features' in tf.parse_example.
Typical usage example:
```python
# Define features and transformations
feature_a = sparse_column_with_vocabulary_file(...)
feature_b = real_valued_column(...)
feature_c_bucketized = bucketized_column(real_valued_column("feature_c"), ...)
feature_a_x_feature_c = crossed_column(
columns=[feature_a, feature_c_bucketized], ...)
feature_columns = set(
[feature_b, feature_c_bucketized, feature_a_x_feature_c])
batch_examples = tf.parse_example(
serialized=serialized_examples,
features=create_feature_spec_for_parsing(feature_columns))
```
For the above example, create_feature_spec_for_parsing would return the dict:
{
"feature_a": parsing_ops.VarLenFeature(tf.string),
"feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
"feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
}
Args:
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn, unless
feature_columns is a dict -- in which case, this should be true of all
values in the dict.
Returns:
A dict mapping feature keys to FixedLenFeature or VarLenFeature values.
"""
if isinstance(feature_columns, dict):
feature_columns = feature_columns.values()
features_config = {}
for column in feature_columns:
features_config.update(_get_feature_config(column))
return features_config
def _create_sequence_feature_spec_for_parsing(sequence_feature_columns,
allow_missing_by_default=False):
"""Prepares a feature spec for parsing `tf.SequenceExample`s.
Args:
sequence_feature_columns: an iterable containing all the feature columns.
All items should be instances of classes derived from `_FeatureColumn`.
allow_missing_by_default: whether to set `allow_missing=True` by default for
`FixedLenSequenceFeature`s.
Returns:
A dict mapping feature keys to `FixedLenSequenceFeature` or `VarLenFeature`.
"""
feature_spec = create_feature_spec_for_parsing(sequence_feature_columns)
sequence_feature_spec = {}
for key, feature in feature_spec.items():
if isinstance(feature, parsing_ops.VarLenFeature):
sequence_feature = feature
elif (isinstance(feature, parsing_ops.FixedLenFeature) or
isinstance(feature, parsing_ops.FixedLenSequenceFeature)):
default_is_set = feature.default_value is not None
if default_is_set:
logging.warning(
'Found default value {} for feature "{}". Ignoring this value and '
'setting `allow_missing=True` instead.'.
format(feature.default_value, key))
sequence_feature = parsing_ops.FixedLenSequenceFeature(
shape=feature.shape,
dtype=feature.dtype,
allow_missing=(allow_missing_by_default or default_is_set))
else:
raise TypeError(
"Unsupported feature type: {}".format(type(feature).__name__))
sequence_feature_spec[key] = sequence_feature
return sequence_feature_spec
def make_place_holder_tensors_for_base_features(feature_columns):
"""Returns placeholder tensors for inference.
Args:
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn.
Returns:
A dict mapping feature keys to SparseTensors (sparse columns) or
placeholder Tensors (dense columns).
"""
# Get dict mapping features to FixedLenFeature or VarLenFeature values.
dict_for_parse_example = create_feature_spec_for_parsing(feature_columns)
placeholders = {}
for column_name, column_type in dict_for_parse_example.items():
if isinstance(column_type, parsing_ops.VarLenFeature):
# Sparse placeholder for sparse tensors.
placeholders[column_name] = array_ops.sparse_placeholder(
column_type.dtype, name="Placeholder_{}".format(column_name))
else:
# Simple placeholder for dense tensors.
placeholders[column_name] = array_ops.placeholder(
column_type.dtype,
shape=(None, column_type.shape[0]),
name="Placeholder_{}".format(column_name))
return placeholders
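# Illustrative usage sketch (added for documentation; not part of the original
# module), reusing the column names from create_feature_spec_for_parsing above:
#
#   placeholders = make_place_holder_tensors_for_base_features(
#       [feature_b, feature_c_bucketized, feature_a_x_feature_c])
#   # placeholders["feature_a"] is a sparse placeholder (VarLenFeature), while
#   # placeholders["feature_b"] and placeholders["feature_c"] are dense
#   # placeholders of shape (None, 1).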
class _SparseIdLookupConfig(
collections.namedtuple("_SparseIdLookupConfig",
["vocabulary_file", "keys", "num_oov_buckets",
"vocab_size", "default_value"])):
"""Defines lookup configuration for a sparse feature.
  An immutable object that defines the lookup table configuration used by
  tf.feature_to_id_v2.
Attributes:
vocabulary_file: The vocabulary filename. vocabulary_file cannot be combined
with keys.
keys: A 1-D string iterable that specifies the mapping of strings to
      indices. It means a feature in keys will map to its index in keys.
num_oov_buckets: The number of out-of-vocabulary buckets. If zero all out of
vocabulary features will be ignored.
vocab_size: Number of the elements in the vocabulary.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
"""
def __new__(cls,
vocabulary_file=None,
keys=None,
num_oov_buckets=0,
vocab_size=None,
default_value=-1):
return super(_SparseIdLookupConfig, cls).__new__(cls, vocabulary_file, keys,
num_oov_buckets,
vocab_size, default_value)
| apache-2.0 |
laurenrevere/osf.io | scripts/osfstorage/glacier_audit.py | 6 | 3553 | #!/usr/bin/env python
# encoding: utf-8
"""Verify that all `OsfStorageFileVersion` records created earlier than two
days before the latest inventory report are contained in the inventory, point
to the correct Glacier archive, and have an archive of the correct size.
Should be run after `glacier_inventory.py`.
"""
import gc
import json
import logging
from dateutil.parser import parse as parse_date
from dateutil.relativedelta import relativedelta
from framework.celery_tasks import app as celery_app
from website.app import init_app
from osf.models import FileVersion
from scripts import utils as scripts_utils
from scripts.osfstorage import settings as storage_settings
from scripts.osfstorage import utils as storage_utils
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# Glacier inventories take about four hours to generate and reflect files added
# about a day before the request is made; only check records created over two
# days before the job.
DELTA_DATE = relativedelta(days=2)
class AuditError(Exception):
pass
class NotFound(AuditError):
pass
class BadSize(AuditError):
pass
class BadArchiveId(AuditError):
pass
def get_targets(date):
return FileVersion.objects.filter(
created__lt=date - DELTA_DATE, metadata__has_key='archive', location__isnull=False
).iterator()
def check_glacier_version(version, inventory):
data = inventory.get(version.metadata['archive'])
if data is None:
raise NotFound('Glacier archive for version {} not found'.format(version._id))
if version.metadata['archive'] != data['ArchiveId']:
raise BadArchiveId(
'Glacier archive for version {} has incorrect archive ID {} (expected {})'.format(
version._id,
data['ArchiveId'],
version.metadata['archive'],
)
)
if (version.size or version.metadata.get('size')) != data['Size']:
raise BadSize(
'Glacier archive for version {} has incorrect size {} (expected {})'.format(
version._id,
data['Size'],
version.size,
)
)
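# Illustrative sketch (added comments, not part of the original script): the
# inventory consumed by check_glacier_version maps archive ids to entries
# shaped like the Glacier inventory output parsed in main() below; the id and
# size here are placeholders.
#
#   inventory = {
#       "example-archive-id": {"ArchiveId": "example-archive-id",
#                              "Size": 1048576},
#   }
#   # check_glacier_version(version, inventory) raises NotFound, BadArchiveId
#   # or BadSize when the version's metadata disagrees with its entry.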
def main(job_id=None):
glacier = storage_utils.get_glacier_resource()
if job_id:
job = glacier.Job(
storage_settings.GLACIER_VAULT_ACCOUNT_ID,
storage_settings.GLACIER_VAULT_NAME,
job_id,
)
else:
vault = storage_utils.get_glacier_resource().Vault(
storage_settings.GLACIER_VAULT_ACCOUNT_ID,
storage_settings.GLACIER_VAULT_NAME
)
        jobs = list(vault.completed_jobs.all())
if not jobs:
raise RuntimeError('No completed jobs found')
job = sorted(jobs, key=lambda job: job.creation_date)[-1]
response = job.get_output()
output = json.loads(response['body'].read().decode('utf-8'))
creation_date = parse_date(job.creation_date)
inventory = {
each['ArchiveId']: each
for each in output['ArchiveList']
}
for idx, version in enumerate(get_targets(creation_date)):
try:
check_glacier_version(version, inventory)
except AuditError as error:
logger.error(str(error))
if idx % 1000 == 0:
gc.collect()
@celery_app.task(name='scripts.osfstorage.glacier_audit')
def run_main(job_id=None, dry_run=True):
init_app(set_backends=True, routes=False)
if not dry_run:
scripts_utils.add_file_logger(logger, __file__)
main(job_id=job_id)
| apache-2.0 |
tdenniston/Halide | python_bindings/tutorial/lesson_11_cross_compilation.py | 6 | 6731 | #!/usr/bin/python3
# Halide tutorial lesson 11.
# This lesson demonstrates how to use Halide as a cross-compiler.
# This lesson can be built by invoking the command:
# make tutorial_lesson_11_cross_compilation
# in a shell with the current directory at the top of the halide source tree.
# Otherwise, see the platform-specific compiler invocations below.
# On linux, you can compile and run it like so:
# g++ lesson_11*.cpp -g -std=c++11 -I ../include -L ../bin -lHalide -lpthread -ldl -o lesson_11
# LD_LIBRARY_PATH=../bin ./lesson_11
# On os x:
# g++ lesson_11*.cpp -g -std=c++11 -I ../include -L ../bin -lHalide -o lesson_11
# DYLD_LIBRARY_PATH=../bin ./lesson_11
#include "Halide.h"
#include <stdio.h>
#using namespace Halide
from halide import *
from struct import unpack
def main():
# We'll define the simple one-stage pipeline that we used in lesson 10.
brighter = Func("brighter")
x, y = Var("x"), Var("y")
# Declare the arguments.
offset = Param(UInt(8))
input = ImageParam(UInt(8), 2)
args = ArgumentsVector()
args.append(input)
args.append(offset)
# Define the Func.
brighter[x, y] = input[x, y] + offset
# Schedule it.
brighter.vectorize(x, 16).parallel(y)
# The following line is what we did in lesson 10. It compiles an
# object file suitable for the system that you're running this
# program on. For example, if you compile and run this file on
# 64-bit linux on an x86 cpu with sse4.1, then the generated code
# will be suitable for 64-bit linux on x86 with sse4.1.
brighter.compile_to_file("lesson_11_host", args)
# We can also compile object files suitable for other cpus and
# operating systems. You do this with an optional third argument
# to compile_to_file which specifies the target to compile for.
create_android = True
create_windows = True
create_ios = True
if create_android:
# Let's use this to compile a 32-bit arm android version of this code:
target = Target()
target.os = TargetOS.Android # The operating system
target.arch = TargetArch.ARM # The CPU architecture
target.bits = 32 # The bit-width of the architecture
arm_features = FeaturesVector() # A list of features to set
target.set_features(arm_features)
# Pass the target as the last argument.
brighter.compile_to_file("lesson_11_arm_32_android", args, target)
if create_windows:
# And now a Windows object file for 64-bit x86 with AVX and SSE 4.1:
target = Target()
target.os = TargetOS.Windows
target.arch = TargetArch.X86
target.bits = 64
x86_features = FeaturesVector()
x86_features.append(TargetFeature.AVX)
x86_features.append(TargetFeature.SSE41)
target.set_features(x86_features)
brighter.compile_to_file("lesson_11_x86_64_windows", args, target)
if create_ios:
# And finally an iOS mach-o object file for one of Apple's 32-bit
# ARM processors - the A6. It's used in the iPhone 5. The A6 uses
# a slightly modified ARM architecture called ARMv7s. We specify
# this using the target features field. Support for Apple's
# 64-bit ARM processors is very new in llvm, and still somewhat
# flaky.
target = Target()
target.os = TargetOS.IOS
target.arch = TargetArch.ARM
target.bits = 32
armv7s_features = FeaturesVector()
armv7s_features.append(TargetFeature.ARMv7s)
target.set_features(armv7s_features)
brighter.compile_to_file("lesson_11_arm_32_ios", args, target)
# Now let's check these files are what they claim, by examining
# their first few bytes.
if create_android:
# 32-arm android object files start with the magic bytes:
# uint8_t []
arm_32_android_magic = [0x7f, ord('E'), ord('L'), ord('F'), # ELF format
1, # 32-bit
1, # 2's complement little-endian
1] # Current version of elf
length = len(arm_32_android_magic)
f = open("lesson_11_arm_32_android.o", "rb")
try:
header_bytes = f.read(length)
except:
print("Android object file not generated")
return -1
f.close()
header = list(unpack("B"*length, header_bytes))
if header != arm_32_android_magic:
print([x == y for x, y in zip(header, arm_32_android_magic)])
raise Exception("Unexpected header bytes in 32-bit arm object file.")
return -1
if create_windows:
# 64-bit windows object files start with the magic 16-bit value 0x8664
# (presumably referring to x86-64)
# uint8_t []
win_64_magic = [0x64, 0x86]
f = open("lesson_11_x86_64_windows.obj", "rb")
try:
header_bytes = f.read(2)
except:
print("Windows object file not generated")
return -1
f.close()
header = list(unpack("B"*2, header_bytes))
if header != win_64_magic:
raise Exception("Unexpected header bytes in 64-bit windows object file.")
return -1
if create_ios:
# 32-bit arm iOS mach-o files start with the following magic bytes:
# uint32_t []
arm_32_ios_magic = [
0xfeedface, # Mach-o magic bytes
#0xfe, 0xed, 0xfa, 0xce, # Mach-o magic bytes
12, # CPU type is ARM
11, # CPU subtype is ARMv7s
1] # It's a relocatable object file.
f = open("lesson_11_arm_32_ios.o", "rb")
try:
header_bytes = f.read(4*4)
except:
print("ios object file not generated")
return -1
f.close()
header = list(unpack("I"*4, header_bytes))
if header != arm_32_ios_magic:
raise Exception("Unexpected header bytes in 32-bit arm ios object file.")
return -1
# It looks like the object files we produced are plausible for
# those targets. We'll count that as a success for the purposes
# of this tutorial. For a real application you'd then need to
# figure out how to integrate Halide into your cross-compilation
# toolchain. There are several small examples of this in the
# Halide repository under the apps folder. See HelloAndroid and
# HelloiOS here:
# https:#github.com/halide/Halide/tree/master/apps/
print("Success!")
return 0
if __name__ == "__main__":
main()
| mit |
wzmao/mbio | format.py | 2 | 1229 | import os
import sys
def autop(x, allyes=0):
print x
a = os.popen('autopep8 ' + x + ' -d').read()
if a == '':
print '>>>> No change.'
else:
print a
print x
if not allyes:
a = raw_input("Do you want to change?(y/n):")
if a == 'y':
a = os.popen('autopep8 ' + x + ' -i').read()
else:
print "Didn't change it."
else:
print 'Allyes=1 so correct it automatically.'
a = os.popen('autopep8 ' + x + ' -i').read()
def check(x, allyes=0):
l = os.listdir(x)
for i in l:
if os.path.isfile(os.path.join(x, i)) and os.path.join(x, i).endswith('.py') and not i.startswith('.'):
autop(os.path.join(x, i), allyes=allyes)
if os.path.isdir(os.path.join(x, i)) and not i.startswith('.') and i != 'build':
check(os.path.join(x, i), allyes=allyes)
print '#' * int(os.popen('stty size').read().split()[-1])
if len(sys.argv) > 1 and any([i in sys.argv[1:] for i in ['y', 'Y', '-y', '-Y']]):
allyes = 1
else:
allyes = 0
check(os.path.abspath(os.path.dirname(sys.argv[0])), allyes=allyes)
print '#' * int(os.popen('stty size').read().split()[-1])
| mit |
Pikecillo/genna | external/4Suite-XML-1.0.2/test/Xml/XPath/Core/test_nodeset_expr.py | 1 | 2062 | #!/usr/bin/env python
#
# File Name: File Name
#
# Documentation: http://docs.fourthought.com/file/name.html
#
def Test(tester):
tester.startGroup('Node-set Expressions')
tester.startTest('Creating test environment')
from Ft.Xml.XPath import ParsedExpr
from Ft.Xml.XPath import ParsedPredicateList
DomTree = tester.test_data['tree']
import DummyExpr
from DummyExpr import boolT, boolF
from DummyExpr import num3, numN4, num4p5
from DummyExpr import strPi, strText
nodeset0 = DummyExpr.DummyNodeSetExpr([])
nodeset1 = DummyExpr.DummyNodeSetExpr([DomTree.ROOT])
nodeset2 = DummyExpr.DummyNodeSetExpr([DomTree.ROOT, DomTree.CHILD1])
nodeset3 = DummyExpr.DummyNodeSetExpr([DomTree.CHILD1])
nodeset4 = DummyExpr.DummyNodeSetExpr([DomTree.CHILD3])
from Ft.Xml.XPath import Context
context1 = Context.Context(DomTree.CHILD1,1,2)
context2 = Context.Context(DomTree.CHILD2,2,2)
plT = ParsedPredicateList.ParsedPredicateList([boolT])
plF = ParsedPredicateList.ParsedPredicateList([boolF])
tests = {ParsedExpr.ParsedFilterExpr : [((nodeset2, plT), context1, list(nodeset2.val)),
((nodeset2, plF), context1, []),
],
ParsedExpr.ParsedPathExpr : [((0, nodeset2, nodeset1), context1, list(nodeset1.val)),
],
ParsedExpr.ParsedUnionExpr : [((nodeset2, nodeset1), context1, list(nodeset2.val)),
],
}
tester.testDone()
for (expr, boolTests) in tests.items():
for (args, context, expected) in boolTests:
p = apply(expr, args)
tester.startTest('Comparing %s' % repr(p))
result = p.evaluate(context)
tester.compare(result, expected)
tester.testDone()
tester.groupDone()
if __name__ == '__main__':
from Ft.Lib.TestSuite import Tester
tester = Tester.Tester()
Test(tester)
| gpl-2.0 |
AstroTech/atlassian-python-api | examples/bamboo/bamboo_label_based_cleaner.py | 2 | 2343 | import logging
from datetime import datetime
from datetime import timedelta
from atlassian import Bamboo
"""
Example shows how to clean up expired build results for specific label.
Feel free to modify OLDER_DAYS and LABEL parameters.
You can remove, after changing value for DRY_RUN variable
"""
logging.basicConfig(level=logging.ERROR)
BAMBOO_LOGIN = "admin"
BAMBOO_PASSWORD = "password"
BAMBOO_URL = "https://bamboo.example.com"
DRY_RUN = True
LABEL = "cores_found"
OLDER_DAYS = 60
def get_all_projects():
return [x["key"] for x in bamboo.projects(max_results=10000)]
def get_plans_from_project(project_key):
return [x["key"] for x in bamboo.project_plans(project_key, max_results=1000)]
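# Illustrative sketch (added comments, not part of the original example) of the
# expiry test applied in the loop below; the timestamp is a placeholder.
#
#   completed = datetime.strptime("2020-01-01T12:00:00.000000",
#                                 "%Y-%m-%dT%H:%M:%S.%f")
#   expired = datetime.now() > completed + timedelta(days=OLDER_DAYS)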
if __name__ == "__main__":
bamboo = Bamboo(url=BAMBOO_URL, username=BAMBOO_LOGIN, password=BAMBOO_PASSWORD, timeout=180)
projects = get_all_projects()
print("Start analyzing the {} projects".format(len(projects)))
for project in projects:
print("Inspecting {} project".format(project))
plans = get_plans_from_project(project)
print("Start analyzing the {} plans".format(len(plans)))
for plan in plans:
print("Inspecting {} plan".format(plan))
build_results = [
x for x in bamboo.results(plan_key=plan, label=LABEL, max_results=100, include_all_states=True)
]
for build in build_results:
build_key = build.get("buildResultKey") or None
print("Inspecting {} build".format(build_key))
build_value = bamboo.build_result(build_key)
build_complete_time = build_value.get("buildCompletedTime") or None
if not build_complete_time:
continue
datetimeObj = datetime.strptime(build_complete_time.split("+")[0] + "000", "%Y-%m-%dT%H:%M:%S.%f")
if datetime.now() > datetimeObj + timedelta(days=OLDER_DAYS):
                    print(
                        "Build {} is old; build completed on {}".format(
                            build_key, datetimeObj.strftime("%Y-%m-%d")
)
)
if not DRY_RUN:
print("Removing {} build".format(build_key))
bamboo.delete_build_result(build_key)
| apache-2.0 |
jfmartinez64/test | couchpotato/core/media/movie/_base/main.py | 15 | 14072 | import traceback
import time
from CodernityDB.database import RecordNotFound
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import splitString, getTitle, getImdb, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie import MovieTypeBase
import six
log = CPLog(__name__)
class MovieBase(MovieTypeBase):
_type = 'movie'
def __init__(self):
# Initialize this type
super(MovieBase, self).__init__()
self.initType()
addApiView('movie.add', self.addView, docs = {
'desc': 'Add new movie to the wanted list',
'return': {'type': 'object', 'example': """{
'success': True,
'movie': object
}"""},
'params': {
'identifier': {'desc': 'IMDB id of the movie your want to add.'},
'profile_id': {'desc': 'ID of quality profile you want the add the movie in. If empty will use the default profile.'},
'force_readd': {'desc': 'Force re-add even if movie already in wanted or manage. Default: True'},
'category_id': {'desc': 'ID of category you want the add the movie in. If empty will use no category.'},
'title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'},
}
})
addApiView('movie.edit', self.edit, docs = {
'desc': 'Add new movie to the wanted list',
'params': {
'id': {'desc': 'Movie ID(s) you want to edit.', 'type': 'int (comma separated)'},
'profile_id': {'desc': 'ID of quality profile you want the edit the movie to.'},
'category_id': {'desc': 'ID of category you want the add the movie in. If empty will use no category.'},
'default_title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'},
}
})
addEvent('movie.add', self.add)
addEvent('movie.update', self.update)
addEvent('movie.update_release_dates', self.updateReleaseDate)
def add(self, params = None, force_readd = True, search_after = True, update_after = True, notify_after = True, status = None):
if not params: params = {}
# Make sure it's a correct zero filled imdb id
params['identifier'] = getImdb(params.get('identifier', ''))
if not params.get('identifier'):
msg = 'Can\'t add movie without imdb identifier.'
log.error(msg)
fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg)
return False
elif not params.get('info'):
try:
is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), adding = True, single = True)
if not is_movie:
msg = 'Can\'t add movie, seems to be a TV show.'
log.error(msg)
fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg)
return False
except:
pass
info = params.get('info')
if not info or (info and len(info.get('titles', [])) == 0):
info = fireEvent('movie.info', merge = True, extended = False, identifier = params.get('identifier'))
# Allow force re-add overwrite from param
if 'force_readd' in params:
fra = params.get('force_readd')
force_readd = fra.lower() not in ['0', '-1'] if not isinstance(fra, bool) else fra
# Set default title
default_title = toUnicode(info.get('title'))
titles = info.get('titles', [])
counter = 0
def_title = None
for title in titles:
if (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == six.u('') and toUnicode(titles[0]) == title):
def_title = toUnicode(title)
break
counter += 1
if not def_title:
def_title = toUnicode(titles[0])
# Default profile and category
default_profile = {}
if (not params.get('profile_id') and status != 'done') or params.get('ignore_previous', False):
default_profile = fireEvent('profile.default', single = True)
cat_id = params.get('category_id')
try:
db = get_db()
media = {
'_t': 'media',
'type': 'movie',
'title': def_title,
'identifiers': {
'imdb': params.get('identifier')
},
'status': status if status else 'active',
'profile_id': params.get('profile_id') or default_profile.get('_id'),
'category_id': cat_id if cat_id is not None and len(cat_id) > 0 and cat_id != '-1' else None,
}
# Update movie info
try: del info['in_wanted']
except: pass
try: del info['in_library']
except: pass
media['info'] = info
new = False
previous_profile = None
try:
m = db.get('media', 'imdb-%s' % params.get('identifier'), with_doc = True)['doc']
try:
db.get('id', m.get('profile_id'))
previous_profile = m.get('profile_id')
except RecordNotFound:
pass
except:
log.error('Failed getting previous profile: %s', traceback.format_exc())
except:
new = True
m = db.insert(media)
# Update dict to be usable
m.update(media)
added = True
do_search = False
search_after = search_after and self.conf('search_on_add', section = 'moviesearcher')
onComplete = None
if new:
if search_after:
onComplete = self.createOnComplete(m['_id'])
search_after = False
elif force_readd:
# Clean snatched history
for release in fireEvent('release.for_media', m['_id'], single = True):
if release.get('status') in ['downloaded', 'snatched', 'seeding', 'done']:
if params.get('ignore_previous', False):
fireEvent('release.update_status', release['_id'], status = 'ignored')
else:
fireEvent('release.delete', release['_id'], single = True)
m['profile_id'] = (params.get('profile_id') or default_profile.get('_id')) if not previous_profile else previous_profile
m['category_id'] = cat_id if cat_id is not None and len(cat_id) > 0 else (m.get('category_id') or None)
m['last_edit'] = int(time.time())
m['tags'] = []
do_search = True
db.update(m)
else:
try: del params['info']
except: pass
log.debug('Movie already exists, not updating: %s', params)
added = False
# Trigger update info
if added and update_after:
# Do full update to get images etc
fireEventAsync('movie.update', m['_id'], default_title = params.get('title'), on_complete = onComplete)
# Remove releases
for rel in fireEvent('release.for_media', m['_id'], single = True):
                if rel['status'] == 'available':
db.delete(rel)
movie_dict = fireEvent('media.get', m['_id'], single = True)
if not movie_dict:
log.debug('Failed adding media, can\'t find it anymore')
return False
if do_search and search_after:
onComplete = self.createOnComplete(m['_id'])
onComplete()
if added and notify_after:
if params.get('title'):
message = 'Successfully added "%s" to your wanted list.' % params.get('title', '')
else:
title = getTitle(m)
if title:
message = 'Successfully added "%s" to your wanted list.' % title
else:
message = 'Successfully added to your wanted list.'
fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = message)
return movie_dict
except:
log.error('Failed adding media: %s', traceback.format_exc())
def addView(self, **kwargs):
add_dict = self.add(params = kwargs)
return {
'success': True if add_dict else False,
'movie': add_dict,
}
def edit(self, id = '', **kwargs):
try:
db = get_db()
ids = splitString(id)
for media_id in ids:
try:
m = db.get('id', media_id)
m['profile_id'] = kwargs.get('profile_id') or m['profile_id']
cat_id = kwargs.get('category_id')
if cat_id is not None:
m['category_id'] = cat_id if len(cat_id) > 0 else m['category_id']
# Remove releases
for rel in fireEvent('release.for_media', m['_id'], single = True):
                        if rel['status'] == 'available':
db.delete(rel)
# Default title
if kwargs.get('default_title'):
m['title'] = kwargs.get('default_title')
db.update(m)
fireEvent('media.restatus', m['_id'], single = True)
m = db.get('id', media_id)
movie_dict = fireEvent('media.get', m['_id'], single = True)
fireEventAsync('movie.searcher.single', movie_dict, on_complete = self.createNotifyFront(media_id))
except:
print traceback.format_exc()
log.error('Can\'t edit non-existing media')
return {
'success': True,
}
except:
log.error('Failed editing media: %s', traceback.format_exc())
return {
'success': False,
}
def update(self, media_id = None, identifier = None, default_title = None, extended = False):
"""
Update movie information inside media['doc']['info']
@param media_id: document id
@param default_title: default title, if empty, use first one or existing one
@param extended: update with extended info (parses more info, actors, images from some info providers)
@return: dict, with media
"""
if self.shuttingDown():
return
lock_key = 'media.get.%s' % media_id if media_id else identifier
self.acquireLock(lock_key)
media = {}
try:
db = get_db()
if media_id:
media = db.get('id', media_id)
else:
media = db.get('media', 'imdb-%s' % identifier, with_doc = True)['doc']
info = fireEvent('movie.info', merge = True, extended = extended, identifier = getIdentifier(media))
# Don't need those here
try: del info['in_wanted']
except: pass
try: del info['in_library']
except: pass
if not info or len(info) == 0:
log.error('Could not update, no movie info to work with: %s', identifier)
return False
# Update basic info
media['info'] = info
titles = info.get('titles', [])
log.debug('Adding titles: %s', titles)
# Define default title
if default_title:
def_title = None
if default_title:
counter = 0
for title in titles:
if title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == six.u('') and toUnicode(titles[0]) == title):
def_title = toUnicode(title)
break
counter += 1
if not def_title:
def_title = toUnicode(titles[0])
media['title'] = def_title
# Files
image_urls = info.get('images', [])
self.getPoster(media, image_urls)
db.update(media)
except:
log.error('Failed update media: %s', traceback.format_exc())
self.releaseLock(lock_key)
return media
def updateReleaseDate(self, media_id):
"""
Update release_date (eta) info only
@param media_id: document id
@return: dict, with dates dvd, theater, bluray, expires
"""
try:
db = get_db()
media = db.get('id', media_id)
if not media.get('info'):
media = self.update(media_id)
dates = media.get('info', {}).get('release_date')
else:
dates = media.get('info').get('release_date')
if dates and (dates.get('expires', 0) < time.time() or dates.get('expires', 0) > time.time() + (604800 * 4)) or not dates:
dates = fireEvent('movie.info.release_date', identifier = getIdentifier(media), merge = True)
media['info'].update({'release_date': dates})
db.update(media)
return dates
except:
log.error('Failed updating release dates: %s', traceback.format_exc())
return {}
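# Illustrative sketch (added comments, not part of the original plugin): other
# plugins can invoke the 'movie.add' handler registered in __init__ through the
# event bus; the IMDB id and title below are placeholders.
#
#   media = fireEvent('movie.add',
#                     params = {'identifier': 'tt0000000', 'title': 'Example'},
#                     search_after = False, single = True)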
| gpl-3.0 |
mlmurray/TensorFlow-Experimentation | examples/3 - Neural Networks/alexnet.py | 1 | 5087 | '''
AlexNet implementation example using TensorFlow library.
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
AlexNet Paper (http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
# Import MINST data
import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
import tensorflow as tf
# Parameters
learning_rate = 0.001
training_iters = 200000
batch_size = 64
display_step = 20
# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
dropout = 0.8 # Dropout, probability to keep units
# tf Graph input
x = tf.placeholder(tf.types.float32, [None, n_input])
y = tf.placeholder(tf.types.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.types.float32) # dropout (keep probability)
# Create AlexNet model
def conv2d(name, l_input, w, b):
return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1], padding='SAME'),b), name=name)
def max_pool(name, l_input, k):
return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME', name=name)
def norm(name, l_input, lsize=4):
return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=name)
def alex_net(_X, _weights, _biases, _dropout):
# Reshape input picture
_X = tf.reshape(_X, shape=[-1, 28, 28, 1])
# Convolution Layer
conv1 = conv2d('conv1', _X, _weights['wc1'], _biases['bc1'])
# Max Pooling (down-sampling)
pool1 = max_pool('pool1', conv1, k=2)
# Apply Normalization
norm1 = norm('norm1', pool1, lsize=4)
# Apply Dropout
norm1 = tf.nn.dropout(norm1, _dropout)
# Convolution Layer
conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'])
# Max Pooling (down-sampling)
pool2 = max_pool('pool2', conv2, k=2)
# Apply Normalization
norm2 = norm('norm2', pool2, lsize=4)
# Apply Dropout
norm2 = tf.nn.dropout(norm2, _dropout)
# Convolution Layer
conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'])
# Max Pooling (down-sampling)
pool3 = max_pool('pool3', conv3, k=2)
# Apply Normalization
norm3 = norm('norm3', pool3, lsize=4)
# Apply Dropout
norm3 = tf.nn.dropout(norm3, _dropout)
# Fully connected layer
dense1 = tf.reshape(norm3, [-1, _weights['wd1'].get_shape().as_list()[0]]) # Reshape conv3 output to fit dense layer input
dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'], name='fc1') # Relu activation
dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc2') # Relu activation
# Output, class prediction
out = tf.matmul(dense2, _weights['out']) + _biases['out']
return out
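# Shape note (added comments, not in the original example): the 28x28 input is
# halved by three 2x2 max-pools with SAME padding (28 -> 14 -> 7 -> 4, since
# 7/2 rounds up under SAME padding), and conv3 produces 256 channels, which is
# why 'wd1' below expects a flattened input of 4*4*256 features.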
# Store layers weight & bias
weights = {
'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64])),
'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128])),
'wc3': tf.Variable(tf.random_normal([3, 3, 128, 256])),
'wd1': tf.Variable(tf.random_normal([4*4*256, 1024])),
'wd2': tf.Variable(tf.random_normal([1024, 1024])),
'out': tf.Variable(tf.random_normal([1024, 10]))
}
biases = {
'bc1': tf.Variable(tf.random_normal([64])),
'bc2': tf.Variable(tf.random_normal([128])),
'bc3': tf.Variable(tf.random_normal([256])),
'bd1': tf.Variable(tf.random_normal([1024])),
'bd2': tf.Variable(tf.random_normal([1024])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = alex_net(x, weights, biases, keep_prob)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.types.float32))
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
step = 1
# Keep training until reach max iterations
while step * batch_size < training_iters:
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Fit training using batch data
sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
if step % display_step == 0:
# Calculate batch accuracy
acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
# Calculate batch loss
loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc)
step += 1
print "Optimization Finished!"
# Calculate accuracy for 256 mnist test images
print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.})
| mit |
kuasha/cosmos | samples/barebone/views.py | 1 | 3616 | import logging
import settings
from tornado.httpclient import AsyncHTTPClient
import cosmos
from cosmos.service.auth import BasicLoginHandler
__author__ = 'Maruf Maniruzzaman'
import tornado
from tornado import gen
import json
from cosmos.service.requesthandler import RequestHandler
class IndexHandler(RequestHandler):
@gen.coroutine
def get(self):
try:
with open(settings.INDEX_HTML_PATH) as f:
self.write(f.read())
except IOError as e:
msg = """
File not found {}.
            If you are developing cosmos, create a local_settings.py file beside cosmosmain.py with the following content:
import os
STATIC_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../adminpanel/app")
TEMPLATE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../adminpanel/templates")
INDEX_HTML_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../adminpanel/app/index.html")
""".format(settings.INDEX_HTML_PATH)
raise tornado.web.HTTPError(404, msg)
class LoginHandler(BasicLoginHandler):
@gen.coroutine
def get(self):
next = self.get_argument("next", '/')
try:
with open(settings.LOGIN_HTML_PATH) as f:
login_template = f.read()
self._show_login_window(next, login_template=login_template)
except IOError as e:
raise tornado.web.HTTPError(404, "File not found")
class AuthPublicKeyHandler(RequestHandler):
@gen.coroutine
def get(self, tenant_id):
self.set_header("Content-Type", 'application/x-pem-file')
self.set_header('Content-Disposition', 'attachment; filename=%s_pub.pem' % tenant_id)
self.write(settings.OAUTH2_PUBLIC_KEY_PEM)
class OAuth2DummyClientHandler(RequestHandler):
@gen.coroutine
def get(self, function):
protocol = self.request.protocol
host = self.request.host
oauth2_service_host = protocol + "://"+ host
#oauth2_service_host = "https://authp.com"
tenant_id = settings.TENANT_ID
self.write(self.request.uri + " <br />" + function + "<br />")
params = json.dumps({k: self.get_argument(k) for k in self.request.arguments})
self.write(params)
code = self.get_argument("code", "temp")
token = self.get_argument("access_token", default=None)
if token:
http_client = AsyncHTTPClient()
resp = yield http_client.fetch("{0}/{1}/auth/key/".format(oauth2_service_host, tenant_id))
if not resp or not resp.code == 200 or resp.body is None:
self.write("Could not get auth server public key")
else:
pub_pem = resp.body
logging.debug("Public key: {0}".format(pub_pem))
header, claims = cosmos.auth.oauth2.verify_token(token, pub_pem, ['RS256'])
self.write("<br /><hr />")
self.write(json.dumps(header))
self.write("<br /><hr />")
self.write(json.dumps(claims))
self.write("<br /><hr />")
self.write("<a href='{}/{}/oauth2/authorize/?response_type=code&state=mystate&resource=myresource.com/test&redirect_uri={}://{}/oauth2client/authorize/?tag=2'>Request Code</a><br />".format(oauth2_service_host, settings.TENANT_ID, protocol, host))
self.write("<a href='{}/{}/oauth2/token/?code={}&state=mystate&grant_type=code&redirect_uri={}://{}/oauth2client/authorize/?tag=2'>Request Token</a><br />".format(oauth2_service_host, tenant_id, code, protocol, host))
self.finish()
| mit |
ismail-s/urwid | urwid/listbox.py | 12 | 59569 | #!/usr/bin/python
#
# Urwid listbox class
# Copyright (C) 2004-2012 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
from urwid.util import is_mouse_press
from urwid.canvas import SolidCanvas, CanvasCombine
from urwid.widget import Widget, nocache_widget_render_instance, BOX, GIVEN
from urwid.decoration import calculate_top_bottom_filler, normalize_valign
from urwid import signals
from urwid.signals import connect_signal
from urwid.monitored_list import MonitoredList, MonitoredFocusList
from urwid.container import WidgetContainerMixin
from urwid.command_map import (CURSOR_UP, CURSOR_DOWN,
CURSOR_PAGE_UP, CURSOR_PAGE_DOWN)
class ListWalkerError(Exception):
pass
class ListWalker(object):
__metaclass__ = signals.MetaSignals
signals = ["modified"]
def _modified(self):
signals.emit_signal(self, "modified")
def get_focus(self):
"""
This default implementation relies on a focus attribute and a
__getitem__() method defined in a subclass.
Override and don't call this method if these are not defined.
"""
try:
focus = self.focus
return self[focus], focus
except (IndexError, KeyError, TypeError):
return None, None
def get_next(self, position):
"""
This default implementation relies on a next_position() method and a
__getitem__() method defined in a subclass.
Override and don't call this method if these are not defined.
"""
try:
position = self.next_position(position)
return self[position], position
except (IndexError, KeyError):
return None, None
def get_prev(self, position):
"""
This default implementation relies on a prev_position() method and a
__getitem__() method defined in a subclass.
Override and don't call this method if these are not defined.
"""
try:
position = self.prev_position(position)
return self[position], position
except (IndexError, KeyError):
return None, None
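# Illustrative sketch (not part of urwid): a minimal ListWalker subclass only
# needs a ``focus`` attribute plus __getitem__, next_position and prev_position;
# the default get_focus/get_next/get_prev defined above then work unchanged.
#
#     class ExampleWalker(ListWalker):
#         def __init__(self, widgets):
#             self.widgets = widgets
#             self.focus = 0
#         def __getitem__(self, position):
#             return self.widgets[position]
#         def next_position(self, position):
#             if position + 1 >= len(self.widgets):
#                 raise IndexError
#             return position + 1
#         def prev_position(self, position):
#             if position <= 0:
#                 raise IndexError
#             return position - 1
#         def set_focus(self, position):
#             self.focus = position
#             self._modified()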
class PollingListWalker(object): # NOT ListWalker subclass
def __init__(self, contents):
"""
contents -- list to poll for changes
This class is deprecated. Use SimpleFocusListWalker instead.
"""
import warnings
warnings.warn("PollingListWalker is deprecated, "
"use SimpleFocusListWalker instead.", DeprecationWarning)
self.contents = contents
if not getattr(contents, '__getitem__', None):
raise ListWalkerError("PollingListWalker expecting list like "
"object, got: %r" % (contents,))
self.focus = 0
def _clamp_focus(self):
if self.focus >= len(self.contents):
self.focus = len(self.contents)-1
def get_focus(self):
"""Return (focus widget, focus position)."""
if len(self.contents) == 0: return None, None
self._clamp_focus()
return self.contents[self.focus], self.focus
def set_focus(self, position):
"""Set focus position."""
# this class is deprecated, otherwise I might have fixed this:
assert type(position) == int
self.focus = position
def get_next(self, start_from):
"""
Return (widget after start_from, position after start_from).
"""
pos = start_from + 1
if len(self.contents) <= pos: return None, None
return self.contents[pos],pos
def get_prev(self, start_from):
"""
Return (widget before start_from, position before start_from).
"""
pos = start_from - 1
if pos < 0: return None, None
return self.contents[pos],pos
class SimpleListWalker(MonitoredList, ListWalker):
def __init__(self, contents):
"""
contents -- list to copy into this object
Changes made to this object (when it is treated as a list) are
detected automatically and will cause ListBox objects using
this list walker to be updated.
"""
if not getattr(contents, '__getitem__', None):
raise ListWalkerError, "SimpleListWalker expecting list like object, got: %r"%(contents,)
MonitoredList.__init__(self, contents)
self.focus = 0
def _get_contents(self):
"""
Return self.
Provides compatibility with old SimpleListWalker class.
"""
return self
contents = property(_get_contents)
def _modified(self):
if self.focus >= len(self):
self.focus = max(0, len(self)-1)
ListWalker._modified(self)
def set_modified_callback(self, callback):
"""
This function inherited from MonitoredList is not
implemented in SimpleListWalker.
Use connect_signal(list_walker, "modified", ...) instead.
"""
raise NotImplementedError('Use connect_signal('
'list_walker, "modified", ...) instead.')
def set_focus(self, position):
"""Set focus position."""
try:
if position < 0 or position >= len(self):
raise ValueError
except (TypeError, ValueError):
raise IndexError, "No widget at position %s" % (position,)
self.focus = position
self._modified()
def next_position(self, position):
"""
Return position after start_from.
"""
if len(self) - 1 <= position:
raise IndexError
return position + 1
def prev_position(self, position):
"""
Return position before start_from.
"""
if position <= 0:
raise IndexError
return position - 1
def positions(self, reverse=False):
"""
Optional method for returning an iterable of positions.
"""
if reverse:
return xrange(len(self) - 1, -1, -1)
return xrange(len(self))
class SimpleFocusListWalker(ListWalker, MonitoredFocusList):
def __init__(self, contents):
"""
contents -- list to copy into this object
Changes made to this object (when it is treated as a list) are
detected automatically and will cause ListBox objects using
this list walker to be updated.
Also, items added or removed before the widget in focus with
normal list methods will cause the focus to be updated
intelligently.
"""
if not getattr(contents, '__getitem__', None):
raise ListWalkerError("SimpleFocusListWalker expecting list like "
"object, got: %r"%(contents,))
MonitoredFocusList.__init__(self, contents)
def set_modified_callback(self, callback):
"""
This function inherited from MonitoredList is not
implemented in SimpleFocusListWalker.
Use connect_signal(list_walker, "modified", ...) instead.
"""
raise NotImplementedError('Use connect_signal('
'list_walker, "modified", ...) instead.')
def set_focus(self, position):
"""Set focus position."""
self.focus = position
def next_position(self, position):
"""
Return position after start_from.
"""
if len(self) - 1 <= position:
raise IndexError
return position + 1
def prev_position(self, position):
"""
Return position before start_from.
"""
if position <= 0:
raise IndexError
return position - 1
def positions(self, reverse=False):
"""
Optional method for returning an iterable of positions.
"""
if reverse:
return xrange(len(self) - 1, -1, -1)
return xrange(len(self))
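# Typical usage sketch (illustrative, not part of this module): wrap widgets in
# a walker, hand the walker to a ListBox, then mutate it with normal list
# operations; the "modified" signal keeps the ListBox display up to date.
#
#     walker = SimpleFocusListWalker([Text("line %d" % i) for i in range(10)])
#     listbox = ListBox(walker)
#     walker.append(Text("one more line"))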
class ListBoxError(Exception):
pass
class ListBox(Widget, WidgetContainerMixin):
"""
    a vertically stacked list of widgets
"""
_selectable = True
_sizing = frozenset([BOX])
def __init__(self, body):
"""
:param body: a ListWalker subclass such as
:class:`SimpleFocusListWalker` that contains
widgets to be displayed inside the list box
:type body: ListWalker
"""
if getattr(body, 'get_focus', None):
self.body = body
else:
self.body = PollingListWalker(body)
try:
connect_signal(self.body, "modified", self._invalidate)
except NameError:
# our list walker has no modified signal so we must not
# cache our canvases because we don't know when our
# content has changed
self.render = nocache_widget_render_instance(self)
# offset_rows is the number of rows between the top of the view
# and the top of the focused item
self.offset_rows = 0
# inset_fraction is used when the focused widget is off the
# top of the view. it is the fraction of the widget cut off
# at the top. (numerator, denominator)
self.inset_fraction = (0,1)
# pref_col is the preferred column for the cursor when moving
# between widgets that use the cursor (edit boxes etc.)
self.pref_col = 'left'
# variable for delayed focus change used by set_focus
self.set_focus_pending = 'first selectable'
# variable for delayed valign change used by set_focus_valign
self.set_focus_valign_pending = None
def calculate_visible(self, size, focus=False ):
"""
Returns the widgets that would be displayed in
the ListBox given the current *size* and *focus*.
see :meth:`Widget.render` for parameter details
:returns: (*middle*, *top*, *bottom*) or (``None``, ``None``, ``None``)
*middle*
(*row offset*(when +ve) or *inset*(when -ve),
*focus widget*, *focus position*, *focus rows*,
*cursor coords* or ``None``)
*top*
(*# lines to trim off top*,
list of (*widget*, *position*, *rows*) tuples above focus
in order from bottom to top)
*bottom*
(*# lines to trim off bottom*,
list of (*widget*, *position*, *rows*) tuples below focus
in order from top to bottom)
"""
(maxcol, maxrow) = size
# 0. set the focus if a change is pending
if self.set_focus_pending or self.set_focus_valign_pending:
self._set_focus_complete( (maxcol, maxrow), focus )
# 1. start with the focus widget
focus_widget, focus_pos = self.body.get_focus()
if focus_widget is None: #list box is empty?
return None,None,None
top_pos = focus_pos
offset_rows, inset_rows = self.get_focus_offset_inset(
(maxcol,maxrow))
# force at least one line of focus to be visible
if maxrow and offset_rows >= maxrow:
offset_rows = maxrow -1
# adjust position so cursor remains visible
cursor = None
if maxrow and focus_widget.selectable() and focus:
if hasattr(focus_widget,'get_cursor_coords'):
cursor=focus_widget.get_cursor_coords((maxcol,))
if cursor is not None:
cx, cy = cursor
effective_cy = cy + offset_rows - inset_rows
if effective_cy < 0: # cursor above top?
inset_rows = cy
elif effective_cy >= maxrow: # cursor below bottom?
offset_rows = maxrow - cy -1
if offset_rows < 0: # need to trim the top
inset_rows, offset_rows = -offset_rows, 0
        # set trim_top by focus trimming
trim_top = inset_rows
focus_rows = focus_widget.rows((maxcol,),True)
# 2. collect the widgets above the focus
pos = focus_pos
fill_lines = offset_rows
fill_above = []
top_pos = pos
while fill_lines > 0:
prev, pos = self.body.get_prev( pos )
if prev is None: # run out of widgets above?
offset_rows -= fill_lines
break
top_pos = pos
p_rows = prev.rows( (maxcol,) )
if p_rows: # filter out 0-height widgets
fill_above.append( (prev, pos, p_rows) )
if p_rows > fill_lines: # crosses top edge?
trim_top = p_rows-fill_lines
break
fill_lines -= p_rows
trim_bottom = focus_rows + offset_rows - inset_rows - maxrow
if trim_bottom < 0: trim_bottom = 0
# 3. collect the widgets below the focus
pos = focus_pos
fill_lines = maxrow - focus_rows - offset_rows + inset_rows
fill_below = []
while fill_lines > 0:
next, pos = self.body.get_next( pos )
if next is None: # run out of widgets below?
break
n_rows = next.rows( (maxcol,) )
if n_rows: # filter out 0-height widgets
fill_below.append( (next, pos, n_rows) )
if n_rows > fill_lines: # crosses bottom edge?
trim_bottom = n_rows-fill_lines
fill_lines -= n_rows
break
fill_lines -= n_rows
# 4. fill from top again if necessary & possible
fill_lines = max(0, fill_lines)
if fill_lines >0 and trim_top >0:
if fill_lines <= trim_top:
trim_top -= fill_lines
offset_rows += fill_lines
fill_lines = 0
else:
fill_lines -= trim_top
offset_rows += trim_top
trim_top = 0
pos = top_pos
while fill_lines > 0:
prev, pos = self.body.get_prev( pos )
if prev is None:
break
p_rows = prev.rows( (maxcol,) )
fill_above.append( (prev, pos, p_rows) )
if p_rows > fill_lines: # more than required
trim_top = p_rows-fill_lines
offset_rows += fill_lines
break
fill_lines -= p_rows
offset_rows += p_rows
# 5. return the interesting bits
return ((offset_rows - inset_rows, focus_widget,
focus_pos, focus_rows, cursor ),
(trim_top, fill_above), (trim_bottom, fill_below))
def render(self, size, focus=False ):
"""
Render ListBox and return canvas.
see :meth:`Widget.render` for details
"""
(maxcol, maxrow) = size
middle, top, bottom = self.calculate_visible(
(maxcol, maxrow), focus=focus)
if middle is None:
return SolidCanvas(" ", maxcol, maxrow)
_ignore, focus_widget, focus_pos, focus_rows, cursor = middle
trim_top, fill_above = top
trim_bottom, fill_below = bottom
combinelist = []
rows = 0
fill_above.reverse() # fill_above is in bottom-up order
for widget,w_pos,w_rows in fill_above:
canvas = widget.render((maxcol,))
if w_rows != canvas.rows():
raise ListBoxError, "Widget %r at position %r within listbox calculated %d rows but rendered %d!"% (widget,w_pos,w_rows, canvas.rows())
rows += w_rows
combinelist.append((canvas, w_pos, False))
focus_canvas = focus_widget.render((maxcol,), focus=focus)
if focus_canvas.rows() != focus_rows:
raise ListBoxError, "Focus Widget %r at position %r within listbox calculated %d rows but rendered %d!"% (focus_widget,focus_pos,focus_rows, focus_canvas.rows())
c_cursor = focus_canvas.cursor
if cursor != c_cursor:
raise ListBoxError, "Focus Widget %r at position %r within listbox calculated cursor coords %r but rendered cursor coords %r!" %(focus_widget,focus_pos,cursor,c_cursor)
rows += focus_rows
combinelist.append((focus_canvas, focus_pos, True))
for widget,w_pos,w_rows in fill_below:
canvas = widget.render((maxcol,))
if w_rows != canvas.rows():
raise ListBoxError, "Widget %r at position %r within listbox calculated %d rows but rendered %d!"% (widget,w_pos,w_rows, canvas.rows())
rows += w_rows
combinelist.append((canvas, w_pos, False))
final_canvas = CanvasCombine(combinelist)
if trim_top:
final_canvas.trim(trim_top)
rows -= trim_top
if trim_bottom:
final_canvas.trim_end(trim_bottom)
rows -= trim_bottom
if rows > maxrow:
raise ListBoxError, "Listbox contents too long! Probably urwid's fault (please report): %r" % ((top,middle,bottom),)
if rows < maxrow:
bottom_pos = focus_pos
if fill_below: bottom_pos = fill_below[-1][1]
if trim_bottom != 0 or self.body.get_next(bottom_pos) != (None,None):
raise ListBoxError, "Listbox contents too short! Probably urwid's fault (please report): %r" % ((top,middle,bottom),)
final_canvas.pad_trim_top_bottom(0, maxrow - rows)
return final_canvas
def get_cursor_coords(self, size):
"""
See :meth:`Widget.get_cursor_coords` for details
"""
(maxcol, maxrow) = size
middle, top, bottom = self.calculate_visible(
(maxcol, maxrow), True)
if middle is None:
return None
offset_inset, _ignore1, _ignore2, _ignore3, cursor = middle
if not cursor:
return None
x, y = cursor
y += offset_inset
if y < 0 or y >= maxrow:
return None
return (x, y)
def set_focus_valign(self, valign):
"""Set the focus widget's display offset and inset.
:param valign: one of:
'top', 'middle', 'bottom'
('fixed top', rows)
('fixed bottom', rows)
('relative', percentage 0=top 100=bottom)
"""
vt, va = normalize_valign(valign,ListBoxError)
self.set_focus_valign_pending = vt, va
def set_focus(self, position, coming_from=None):
"""
Set the focus position and try to keep the old focus in view.
:param position: a position compatible with :meth:`self.body.set_focus`
:param coming_from: set to 'above' or 'below' if you know that
old position is above or below the new position.
:type coming_from: str
"""
if coming_from not in ('above', 'below', None):
raise ListBoxError("coming_from value invalid: %r" %
(coming_from,))
focus_widget, focus_pos = self.body.get_focus()
if focus_widget is None:
raise IndexError("Can't set focus, ListBox is empty")
self.set_focus_pending = coming_from, focus_widget, focus_pos
self.body.set_focus(position)
def get_focus(self):
"""
Return a `(focus widget, focus position)` tuple, for backwards
compatibility. You may also use the new standard container
properties :attr:`focus` and :attr:`focus_position` to read these values.
"""
return self.body.get_focus()
def _get_focus(self):
"""
Return the widget in focus according to our :obj:`list walker <ListWalker>`.
"""
return self.body.get_focus()[0]
focus = property(_get_focus,
doc="the child widget in focus or None when ListBox is empty")
def _get_focus_position(self):
"""
Return the list walker position of the widget in focus. The type
of value returned depends on the :obj:`list walker <ListWalker>`.
"""
w, pos = self.body.get_focus()
if w is None:
raise IndexError, "No focus_position, ListBox is empty"
return pos
focus_position = property(_get_focus_position, set_focus, doc="""
the position of child widget in focus. The valid values for this
position depend on the list walker in use.
:exc:`IndexError` will be raised by reading this property when the
ListBox is empty or setting this property to an invalid position.
""")
def _contents(self):
class ListBoxContents(object):
__getitem__ = self._contents__getitem__
return ListBoxContents()
def _contents__getitem__(self, key):
# try list walker protocol v2 first
getitem = getattr(self.body, '__getitem__', None)
if getitem:
try:
return (getitem(key), None)
except (IndexError, KeyError):
raise KeyError("ListBox.contents key not found: %r" % (key,))
# fall back to v1
w, old_focus = self.body.get_focus()
try:
try:
self.body.set_focus(key)
return self.body.get_focus()[0]
except (IndexError, KeyError):
raise KeyError("ListBox.contents key not found: %r" % (key,))
finally:
self.body.set_focus(old_focus)
contents = property(lambda self: self._contents, doc="""
An object that allows reading widgets from the ListBox's list
walker as a `(widget, options)` tuple. `None` is currently the only
value for options.
.. warning::
This object may not be used to set or iterate over contents.
You must use the list walker stored as
:attr:`.body` to perform manipulation and iteration, if supported.
""")
def options(self):
"""
There are currently no options for ListBox contents.
Return None as a placeholder for future options.
"""
return None
def _set_focus_valign_complete(self, size, focus):
"""
        Finish setting the offset and inset now that we have a
maxcol & maxrow.
"""
(maxcol, maxrow) = size
vt,va = self.set_focus_valign_pending
self.set_focus_valign_pending = None
self.set_focus_pending = None
focus_widget, focus_pos = self.body.get_focus()
if focus_widget is None:
return
rows = focus_widget.rows((maxcol,), focus)
rtop, rbot = calculate_top_bottom_filler(maxrow,
vt, va, GIVEN, rows, None, 0, 0)
self.shift_focus((maxcol, maxrow), rtop)
def _set_focus_first_selectable(self, size, focus):
"""
Choose the first visible, selectable widget below the
current focus as the focus widget.
"""
(maxcol, maxrow) = size
self.set_focus_valign_pending = None
self.set_focus_pending = None
middle, top, bottom = self.calculate_visible(
(maxcol, maxrow), focus=focus)
if middle is None:
return
row_offset, focus_widget, focus_pos, focus_rows, cursor = middle
trim_top, fill_above = top
trim_bottom, fill_below = bottom
if focus_widget.selectable():
return
if trim_bottom:
fill_below = fill_below[:-1]
new_row_offset = row_offset + focus_rows
for widget, pos, rows in fill_below:
if widget.selectable():
self.body.set_focus(pos)
self.shift_focus((maxcol, maxrow),
new_row_offset)
return
new_row_offset += rows
def _set_focus_complete(self, size, focus):
"""
Finish setting the position now that we have maxcol & maxrow.
"""
(maxcol, maxrow) = size
self._invalidate()
if self.set_focus_pending == "first selectable":
return self._set_focus_first_selectable(
(maxcol,maxrow), focus)
if self.set_focus_valign_pending is not None:
return self._set_focus_valign_complete(
(maxcol,maxrow), focus)
coming_from, focus_widget, focus_pos = self.set_focus_pending
self.set_focus_pending = None
# new position
new_focus_widget, position = self.body.get_focus()
if focus_pos == position:
# do nothing
return
# restore old focus temporarily
self.body.set_focus(focus_pos)
middle,top,bottom=self.calculate_visible((maxcol,maxrow),focus)
focus_offset, focus_widget, focus_pos, focus_rows, cursor=middle
trim_top, fill_above = top
trim_bottom, fill_below = bottom
offset = focus_offset
for widget, pos, rows in fill_above:
offset -= rows
if pos == position:
self.change_focus((maxcol, maxrow), pos,
offset, 'below' )
return
offset = focus_offset + focus_rows
for widget, pos, rows in fill_below:
if pos == position:
self.change_focus((maxcol, maxrow), pos,
offset, 'above' )
return
offset += rows
# failed to find widget among visible widgets
self.body.set_focus( position )
widget, position = self.body.get_focus()
rows = widget.rows((maxcol,), focus)
if coming_from=='below':
offset = 0
elif coming_from=='above':
offset = maxrow-rows
else:
offset = (maxrow-rows) // 2
self.shift_focus((maxcol, maxrow), offset)
def shift_focus(self, size, offset_inset):
"""
Move the location of the current focus relative to the top.
This is used internally by methods that know the widget's *size*.
See also :meth:`.set_focus_valign`.
:param size: see :meth:`Widget.render` for details
:param offset_inset: either the number of rows between the
top of the listbox and the start of the focus widget (+ve
value) or the number of lines of the focus widget hidden off
the top edge of the listbox (-ve value) or ``0`` if the top edge
of the focus widget is aligned with the top edge of the
listbox.
:type offset_inset: int
"""
(maxcol, maxrow) = size
if offset_inset >= 0:
if offset_inset >= maxrow:
raise ListBoxError, "Invalid offset_inset: %r, only %r rows in list box"% (offset_inset, maxrow)
self.offset_rows = offset_inset
self.inset_fraction = (0,1)
else:
target, _ignore = self.body.get_focus()
tgt_rows = target.rows( (maxcol,), True )
if offset_inset + tgt_rows <= 0:
raise ListBoxError, "Invalid offset_inset: %r, only %r rows in target!" %(offset_inset, tgt_rows)
self.offset_rows = 0
self.inset_fraction = (-offset_inset,tgt_rows)
self._invalidate()
def update_pref_col_from_focus(self, size):
"""Update self.pref_col from the focus widget."""
# TODO: should this not be private?
(maxcol, maxrow) = size
widget, old_pos = self.body.get_focus()
if widget is None: return
pref_col = None
if hasattr(widget,'get_pref_col'):
pref_col = widget.get_pref_col((maxcol,))
if pref_col is None and hasattr(widget,'get_cursor_coords'):
coords = widget.get_cursor_coords((maxcol,))
if type(coords) == tuple:
pref_col,y = coords
if pref_col is not None:
self.pref_col = pref_col
def change_focus(self, size, position,
offset_inset = 0, coming_from = None,
cursor_coords = None, snap_rows = None):
"""
Change the current focus widget.
This is used internally by methods that know the widget's *size*.
See also :meth:`.set_focus`.
:param size: see :meth:`Widget.render` for details
:param position: a position compatible with :meth:`self.body.set_focus`
:param offset_inset: either the number of rows between the
top of the listbox and the start of the focus widget (+ve
value) or the number of lines of the focus widget hidden off
the top edge of the listbox (-ve value) or 0 if the top edge
of the focus widget is aligned with the top edge of the
listbox (default if unspecified)
:type offset_inset: int
:param coming_from: either 'above', 'below' or unspecified `None`
:type coming_from: str
:param cursor_coords: (x, y) tuple indicating the desired
column and row for the cursor, a (x,) tuple indicating only
the column for the cursor, or unspecified
:type cursor_coords: (int, int)
:param snap_rows: the maximum number of extra rows to scroll
when trying to "snap" a selectable focus into the view
:type snap_rows: int
"""
(maxcol, maxrow) = size
# update pref_col before change
if cursor_coords:
self.pref_col = cursor_coords[0]
else:
self.update_pref_col_from_focus((maxcol,maxrow))
self._invalidate()
self.body.set_focus(position)
target, _ignore = self.body.get_focus()
tgt_rows = target.rows( (maxcol,), True)
if snap_rows is None:
snap_rows = maxrow - 1
# "snap" to selectable widgets
align_top = 0
align_bottom = maxrow - tgt_rows
if ( coming_from == 'above'
and target.selectable()
and offset_inset > align_bottom ):
if snap_rows >= offset_inset - align_bottom:
offset_inset = align_bottom
elif snap_rows >= offset_inset - align_top:
offset_inset = align_top
else:
offset_inset -= snap_rows
if ( coming_from == 'below'
and target.selectable()
and offset_inset < align_top ):
if snap_rows >= align_top - offset_inset:
offset_inset = align_top
elif snap_rows >= align_bottom - offset_inset:
offset_inset = align_bottom
else:
offset_inset += snap_rows
# convert offset_inset to offset_rows or inset_fraction
if offset_inset >= 0:
self.offset_rows = offset_inset
self.inset_fraction = (0,1)
else:
if offset_inset + tgt_rows <= 0:
raise ListBoxError, "Invalid offset_inset: %s, only %s rows in target!" %(offset_inset, tgt_rows)
self.offset_rows = 0
self.inset_fraction = (-offset_inset,tgt_rows)
if cursor_coords is None:
if coming_from is None:
return # must either know row or coming_from
cursor_coords = (self.pref_col,)
if not hasattr(target,'move_cursor_to_coords'):
return
attempt_rows = []
if len(cursor_coords) == 1:
# only column (not row) specified
# start from closest edge and move inwards
(pref_col,) = cursor_coords
if coming_from=='above':
attempt_rows = range( 0, tgt_rows )
else:
assert coming_from == 'below', "must specify coming_from ('above' or 'below') if cursor row is not specified"
attempt_rows = range( tgt_rows, -1, -1)
else:
# both column and row specified
# start from preferred row and move back to closest edge
(pref_col, pref_row) = cursor_coords
if pref_row < 0 or pref_row >= tgt_rows:
raise ListBoxError, "cursor_coords row outside valid range for target. pref_row:%r target_rows:%r"%(pref_row,tgt_rows)
if coming_from=='above':
attempt_rows = range( pref_row, -1, -1 )
elif coming_from=='below':
attempt_rows = range( pref_row, tgt_rows )
else:
attempt_rows = [pref_row]
for row in attempt_rows:
if target.move_cursor_to_coords((maxcol,),pref_col,row):
break
def get_focus_offset_inset(self, size):
"""Return (offset rows, inset rows) for focus widget."""
(maxcol, maxrow) = size
focus_widget, pos = self.body.get_focus()
focus_rows = focus_widget.rows((maxcol,), True)
offset_rows = self.offset_rows
inset_rows = 0
if offset_rows == 0:
inum, iden = self.inset_fraction
if inum < 0 or iden < 0 or inum >= iden:
raise ListBoxError, "Invalid inset_fraction: %r"%(self.inset_fraction,)
inset_rows = focus_rows * inum // iden
if inset_rows and inset_rows >= focus_rows:
raise ListBoxError, "urwid inset_fraction error (please report)"
return offset_rows, inset_rows
def make_cursor_visible(self, size):
"""Shift the focus widget so that its cursor is visible."""
(maxcol, maxrow) = size
focus_widget, pos = self.body.get_focus()
if focus_widget is None:
return
if not focus_widget.selectable():
return
if not hasattr(focus_widget,'get_cursor_coords'):
return
cursor = focus_widget.get_cursor_coords((maxcol,))
if cursor is None:
return
cx, cy = cursor
offset_rows, inset_rows = self.get_focus_offset_inset(
(maxcol, maxrow))
if cy < inset_rows:
self.shift_focus( (maxcol,maxrow), - (cy) )
return
if offset_rows - inset_rows + cy >= maxrow:
self.shift_focus( (maxcol,maxrow), maxrow-cy-1 )
return
def keypress(self, size, key):
"""Move selection through the list elements scrolling when
necessary. 'up' and 'down' are first passed to widget in focus
in case that widget can handle them. 'page up' and 'page down'
are always handled by the ListBox.
Keystrokes handled by this widget are:
'up' up one line (or widget)
'down' down one line (or widget)
'page up' move cursor up one listbox length
'page down' move cursor down one listbox length
"""
(maxcol, maxrow) = size
if self.set_focus_pending or self.set_focus_valign_pending:
self._set_focus_complete( (maxcol,maxrow), focus=True )
focus_widget, pos = self.body.get_focus()
if focus_widget is None: # empty listbox, can't do anything
return key
if self._command_map[key] not in [CURSOR_PAGE_UP, CURSOR_PAGE_DOWN]:
if focus_widget.selectable():
key = focus_widget.keypress((maxcol,),key)
if key is None:
self.make_cursor_visible((maxcol,maxrow))
return
def actual_key(unhandled):
if unhandled:
return key
# pass off the heavy lifting
if self._command_map[key] == CURSOR_UP:
return actual_key(self._keypress_up((maxcol, maxrow)))
if self._command_map[key] == CURSOR_DOWN:
return actual_key(self._keypress_down((maxcol, maxrow)))
if self._command_map[key] == CURSOR_PAGE_UP:
return actual_key(self._keypress_page_up((maxcol, maxrow)))
if self._command_map[key] == CURSOR_PAGE_DOWN:
return actual_key(self._keypress_page_down((maxcol, maxrow)))
return key
def _keypress_up(self, size):
(maxcol, maxrow) = size
middle, top, bottom = self.calculate_visible(
(maxcol,maxrow), True)
if middle is None: return True
focus_row_offset,focus_widget,focus_pos,_ignore,cursor = middle
trim_top, fill_above = top
row_offset = focus_row_offset
# look for selectable widget above
pos = focus_pos
widget = None
for widget, pos, rows in fill_above:
row_offset -= rows
if rows and widget.selectable():
# this one will do
self.change_focus((maxcol,maxrow), pos,
row_offset, 'below')
return
# at this point we must scroll
row_offset += 1
self._invalidate()
while row_offset > 0:
# need to scroll in another candidate widget
widget, pos = self.body.get_prev(pos)
if widget is None:
# cannot scroll any further
return True # keypress not handled
rows = widget.rows((maxcol,), True)
row_offset -= rows
if rows and widget.selectable():
# this one will do
self.change_focus((maxcol,maxrow), pos,
row_offset, 'below')
return
if not focus_widget.selectable() or focus_row_offset+1>=maxrow:
# just take top one if focus is not selectable
# or if focus has moved out of view
if widget is None:
self.shift_focus((maxcol,maxrow), row_offset)
return
self.change_focus((maxcol,maxrow), pos,
row_offset, 'below')
return
# check if cursor will stop scroll from taking effect
if cursor is not None:
x,y = cursor
if y+focus_row_offset+1 >= maxrow:
# cursor position is a problem,
# choose another focus
if widget is None:
# try harder to get prev widget
widget, pos = self.body.get_prev(pos)
if widget is None:
return # can't do anything
rows = widget.rows((maxcol,), True)
row_offset -= rows
if -row_offset >= rows:
# must scroll further than 1 line
row_offset = - (rows-1)
self.change_focus((maxcol,maxrow),pos,
row_offset, 'below')
return
# if all else fails, just shift the current focus.
self.shift_focus((maxcol,maxrow), focus_row_offset+1)
def _keypress_down(self, size):
(maxcol, maxrow) = size
middle, top, bottom = self.calculate_visible(
(maxcol,maxrow), True)
if middle is None: return True
focus_row_offset,focus_widget,focus_pos,focus_rows,cursor=middle
trim_bottom, fill_below = bottom
row_offset = focus_row_offset + focus_rows
rows = focus_rows
# look for selectable widget below
pos = focus_pos
widget = None
for widget, pos, rows in fill_below:
if rows and widget.selectable():
# this one will do
self.change_focus((maxcol,maxrow), pos,
row_offset, 'above')
return
row_offset += rows
# at this point we must scroll
row_offset -= 1
self._invalidate()
while row_offset < maxrow:
# need to scroll in another candidate widget
widget, pos = self.body.get_next(pos)
if widget is None:
# cannot scroll any further
return True # keypress not handled
rows = widget.rows((maxcol,))
if rows and widget.selectable():
# this one will do
self.change_focus((maxcol,maxrow), pos,
row_offset, 'above')
return
row_offset += rows
if not focus_widget.selectable() or focus_row_offset+focus_rows-1 <= 0:
# just take bottom one if current is not selectable
# or if focus has moved out of view
if widget is None:
self.shift_focus((maxcol,maxrow),
row_offset-rows)
return
# FIXME: catch this bug in testcase
#self.change_focus((maxcol,maxrow), pos,
# row_offset+rows, 'above')
self.change_focus((maxcol,maxrow), pos,
row_offset-rows, 'above')
return
# check if cursor will stop scroll from taking effect
if cursor is not None:
x,y = cursor
if y+focus_row_offset-1 < 0:
# cursor position is a problem,
# choose another focus
if widget is None:
# try harder to get next widget
widget, pos = self.body.get_next(pos)
if widget is None:
return # can't do anything
else:
row_offset -= rows
if row_offset >= maxrow:
# must scroll further than 1 line
row_offset = maxrow-1
self.change_focus((maxcol,maxrow),pos,
row_offset, 'above', )
return
# if all else fails, keep the current focus.
self.shift_focus((maxcol,maxrow), focus_row_offset-1)
def _keypress_page_up(self, size):
(maxcol, maxrow) = size
middle, top, bottom = self.calculate_visible(
(maxcol,maxrow), True)
if middle is None: return True
row_offset, focus_widget, focus_pos, focus_rows, cursor = middle
trim_top, fill_above = top
# topmost_visible is row_offset rows above top row of
# focus (+ve) or -row_offset rows below top row of focus (-ve)
topmost_visible = row_offset
# scroll_from_row is (first match)
# 1. topmost visible row if focus is not selectable
# 2. row containing cursor if focus has a cursor
# 3. top row of focus widget if it is visible
# 4. topmost visible row otherwise
if not focus_widget.selectable():
scroll_from_row = topmost_visible
elif cursor is not None:
x,y = cursor
scroll_from_row = -y
elif row_offset >= 0:
scroll_from_row = 0
else:
scroll_from_row = topmost_visible
# snap_rows is maximum extra rows to scroll when
        # snapping to a new focus
snap_rows = topmost_visible - scroll_from_row
# move row_offset to the new desired value (1 "page" up)
row_offset = scroll_from_row + maxrow
# not used below:
scroll_from_row = topmost_visible = None
# gather potential target widgets
t = []
# add current focus
t.append((row_offset,focus_widget,focus_pos,focus_rows))
pos = focus_pos
# include widgets from calculate_visible(..)
for widget, pos, rows in fill_above:
row_offset -= rows
t.append( (row_offset, widget, pos, rows) )
# add newly visible ones, including within snap_rows
snap_region_start = len(t)
while row_offset > -snap_rows:
widget, pos = self.body.get_prev(pos)
if widget is None: break
rows = widget.rows((maxcol,))
row_offset -= rows
# determine if one below puts current one into snap rgn
if row_offset > 0:
snap_region_start += 1
t.append( (row_offset, widget, pos, rows) )
# if we can't fill the top we need to adjust the row offsets
row_offset, w, p, r = t[-1]
if row_offset > 0:
adjust = - row_offset
t = [(ro+adjust, w, p, r) for (ro,w,p,r) in t]
# if focus_widget (first in t) is off edge, remove it
row_offset, w, p, r = t[0]
if row_offset >= maxrow:
del t[0]
snap_region_start -= 1
# we'll need this soon
self.update_pref_col_from_focus((maxcol,maxrow))
# choose the topmost selectable and (newly) visible widget
# search within snap_rows then visible region
search_order = ( range( snap_region_start, len(t))
+ range( snap_region_start-1, -1, -1 ) )
#assert 0, repr((t, search_order))
bad_choices = []
cut_off_selectable_chosen = 0
for i in search_order:
row_offset, widget, pos, rows = t[i]
if not widget.selectable():
continue
if not rows:
continue
# try selecting this widget
pref_row = max(0, -row_offset)
# if completely within snap region, adjust row_offset
if rows + row_offset <= 0:
self.change_focus( (maxcol,maxrow), pos,
-(rows-1), 'below',
(self.pref_col, rows-1),
snap_rows-((-row_offset)-(rows-1)))
else:
self.change_focus( (maxcol,maxrow), pos,
row_offset, 'below',
(self.pref_col, pref_row), snap_rows )
# if we're as far up as we can scroll, take this one
if (fill_above and self.body.get_prev(fill_above[-1][1])
== (None,None) ):
pass #return
# find out where that actually puts us
middle, top, bottom = self.calculate_visible(
(maxcol,maxrow), True)
act_row_offset, _ign1, _ign2, _ign3, _ign4 = middle
# discard chosen widget if it will reduce scroll amount
# because of a fixed cursor (absolute last resort)
if act_row_offset > row_offset+snap_rows:
bad_choices.append(i)
continue
if act_row_offset < row_offset:
bad_choices.append(i)
continue
# also discard if off top edge (second last resort)
if act_row_offset < 0:
bad_choices.append(i)
cut_off_selectable_chosen = 1
continue
return
# anything selectable is better than what follows:
if cut_off_selectable_chosen:
return
if fill_above and focus_widget.selectable():
# if we're at the top and have a selectable, return
if self.body.get_prev(fill_above[-1][1]) == (None,None):
pass #return
# if still none found choose the topmost widget
good_choices = [j for j in search_order if j not in bad_choices]
for i in good_choices + search_order:
row_offset, widget, pos, rows = t[i]
if pos == focus_pos: continue
if not rows: # never focus a 0-height widget
continue
# if completely within snap region, adjust row_offset
if rows + row_offset <= 0:
snap_rows -= (-row_offset) - (rows-1)
row_offset = -(rows-1)
self.change_focus( (maxcol,maxrow), pos,
row_offset, 'below', None,
snap_rows )
return
# no choices available, just shift current one
self.shift_focus((maxcol, maxrow), min(maxrow-1,row_offset))
# final check for pathological case where we may fall short
middle, top, bottom = self.calculate_visible(
(maxcol,maxrow), True)
act_row_offset, _ign1, pos, _ign2, _ign3 = middle
if act_row_offset >= row_offset:
# no problem
return
# fell short, try to select anything else above
if not t:
return
_ign1, _ign2, pos, _ign3 = t[-1]
widget, pos = self.body.get_prev(pos)
if widget is None:
# no dice, we're stuck here
return
# bring in only one row if possible
rows = widget.rows((maxcol,), True)
self.change_focus((maxcol,maxrow), pos, -(rows-1),
'below', (self.pref_col, rows-1), 0 )
def _keypress_page_down(self, size):
(maxcol, maxrow) = size
middle, top, bottom = self.calculate_visible(
(maxcol,maxrow), True)
if middle is None: return True
row_offset, focus_widget, focus_pos, focus_rows, cursor = middle
trim_bottom, fill_below = bottom
# bottom_edge is maxrow-focus_pos rows below top row of focus
bottom_edge = maxrow - row_offset
# scroll_from_row is (first match)
# 1. bottom edge if focus is not selectable
# 2. row containing cursor + 1 if focus has a cursor
# 3. bottom edge of focus widget if it is visible
# 4. bottom edge otherwise
if not focus_widget.selectable():
scroll_from_row = bottom_edge
elif cursor is not None:
x,y = cursor
scroll_from_row = y + 1
elif bottom_edge >= focus_rows:
scroll_from_row = focus_rows
else:
scroll_from_row = bottom_edge
# snap_rows is maximum extra rows to scroll when
        # snapping to a new focus
snap_rows = bottom_edge - scroll_from_row
# move row_offset to the new desired value (1 "page" down)
row_offset = -scroll_from_row
# not used below:
scroll_from_row = bottom_edge = None
# gather potential target widgets
t = []
# add current focus
t.append((row_offset,focus_widget,focus_pos,focus_rows))
pos = focus_pos
row_offset += focus_rows
# include widgets from calculate_visible(..)
for widget, pos, rows in fill_below:
t.append( (row_offset, widget, pos, rows) )
row_offset += rows
# add newly visible ones, including within snap_rows
snap_region_start = len(t)
while row_offset < maxrow+snap_rows:
widget, pos = self.body.get_next(pos)
if widget is None: break
rows = widget.rows((maxcol,))
t.append( (row_offset, widget, pos, rows) )
row_offset += rows
# determine if one above puts current one into snap rgn
if row_offset < maxrow:
snap_region_start += 1
# if we can't fill the bottom we need to adjust the row offsets
row_offset, w, p, rows = t[-1]
if row_offset + rows < maxrow:
adjust = maxrow - (row_offset + rows)
t = [(ro+adjust, w, p, r) for (ro,w,p,r) in t]
# if focus_widget (first in t) is off edge, remove it
row_offset, w, p, rows = t[0]
if row_offset+rows <= 0:
del t[0]
snap_region_start -= 1
# we'll need this soon
self.update_pref_col_from_focus((maxcol,maxrow))
# choose the bottommost selectable and (newly) visible widget
# search within snap_rows then visible region
search_order = ( range( snap_region_start, len(t))
+ range( snap_region_start-1, -1, -1 ) )
#assert 0, repr((t, search_order))
bad_choices = []
cut_off_selectable_chosen = 0
for i in search_order:
row_offset, widget, pos, rows = t[i]
if not widget.selectable():
continue
if not rows:
continue
# try selecting this widget
pref_row = min(maxrow-row_offset-1, rows-1)
# if completely within snap region, adjust row_offset
if row_offset >= maxrow:
self.change_focus( (maxcol,maxrow), pos,
maxrow-1, 'above',
(self.pref_col, 0),
snap_rows+maxrow-row_offset-1 )
else:
self.change_focus( (maxcol,maxrow), pos,
row_offset, 'above',
(self.pref_col, pref_row), snap_rows )
# find out where that actually puts us
middle, top, bottom = self.calculate_visible(
(maxcol,maxrow), True)
act_row_offset, _ign1, _ign2, _ign3, _ign4 = middle
# discard chosen widget if it will reduce scroll amount
# because of a fixed cursor (absolute last resort)
if act_row_offset < row_offset-snap_rows:
bad_choices.append(i)
continue
if act_row_offset > row_offset:
bad_choices.append(i)
continue
            # also discard if off bottom edge (second last resort)
if act_row_offset+rows > maxrow:
bad_choices.append(i)
cut_off_selectable_chosen = 1
continue
return
# anything selectable is better than what follows:
if cut_off_selectable_chosen:
return
# if still none found choose the bottommost widget
good_choices = [j for j in search_order if j not in bad_choices]
for i in good_choices + search_order:
row_offset, widget, pos, rows = t[i]
if pos == focus_pos: continue
if not rows: # never focus a 0-height widget
continue
# if completely within snap region, adjust row_offset
if row_offset >= maxrow:
snap_rows -= snap_rows+maxrow-row_offset-1
row_offset = maxrow-1
self.change_focus( (maxcol,maxrow), pos,
row_offset, 'above', None,
snap_rows )
return
# no choices available, just shift current one
self.shift_focus((maxcol, maxrow), max(1-focus_rows,row_offset))
# final check for pathological case where we may fall short
middle, top, bottom = self.calculate_visible(
(maxcol,maxrow), True)
act_row_offset, _ign1, pos, _ign2, _ign3 = middle
if act_row_offset <= row_offset:
# no problem
return
# fell short, try to select anything else below
if not t:
return
_ign1, _ign2, pos, _ign3 = t[-1]
widget, pos = self.body.get_next(pos)
if widget is None:
# no dice, we're stuck here
return
# bring in only one row if possible
rows = widget.rows((maxcol,), True)
self.change_focus((maxcol,maxrow), pos, maxrow-1,
'above', (self.pref_col, 0), 0 )
def mouse_event(self, size, event, button, col, row, focus):
"""
Pass the event to the contained widgets.
May change focus on button 1 press.
"""
(maxcol, maxrow) = size
middle, top, bottom = self.calculate_visible((maxcol, maxrow),
focus=True)
if middle is None:
return False
_ignore, focus_widget, focus_pos, focus_rows, cursor = middle
trim_top, fill_above = top
_ignore, fill_below = bottom
fill_above.reverse() # fill_above is in bottom-up order
w_list = ( fill_above +
[ (focus_widget, focus_pos, focus_rows) ] +
fill_below )
wrow = -trim_top
for w, w_pos, w_rows in w_list:
if wrow + w_rows > row:
break
wrow += w_rows
else:
return False
focus = focus and w == focus_widget
if is_mouse_press(event) and button==1:
if w.selectable():
self.change_focus((maxcol,maxrow), w_pos, wrow)
if not hasattr(w,'mouse_event'):
return False
return w.mouse_event((maxcol,), event, button, col, row-wrow,
focus)
def ends_visible(self, size, focus=False):
"""
Return a list that may contain ``'top'`` and/or ``'bottom'``.
i.e. this function will return one of: [], [``'top'``],
[``'bottom'``] or [``'top'``, ``'bottom'``].
convenience function for checking whether the top and bottom
of the list are visible
"""
(maxcol, maxrow) = size
l = []
middle,top,bottom = self.calculate_visible( (maxcol,maxrow),
focus=focus )
if middle is None: # empty listbox
return ['top','bottom']
trim_top, above = top
trim_bottom, below = bottom
if trim_bottom == 0:
row_offset, w, pos, rows, c = middle
row_offset += rows
for w, pos, rows in below:
row_offset += rows
if row_offset < maxrow:
l.append('bottom')
elif self.body.get_next(pos) == (None,None):
l.append('bottom')
if trim_top == 0:
row_offset, w, pos, rows, c = middle
for w, pos, rows in above:
row_offset -= rows
if self.body.get_prev(pos) == (None,None):
l.insert(0, 'top')
return l
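    # Illustrative use (not from the original source): a caller that only wants
    # to know whether the user has scrolled to the end can test membership,
    # e.g. ``'bottom' in listbox.ends_visible(size)``.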
def __iter__(self):
"""
Return an iterator over the positions in this ListBox.
If self.body does not implement positions() then iterate
from the focus widget down to the bottom, then from above
the focus up to the top. This is the best we can do with
a minimal list walker implementation.
"""
positions_fn = getattr(self.body, 'positions', None)
if positions_fn:
for pos in positions_fn():
yield pos
return
focus_widget, focus_pos = self.body.get_focus()
if focus_widget is None:
return
pos = focus_pos
while True:
yield pos
w, pos = self.body.get_next(pos)
if not w: break
pos = focus_pos
while True:
w, pos = self.body.get_prev(pos)
if not w: break
yield pos
def __reversed__(self):
"""
Return a reversed iterator over the positions in this ListBox.
If :attr:`body` does not implement :meth:`positions` then iterate
from above the focus widget up to the top, then from the focus
widget down to the bottom. Note that this is not actually the
reverse of what `__iter__()` produces, but this is the best we can
do with a minimal list walker implementation.
"""
positions_fn = getattr(self.body, 'positions', None)
if positions_fn:
for pos in positions_fn(reverse=True):
yield pos
return
focus_widget, focus_pos = self.body.get_focus()
if focus_widget is None:
return
pos = focus_pos
while True:
w, pos = self.body.get_prev(pos)
if not w: break
yield pos
pos = focus_pos
while True:
yield pos
w, pos = self.body.get_next(pos)
if not w: break
| lgpl-2.1 |
Ch00k/ansible | lib/ansible/module_utils/known_hosts.py | 80 | 6716 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import hmac
import urlparse
try:
from hashlib import sha1
except ImportError:
import sha as sha1
HASHED_KEY_MAGIC = "|1|"
def add_git_host_key(module, url, accept_hostkey=True, create_dir=True):
""" idempotently add a git url hostkey """
if is_ssh_url(url):
fqdn = get_fqdn(url)
if fqdn:
known_host = check_hostkey(module, fqdn)
if not known_host:
if accept_hostkey:
rc, out, err = add_host_key(module, fqdn, create_dir=create_dir)
if rc != 0:
module.fail_json(msg="failed to add %s hostkey: %s" % (fqdn, out + err))
else:
module.fail_json(msg="%s has an unknown hostkey. Set accept_hostkey to True or manually add the hostkey prior to running the git module" % fqdn)
def is_ssh_url(url):
""" check if url is ssh """
if "@" in url and "://" not in url:
return True
for scheme in "ssh://", "git+ssh://", "ssh+git://":
if url.startswith(scheme):
return True
return False
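# Illustrative results of is_ssh_url() as written above (scp-like URLs contain
# an "@" and no "://"; explicit ssh schemes are matched by prefix):
#
#     is_ssh_url('git@github.com:user/repo.git')      # True
#     is_ssh_url('ssh://git@example.com/repo.git')    # True
#     is_ssh_url('https://github.com/user/repo.git')  # False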
def get_fqdn(repo_url):
""" chop the hostname out of a url """
result = None
if "@" in repo_url and "://" not in repo_url:
        # most likely a user@host:path or user@host/path type URL
repo_url = repo_url.split("@", 1)[1]
if ":" in repo_url:
repo_url = repo_url.split(":")[0]
result = repo_url
elif "/" in repo_url:
repo_url = repo_url.split("/")[0]
result = repo_url
elif "://" in repo_url:
# this should be something we can parse with urlparse
parts = urlparse.urlparse(repo_url)
# parts[1] will be empty on python2.4 on ssh:// or git:// urls, so
# ensure we actually have a parts[1] before continuing.
if parts[1] != '':
result = parts[1]
if ":" in result:
result = result.split(":")[0]
if "@" in result:
result = result.split("@", 1)[1]
return result
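# Illustrative results of get_fqdn() as written above (only the hostname is
# kept; any user@ prefix, port and path are stripped):
#
#     get_fqdn('git@github.com:user/repo.git')         # 'github.com'
#     get_fqdn('ssh://git@example.com:2222/repo.git')  # 'example.com'
#     get_fqdn('https://github.com/user/repo.git')     # 'github.com'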
def check_hostkey(module, fqdn):
return not not_in_host_file(module, fqdn)
# this is a variant of code found in connection_plugins/paramiko.py and we should modify
# the paramiko code to import and use this.
def not_in_host_file(self, host):
if 'USER' in os.environ:
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_host_file = "~/.ssh/known_hosts"
user_host_file = os.path.expanduser(user_host_file)
host_file_list = []
host_file_list.append(user_host_file)
host_file_list.append("/etc/ssh/ssh_known_hosts")
host_file_list.append("/etc/ssh/ssh_known_hosts2")
hfiles_not_found = 0
for hf in host_file_list:
if not os.path.exists(hf):
hfiles_not_found += 1
continue
try:
host_fh = open(hf)
except IOError, e:
hfiles_not_found += 1
continue
else:
data = host_fh.read()
host_fh.close()
for line in data.split("\n"):
if line is None or " " not in line:
continue
tokens = line.split()
if tokens[0].find(HASHED_KEY_MAGIC) == 0:
# this is a hashed known host entry
try:
(kn_salt,kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|",2)
hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
hash.update(host)
if hash.digest() == kn_host.decode('base64'):
return False
except:
# invalid hashed host key, skip it
continue
else:
# standard host file entry
if host in tokens[0]:
return False
return True
def add_host_key(module, fqdn, key_type="rsa", create_dir=False):
""" use ssh-keyscan to add the hostkey """
result = False
keyscan_cmd = module.get_bin_path('ssh-keyscan', True)
if 'USER' in os.environ:
user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_ssh_dir = "~/.ssh/"
user_host_file = "~/.ssh/known_hosts"
user_ssh_dir = os.path.expanduser(user_ssh_dir)
if not os.path.exists(user_ssh_dir):
if create_dir:
try:
os.makedirs(user_ssh_dir, 0700)
except:
module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
else:
module.fail_json(msg="%s does not exist" % user_ssh_dir)
elif not os.path.isdir(user_ssh_dir):
module.fail_json(msg="%s is not a directory" % user_ssh_dir)
this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)
rc, out, err = module.run_command(this_cmd)
module.append_to_file(user_host_file, out)
return rc, out, err
| gpl-3.0 |
crepererum/invenio | invenio/modules/previewer/previewerext/gview.py | 15 | 1631 | # This file is part of Invenio.
# Copyright (C) 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from flask import render_template, request
def can_preview(f):
'''
    Return True if the file type can be previewed.
'''
return f.superformat in ['.pdf', '.jpeg', '.png', '.gif', '.tiff', '.bmp',
'.mpeg4', '.3gpp', '.mov', '.avi', '.mpegps',
'.wmv', '.flv', '.txt', '.css', '.html', '.php',
'.c', '.cpp', '.h', '.hpp', '.js', '.doc', '.docx',
'.xls', '.xlsx', '.ppt', '.pptx', '.pages', '.ai',
'.psd', '.dfx', '.svg', '.eps', '.ps', '.ttf',
'.xps', '.zip', '.rar']
def preview(f):
'''
    Returns the appropriate template and passes the file and an embed flag.
'''
return render_template("previewer/gview.html", f=f,
embed=request.args.get('embed', type=bool))
| gpl-2.0 |
liberorbis/libernext | env/lib/python2.7/site-packages/celery/tests/bin/test_worker.py | 4 | 24058 | from __future__ import absolute_import
import logging
import os
import sys
from functools import wraps
from billiard import current_process
from kombu import Exchange, Queue
from celery import platforms
from celery import signals
from celery.app import trace
from celery.apps import worker as cd
from celery.bin.worker import worker, main as worker_main
from celery.exceptions import (
ImproperlyConfigured, WorkerShutdown, WorkerTerminate,
)
from celery.utils.log import ensure_process_aware_logger
from celery.worker import state
from celery.tests.case import (
AppCase,
Mock,
SkipTest,
WhateverIO,
patch,
skip_if_pypy,
skip_if_jython,
)
ensure_process_aware_logger()
class WorkerAppCase(AppCase):
def tearDown(self):
super(WorkerAppCase, self).tearDown()
trace.reset_worker_optimizations()
def disable_stdouts(fun):
@wraps(fun)
def disable(*args, **kwargs):
prev_out, prev_err = sys.stdout, sys.stderr
prev_rout, prev_rerr = sys.__stdout__, sys.__stderr__
sys.stdout = sys.__stdout__ = WhateverIO()
sys.stderr = sys.__stderr__ = WhateverIO()
try:
return fun(*args, **kwargs)
finally:
sys.stdout = prev_out
sys.stderr = prev_err
sys.__stdout__ = prev_rout
sys.__stderr__ = prev_rerr
return disable
class Worker(cd.Worker):
redirect_stdouts = False
def start(self, *args, **kwargs):
self.on_start()
class test_Worker(WorkerAppCase):
Worker = Worker
@disable_stdouts
def test_queues_string(self):
w = self.app.Worker()
w.setup_queues('foo,bar,baz')
self.assertTrue('foo' in self.app.amqp.queues)
@disable_stdouts
def test_cpu_count(self):
with patch('celery.worker.cpu_count') as cpu_count:
cpu_count.side_effect = NotImplementedError()
w = self.app.Worker(concurrency=None)
self.assertEqual(w.concurrency, 2)
w = self.app.Worker(concurrency=5)
self.assertEqual(w.concurrency, 5)
@disable_stdouts
def test_windows_B_option(self):
self.app.IS_WINDOWS = True
with self.assertRaises(SystemExit):
worker(app=self.app).run(beat=True)
def test_setup_concurrency_very_early(self):
x = worker()
x.run = Mock()
with self.assertRaises(ImportError):
x.execute_from_commandline(['worker', '-P', 'xyzybox'])
def test_run_from_argv_basic(self):
x = worker(app=self.app)
x.run = Mock()
x.maybe_detach = Mock()
def run(*args, **kwargs):
pass
x.run = run
x.run_from_argv('celery', [])
self.assertTrue(x.maybe_detach.called)
def test_maybe_detach(self):
x = worker(app=self.app)
with patch('celery.bin.worker.detached_celeryd') as detached:
x.maybe_detach([])
self.assertFalse(detached.called)
with self.assertRaises(SystemExit):
x.maybe_detach(['--detach'])
self.assertTrue(detached.called)
@disable_stdouts
def test_invalid_loglevel_gives_error(self):
x = worker(app=self.app)
with self.assertRaises(SystemExit):
x.run(loglevel='GRIM_REAPER')
def test_no_loglevel(self):
self.app.Worker = Mock()
worker(app=self.app).run(loglevel=None)
def test_tasklist(self):
worker = self.app.Worker()
self.assertTrue(worker.app.tasks)
self.assertTrue(worker.app.finalized)
self.assertTrue(worker.tasklist(include_builtins=True))
worker.tasklist(include_builtins=False)
def test_extra_info(self):
worker = self.app.Worker()
worker.loglevel = logging.WARNING
self.assertFalse(worker.extra_info())
worker.loglevel = logging.INFO
self.assertTrue(worker.extra_info())
@disable_stdouts
def test_loglevel_string(self):
worker = self.Worker(app=self.app, loglevel='INFO')
self.assertEqual(worker.loglevel, logging.INFO)
@disable_stdouts
def test_run_worker(self):
handlers = {}
class Signals(platforms.Signals):
def __setitem__(self, sig, handler):
handlers[sig] = handler
p = platforms.signals
platforms.signals = Signals()
try:
w = self.Worker(app=self.app)
w._isatty = False
w.on_start()
for sig in 'SIGINT', 'SIGHUP', 'SIGTERM':
self.assertIn(sig, handlers)
handlers.clear()
w = self.Worker(app=self.app)
w._isatty = True
w.on_start()
for sig in 'SIGINT', 'SIGTERM':
self.assertIn(sig, handlers)
self.assertNotIn('SIGHUP', handlers)
finally:
platforms.signals = p
@disable_stdouts
def test_startup_info(self):
worker = self.Worker(app=self.app)
worker.on_start()
self.assertTrue(worker.startup_info())
worker.loglevel = logging.DEBUG
self.assertTrue(worker.startup_info())
worker.loglevel = logging.INFO
self.assertTrue(worker.startup_info())
worker.autoscale = 13, 10
self.assertTrue(worker.startup_info())
prev_loader = self.app.loader
worker = self.Worker(app=self.app, queues='foo,bar,baz,xuzzy,do,re,mi')
self.app.loader = Mock()
self.app.loader.__module__ = 'acme.baked_beans'
self.assertTrue(worker.startup_info())
self.app.loader = Mock()
self.app.loader.__module__ = 'celery.loaders.foo'
self.assertTrue(worker.startup_info())
from celery.loaders.app import AppLoader
self.app.loader = AppLoader(app=self.app)
self.assertTrue(worker.startup_info())
self.app.loader = prev_loader
worker.send_events = True
self.assertTrue(worker.startup_info())
# test when there are too few output lines
# to draft the ascii art onto
prev, cd.ARTLINES = cd.ARTLINES, ['the quick brown fox']
try:
self.assertTrue(worker.startup_info())
finally:
cd.ARTLINES = prev
@disable_stdouts
def test_run(self):
self.Worker(app=self.app).on_start()
self.Worker(app=self.app, purge=True).on_start()
worker = self.Worker(app=self.app)
worker.on_start()
@disable_stdouts
def test_purge_messages(self):
self.Worker(app=self.app).purge_messages()
@disable_stdouts
def test_init_queues(self):
app = self.app
c = app.conf
app.amqp.queues = app.amqp.Queues({
'celery': {'exchange': 'celery',
'routing_key': 'celery'},
'video': {'exchange': 'video',
'routing_key': 'video'},
})
worker = self.Worker(app=self.app)
worker.setup_queues(['video'])
self.assertIn('video', app.amqp.queues)
self.assertIn('video', app.amqp.queues.consume_from)
self.assertIn('celery', app.amqp.queues)
self.assertNotIn('celery', app.amqp.queues.consume_from)
c.CELERY_CREATE_MISSING_QUEUES = False
del(app.amqp.queues)
with self.assertRaises(ImproperlyConfigured):
self.Worker(app=self.app).setup_queues(['image'])
del(app.amqp.queues)
c.CELERY_CREATE_MISSING_QUEUES = True
worker = self.Worker(app=self.app)
worker.setup_queues(['image'])
self.assertIn('image', app.amqp.queues.consume_from)
self.assertEqual(
Queue('image', Exchange('image'), routing_key='image'),
app.amqp.queues['image'],
)
@disable_stdouts
def test_autoscale_argument(self):
worker1 = self.Worker(app=self.app, autoscale='10,3')
self.assertListEqual(worker1.autoscale, [10, 3])
worker2 = self.Worker(app=self.app, autoscale='10')
self.assertListEqual(worker2.autoscale, [10, 0])
self.assert_no_logging_side_effect()
def test_include_argument(self):
worker1 = self.Worker(app=self.app, include='os')
self.assertListEqual(worker1.include, ['os'])
worker2 = self.Worker(app=self.app,
include='os,sys')
self.assertListEqual(worker2.include, ['os', 'sys'])
self.Worker(app=self.app, include=['os', 'sys'])
@disable_stdouts
def test_unknown_loglevel(self):
with self.assertRaises(SystemExit):
worker(app=self.app).run(loglevel='ALIEN')
worker1 = self.Worker(app=self.app, loglevel=0xFFFF)
self.assertEqual(worker1.loglevel, 0xFFFF)
@disable_stdouts
@patch('os._exit')
def test_warns_if_running_as_privileged_user(self, _exit):
app = self.app
if app.IS_WINDOWS:
raise SkipTest('Not applicable on Windows')
with patch('os.getuid') as getuid:
getuid.return_value = 0
self.app.conf.CELERY_ACCEPT_CONTENT = ['pickle']
worker = self.Worker(app=self.app)
worker.on_start()
_exit.assert_called_with(1)
from celery import platforms
platforms.C_FORCE_ROOT = True
try:
with self.assertWarnsRegex(
RuntimeWarning,
r'absolutely not recommended'):
worker = self.Worker(app=self.app)
worker.on_start()
finally:
platforms.C_FORCE_ROOT = False
self.app.conf.CELERY_ACCEPT_CONTENT = ['json']
with self.assertWarnsRegex(
RuntimeWarning,
r'absolutely not recommended'):
worker = self.Worker(app=self.app)
worker.on_start()
@disable_stdouts
def test_redirect_stdouts(self):
self.Worker(app=self.app, redirect_stdouts=False)
with self.assertRaises(AttributeError):
sys.stdout.logger
@disable_stdouts
def test_on_start_custom_logging(self):
self.app.log.redirect_stdouts = Mock()
        worker = self.Worker(app=self.app, redirect_stdouts=True)
worker._custom_logging = True
worker.on_start()
self.assertFalse(self.app.log.redirect_stdouts.called)
def test_setup_logging_no_color(self):
worker = self.Worker(
app=self.app, redirect_stdouts=False, no_color=True,
)
prev, self.app.log.setup = self.app.log.setup, Mock()
try:
worker.setup_logging()
self.assertFalse(self.app.log.setup.call_args[1]['colorize'])
finally:
self.app.log.setup = prev
@disable_stdouts
def test_startup_info_pool_is_str(self):
worker = self.Worker(app=self.app, redirect_stdouts=False)
worker.pool_cls = 'foo'
worker.startup_info()
def test_redirect_stdouts_already_handled(self):
logging_setup = [False]
@signals.setup_logging.connect
def on_logging_setup(**kwargs):
logging_setup[0] = True
try:
worker = self.Worker(app=self.app, redirect_stdouts=False)
worker.app.log.already_setup = False
worker.setup_logging()
self.assertTrue(logging_setup[0])
with self.assertRaises(AttributeError):
sys.stdout.logger
finally:
signals.setup_logging.disconnect(on_logging_setup)
@disable_stdouts
def test_platform_tweaks_osx(self):
class OSXWorker(Worker):
proxy_workaround_installed = False
def osx_proxy_detection_workaround(self):
self.proxy_workaround_installed = True
worker = OSXWorker(app=self.app, redirect_stdouts=False)
def install_HUP_nosupport(controller):
controller.hup_not_supported_installed = True
class Controller(object):
pass
prev = cd.install_HUP_not_supported_handler
cd.install_HUP_not_supported_handler = install_HUP_nosupport
try:
worker.app.IS_OSX = True
controller = Controller()
worker.install_platform_tweaks(controller)
self.assertTrue(controller.hup_not_supported_installed)
self.assertTrue(worker.proxy_workaround_installed)
finally:
cd.install_HUP_not_supported_handler = prev
@disable_stdouts
def test_general_platform_tweaks(self):
restart_worker_handler_installed = [False]
def install_worker_restart_handler(worker):
restart_worker_handler_installed[0] = True
class Controller(object):
pass
prev = cd.install_worker_restart_handler
cd.install_worker_restart_handler = install_worker_restart_handler
try:
worker = self.Worker(app=self.app)
worker.app.IS_OSX = False
worker.install_platform_tweaks(Controller())
self.assertTrue(restart_worker_handler_installed[0])
finally:
cd.install_worker_restart_handler = prev
@disable_stdouts
def test_on_consumer_ready(self):
worker_ready_sent = [False]
@signals.worker_ready.connect
def on_worker_ready(**kwargs):
worker_ready_sent[0] = True
self.Worker(app=self.app).on_consumer_ready(object())
self.assertTrue(worker_ready_sent[0])
class test_funs(WorkerAppCase):
def test_active_thread_count(self):
self.assertTrue(cd.active_thread_count())
@disable_stdouts
def test_set_process_status(self):
try:
__import__('setproctitle')
except ImportError:
raise SkipTest('setproctitle not installed')
worker = Worker(app=self.app, hostname='xyzza')
prev1, sys.argv = sys.argv, ['Arg0']
try:
st = worker.set_process_status('Running')
self.assertIn('celeryd', st)
self.assertIn('xyzza', st)
self.assertIn('Running', st)
prev2, sys.argv = sys.argv, ['Arg0', 'Arg1']
try:
st = worker.set_process_status('Running')
self.assertIn('celeryd', st)
self.assertIn('xyzza', st)
self.assertIn('Running', st)
self.assertIn('Arg1', st)
finally:
sys.argv = prev2
finally:
sys.argv = prev1
@disable_stdouts
def test_parse_options(self):
cmd = worker()
cmd.app = self.app
opts, args = cmd.parse_options('worker', ['--concurrency=512',
'--heartbeat-interval=10'])
self.assertEqual(opts.concurrency, 512)
self.assertEqual(opts.heartbeat_interval, 10)
@disable_stdouts
def test_main(self):
p, cd.Worker = cd.Worker, Worker
s, sys.argv = sys.argv, ['worker', '--discard']
try:
worker_main(app=self.app)
finally:
cd.Worker = p
sys.argv = s
class test_signal_handlers(WorkerAppCase):
class _Worker(object):
stopped = False
terminated = False
def stop(self, in_sighandler=False):
self.stopped = True
def terminate(self, in_sighandler=False):
self.terminated = True
def psig(self, fun, *args, **kwargs):
handlers = {}
class Signals(platforms.Signals):
def __setitem__(self, sig, handler):
handlers[sig] = handler
p, platforms.signals = platforms.signals, Signals()
try:
fun(*args, **kwargs)
return handlers
finally:
platforms.signals = p
@disable_stdouts
def test_worker_int_handler(self):
worker = self._Worker()
handlers = self.psig(cd.install_worker_int_handler, worker)
next_handlers = {}
state.should_stop = False
state.should_terminate = False
class Signals(platforms.Signals):
def __setitem__(self, sig, handler):
next_handlers[sig] = handler
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 3
p, platforms.signals = platforms.signals, Signals()
try:
handlers['SIGINT']('SIGINT', object())
self.assertTrue(state.should_stop)
finally:
platforms.signals = p
state.should_stop = False
try:
next_handlers['SIGINT']('SIGINT', object())
self.assertTrue(state.should_terminate)
finally:
state.should_terminate = False
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 1
p, platforms.signals = platforms.signals, Signals()
try:
with self.assertRaises(WorkerShutdown):
handlers['SIGINT']('SIGINT', object())
finally:
platforms.signals = p
with self.assertRaises(WorkerTerminate):
next_handlers['SIGINT']('SIGINT', object())
@disable_stdouts
def test_worker_int_handler_only_stop_MainProcess(self):
try:
import _multiprocessing # noqa
except ImportError:
raise SkipTest('only relevant for multiprocessing')
process = current_process()
name, process.name = process.name, 'OtherProcess'
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 3
try:
worker = self._Worker()
handlers = self.psig(cd.install_worker_int_handler, worker)
handlers['SIGINT']('SIGINT', object())
self.assertTrue(state.should_stop)
finally:
process.name = name
state.should_stop = False
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 1
try:
worker = self._Worker()
handlers = self.psig(cd.install_worker_int_handler, worker)
with self.assertRaises(WorkerShutdown):
handlers['SIGINT']('SIGINT', object())
finally:
process.name = name
state.should_stop = False
@disable_stdouts
def test_install_HUP_not_supported_handler(self):
worker = self._Worker()
handlers = self.psig(cd.install_HUP_not_supported_handler, worker)
handlers['SIGHUP']('SIGHUP', object())
@disable_stdouts
def test_worker_term_hard_handler_only_stop_MainProcess(self):
try:
import _multiprocessing # noqa
except ImportError:
raise SkipTest('only relevant for multiprocessing')
process = current_process()
name, process.name = process.name, 'OtherProcess'
try:
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 3
worker = self._Worker()
handlers = self.psig(
cd.install_worker_term_hard_handler, worker)
try:
handlers['SIGQUIT']('SIGQUIT', object())
self.assertTrue(state.should_terminate)
finally:
state.should_terminate = False
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 1
worker = self._Worker()
handlers = self.psig(
cd.install_worker_term_hard_handler, worker)
with self.assertRaises(WorkerTerminate):
handlers['SIGQUIT']('SIGQUIT', object())
finally:
process.name = name
@disable_stdouts
def test_worker_term_handler_when_threads(self):
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 3
worker = self._Worker()
handlers = self.psig(cd.install_worker_term_handler, worker)
try:
handlers['SIGTERM']('SIGTERM', object())
self.assertTrue(state.should_stop)
finally:
state.should_stop = False
@disable_stdouts
def test_worker_term_handler_when_single_thread(self):
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 1
worker = self._Worker()
handlers = self.psig(cd.install_worker_term_handler, worker)
try:
with self.assertRaises(WorkerShutdown):
handlers['SIGTERM']('SIGTERM', object())
finally:
state.should_stop = False
@patch('sys.__stderr__')
@skip_if_pypy
@skip_if_jython
def test_worker_cry_handler(self, stderr):
handlers = self.psig(cd.install_cry_handler)
self.assertIsNone(handlers['SIGUSR1']('SIGUSR1', object()))
self.assertTrue(stderr.write.called)
@disable_stdouts
def test_worker_term_handler_only_stop_MainProcess(self):
try:
import _multiprocessing # noqa
except ImportError:
raise SkipTest('only relevant for multiprocessing')
process = current_process()
name, process.name = process.name, 'OtherProcess'
try:
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 3
worker = self._Worker()
handlers = self.psig(cd.install_worker_term_handler, worker)
handlers['SIGTERM']('SIGTERM', object())
self.assertTrue(state.should_stop)
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 1
worker = self._Worker()
handlers = self.psig(cd.install_worker_term_handler, worker)
with self.assertRaises(WorkerShutdown):
handlers['SIGTERM']('SIGTERM', object())
finally:
process.name = name
state.should_stop = False
@disable_stdouts
@patch('celery.platforms.close_open_fds')
@patch('atexit.register')
@patch('os.close')
def test_worker_restart_handler(self, _close, register, close_open):
if getattr(os, 'execv', None) is None:
            raise SkipTest('platform does not have execv')
argv = []
def _execv(*args):
argv.extend(args)
execv, os.execv = os.execv, _execv
try:
worker = self._Worker()
handlers = self.psig(cd.install_worker_restart_handler, worker)
handlers['SIGHUP']('SIGHUP', object())
self.assertTrue(state.should_stop)
self.assertTrue(register.called)
callback = register.call_args[0][0]
callback()
self.assertTrue(argv)
finally:
os.execv = execv
state.should_stop = False
@disable_stdouts
def test_worker_term_hard_handler_when_threaded(self):
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 3
worker = self._Worker()
handlers = self.psig(cd.install_worker_term_hard_handler, worker)
try:
handlers['SIGQUIT']('SIGQUIT', object())
self.assertTrue(state.should_terminate)
finally:
state.should_terminate = False
@disable_stdouts
def test_worker_term_hard_handler_when_single_threaded(self):
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 1
worker = self._Worker()
handlers = self.psig(cd.install_worker_term_hard_handler, worker)
with self.assertRaises(WorkerTerminate):
handlers['SIGQUIT']('SIGQUIT', object())
| gpl-2.0 |
JohannesBuchner/doit | doc/samples/parameters.py | 5 | 1732 | def task_py_params():
def show_params(param1, param2):
print(param1)
print(5 + param2)
return {'actions':[(show_params,)],
'params':[{'name':'param1',
'short':'p',
'default':'default value'},
{'name':'param2',
'long':'param2',
'type': int,
'default':0}],
'verbosity':2,
}
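# Illustrative invocation of the task above (assumes this file is loaded as a dodo.py):
#   doit py_params -p "some value" --param2=3
# which would print "some value" followed by 8.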
def task_py_params_list():
def print_a_list(list):
for item in list:
print(item)
return {'actions':[(print_a_list,)],
'params':[{'name':'list',
'short':'l',
'long': 'list',
'type': list,
'default': [],
'help': 'Collect a list with multiple -l flags'}],
'verbosity':2,
}
def task_py_params_choice():
def print_choice(choice):
print(choice)
return {'actions':[(print_choice,)],
'params':[{'name':'choice',
'short':'c',
'long': 'choice',
'type': str,
'choices': (('this', ''), ('that', '')),
'default': 'this',
'help': 'Choose between this and that'}],
'verbosity':2,}
def task_cmd_params():
return {'actions':["echo mycmd %(flag)s xxx"],
'params':[{'name':'flag',
'short':'f',
'long': 'flag',
'default': '',
'help': 'helpful message about this flag'}],
'verbosity': 2
}
| mit |
stackforge/monasca-api | monasca_api/policies/versions.py | 2 | 1175 | # Copyright 2018 OP5 AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_policy import policy
from monasca_api import policies
CONF = cfg.CONF
VERSIONS_ROLES = policies.roles_list_to_check_str(cfg.CONF.security.versions_roles)
rules = [
policy.DocumentedRuleDefault(
name='api:versions',
check_str=VERSIONS_ROLES,
description='List supported versions '
'or get the details about the specified version of Monasca API.',
operations=[
{'path': '/', 'method': 'GET'},
{'path': '/v2.0', 'method': 'GET'}
]
),
]
def list_rules():
return rules
| apache-2.0 |
uclaros/QGIS | tests/src/python/test_qgssymbolexpressionvariables.py | 45 | 4493 | # -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgssymbolexpressionvariables.py
---------------------
Date : January 2016
Copyright : (C) 2016 by Matthias Kuhn
Email : matthias at opengis dot ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matthias Kuhn'
__date__ = 'January 2016'
__copyright__ = '(C) 2016, Matthias Kuhn'
import qgis # NOQA
import os
from qgis.PyQt.QtCore import QSize
from qgis.core import (
QgsVectorLayer,
QgsProject,
QgsRectangle,
QgsMultiRenderChecker,
QgsSingleSymbolRenderer,
QgsFillSymbol,
QgsRenderContext,
QgsProperty,
QgsSymbolLayer
)
from qgis.testing import unittest, start_app
from qgis.testing.mocked import get_iface
from utilities import unitTestDataPath
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsSymbolExpressionVariables(unittest.TestCase):
def setUp(self):
myShpFile = os.path.join(TEST_DATA_DIR, 'polys.shp')
self.layer = QgsVectorLayer(myShpFile, 'Polys', 'ogr')
QgsProject.instance().addMapLayer(self.layer)
self.iface = get_iface()
rendered_layers = [self.layer]
self.mapsettings = self.iface.mapCanvas().mapSettings()
self.mapsettings.setOutputSize(QSize(400, 400))
self.mapsettings.setOutputDpi(96)
self.mapsettings.setExtent(QgsRectangle(-163, 22, -70, 52))
self.mapsettings.setLayers(rendered_layers)
def tearDown(self):
QgsProject.instance().removeAllMapLayers()
def testPartNum(self):
# Create rulebased style
sym1 = QgsFillSymbol.createSimple({'color': '#fdbf6f', 'outline_color': 'black'})
renderer = QgsSingleSymbolRenderer(sym1)
renderer.symbols(QgsRenderContext())[0].symbolLayers()[0].setDataDefinedProperty(QgsSymbolLayer.PropertyFillColor, QgsProperty.fromExpression('color_rgb( (@geometry_part_num - 1) * 200, 0, 0 )'))
self.layer.setRenderer(renderer)
# Setup rendering check
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(self.mapsettings)
renderchecker.setControlName('expected_geometry_part_num')
result = renderchecker.runTest('part_geometry_part_num')
self.assertTrue(result)
def testPartCount(self):
# Create rulebased style
sym1 = QgsFillSymbol.createSimple({'color': '#fdbf6f', 'outline_color': 'black'})
renderer = QgsSingleSymbolRenderer(sym1)
renderer.symbols(QgsRenderContext())[0].symbolLayers()[0].setDataDefinedProperty(QgsSymbolLayer.PropertyFillColor, QgsProperty.fromExpression('color_rgb( (@geometry_part_count - 1) * 200, 0, 0 )'))
self.layer.setRenderer(renderer)
# Setup rendering check
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(self.mapsettings)
renderchecker.setControlName('expected_geometry_part_count')
result = renderchecker.runTest('part_geometry_part_count')
self.assertTrue(result)
def testSymbolColor(self):
# Create rulebased style
sym1 = QgsFillSymbol.createSimple({'color': '#ff0000', 'outline_color': 'black'})
renderer = QgsSingleSymbolRenderer(sym1)
renderer.symbols(QgsRenderContext())[0].symbolLayers()[0].setDataDefinedProperty(QgsSymbolLayer.PropertyFillColor, QgsProperty.fromExpression('set_color_part( @symbol_color, \'value\', "Value" * 4)'))
self.layer.setRenderer(renderer)
# Setup rendering check
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(self.mapsettings)
renderchecker.setControlName('expected_symbol_color_variable')
result = renderchecker.runTest('symbol_color_variable', 50)
self.assertTrue(result)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
cfg2015/EPT-2015-2 | addons/fetchmail/fetchmail.py | 64 | 15663 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import time
from imaplib import IMAP4
from imaplib import IMAP4_SSL
from poplib import POP3
from poplib import POP3_SSL
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import zipfile
import base64
from openerp import addons
from openerp.osv import fields, osv
from openerp import tools, api
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MAX_POP_MESSAGES = 50
class fetchmail_server(osv.osv):
"""Incoming POP/IMAP mail server account"""
_name = 'fetchmail.server'
_description = "POP/IMAP Server"
_order = 'priority'
_columns = {
'name':fields.char('Name', required=True, readonly=False),
'active':fields.boolean('Active', required=False),
'state':fields.selection([
('draft', 'Not Confirmed'),
('done', 'Confirmed'),
], 'Status', select=True, readonly=True, copy=False),
'server' : fields.char('Server Name', readonly=True, help="Hostname or IP of the mail server", states={'draft':[('readonly', False)]}),
'port' : fields.integer('Port', readonly=True, states={'draft':[('readonly', False)]}),
'type':fields.selection([
('pop', 'POP Server'),
('imap', 'IMAP Server'),
('local', 'Local Server'),
], 'Server Type', select=True, required=True, readonly=False),
'is_ssl':fields.boolean('SSL/TLS', help="Connections are encrypted with SSL/TLS through a dedicated port (default: IMAPS=993, POP3S=995)"),
'attach':fields.boolean('Keep Attachments', help="Whether attachments should be downloaded. "
"If not enabled, incoming emails will be stripped of any attachments before being processed"),
        'original':fields.boolean('Keep Original', help="Whether a full original copy of each email should be kept for reference "
"and attached to each processed message. This will usually double the size of your message database."),
'date': fields.datetime('Last Fetch Date', readonly=True),
'user' : fields.char('Username', readonly=True, states={'draft':[('readonly', False)]}),
'password' : fields.char('Password', readonly=True, states={'draft':[('readonly', False)]}),
'action_id':fields.many2one('ir.actions.server', 'Server Action', help="Optional custom server action to trigger for each incoming mail, "
"on the record that was created or updated by this mail"),
'object_id': fields.many2one('ir.model', "Create a New Record", help="Process each incoming mail as part of a conversation "
"corresponding to this document type. This will create "
"new documents for new conversations, or attach follow-up "
"emails to the existing conversations (documents)."),
'priority': fields.integer('Server Priority', readonly=True, states={'draft':[('readonly', False)]}, help="Defines the order of processing, "
"lower values mean higher priority"),
'message_ids': fields.one2many('mail.mail', 'fetchmail_server_id', 'Messages', readonly=True),
'configuration' : fields.text('Configuration', readonly=True),
'script' : fields.char('Script', readonly=True),
}
_defaults = {
'state': "draft",
'type': "pop",
'active': True,
'priority': 5,
'attach': True,
'script': '/mail/static/scripts/openerp_mailgate.py',
}
def onchange_server_type(self, cr, uid, ids, server_type=False, ssl=False, object_id=False):
port = 0
values = {}
if server_type == 'pop':
port = ssl and 995 or 110
elif server_type == 'imap':
port = ssl and 993 or 143
else:
values['server'] = ''
values['port'] = port
conf = {
'dbname' : cr.dbname,
'uid' : uid,
'model' : 'MODELNAME',
}
if object_id:
m = self.pool.get('ir.model')
r = m.read(cr,uid,[object_id],['model'])
conf['model']=r[0]['model']
        values['configuration'] = """Use the below script with the following command line options in your Mail Transport Agent (MTA)
openerp_mailgate.py --host=HOSTNAME --port=PORT -u %(uid)d -p PASSWORD -d %(dbname)s
Example configuration for the postfix mta running locally:
/etc/postfix/virtual_aliases:
            @yourdomain openerp_mailgate@localhost
/etc/aliases:
openerp_mailgate: "|/path/to/openerp-mailgate.py --host=localhost -u %(uid)d -p PASSWORD -d %(dbname)s"
""" % conf
return {'value':values}
def set_draft(self, cr, uid, ids, context=None):
self.write(cr, uid, ids , {'state':'draft'})
return True
@api.cr_uid_ids_context
def connect(self, cr, uid, server_id, context=None):
if isinstance(server_id, (list,tuple)):
server_id = server_id[0]
server = self.browse(cr, uid, server_id, context)
if server.type == 'imap':
if server.is_ssl:
connection = IMAP4_SSL(server.server, int(server.port))
else:
connection = IMAP4(server.server, int(server.port))
connection.login(server.user, server.password)
elif server.type == 'pop':
if server.is_ssl:
connection = POP3_SSL(server.server, int(server.port))
else:
connection = POP3(server.server, int(server.port))
#TODO: use this to remove only unread messages
#connection.user("recent:"+server.user)
connection.user(server.user)
connection.pass_(server.password)
return connection
def button_confirm_login(self, cr, uid, ids, context=None):
if context is None:
context = {}
for server in self.browse(cr, uid, ids, context=context):
try:
connection = server.connect()
server.write({'state':'done'})
except Exception, e:
_logger.exception("Failed to connect to %s server %s.", server.type, server.name)
raise osv.except_osv(_("Connection test failed!"), _("Here is what we got instead:\n %s.") % tools.ustr(e))
finally:
try:
if connection:
if server.type == 'imap':
connection.close()
elif server.type == 'pop':
connection.quit()
except Exception:
# ignored, just a consequence of the previous exception
pass
return True
def _fetch_mails(self, cr, uid, ids=False, context=None):
if not ids:
ids = self.search(cr, uid, [('state','=','done'),('type','in',['pop','imap'])])
return self.fetch_mail(cr, uid, ids, context=context)
def fetch_mail(self, cr, uid, ids, context=None):
"""WARNING: meant for cron usage only - will commit() after each email!"""
context = dict(context or {})
context['fetchmail_cron_running'] = True
mail_thread = self.pool.get('mail.thread')
action_pool = self.pool.get('ir.actions.server')
for server in self.browse(cr, uid, ids, context=context):
_logger.info('start checking for new emails on %s server %s', server.type, server.name)
context.update({'fetchmail_server_id': server.id, 'server_type': server.type})
count, failed = 0, 0
imap_server = False
pop_server = False
if server.type == 'imap':
try:
imap_server = server.connect()
imap_server.select()
result, data = imap_server.search(None, '(UNSEEN)')
for num in data[0].split():
res_id = None
result, data = imap_server.fetch(num, '(RFC822)')
imap_server.store(num, '-FLAGS', '\\Seen')
try:
res_id = mail_thread.message_process(cr, uid, server.object_id.model,
data[0][1],
save_original=server.original,
strip_attachments=(not server.attach),
context=context)
except Exception:
_logger.exception('Failed to process mail from %s server %s.', server.type, server.name)
failed += 1
if res_id and server.action_id:
action_pool.run(cr, uid, [server.action_id.id], {'active_id': res_id, 'active_ids': [res_id], 'active_model': context.get("thread_model", server.object_id.model)})
imap_server.store(num, '+FLAGS', '\\Seen')
cr.commit()
count += 1
_logger.info("Fetched %d email(s) on %s server %s; %d succeeded, %d failed.", count, server.type, server.name, (count - failed), failed)
except Exception:
_logger.exception("General failure when trying to fetch mail from %s server %s.", server.type, server.name)
finally:
if imap_server:
imap_server.close()
imap_server.logout()
elif server.type == 'pop':
try:
while True:
pop_server = server.connect()
(numMsgs, totalSize) = pop_server.stat()
pop_server.list()
for num in range(1, min(MAX_POP_MESSAGES, numMsgs) + 1):
(header, msges, octets) = pop_server.retr(num)
msg = '\n'.join(msges)
res_id = None
try:
res_id = mail_thread.message_process(cr, uid, server.object_id.model,
msg,
save_original=server.original,
strip_attachments=(not server.attach),
context=context)
pop_server.dele(num)
except Exception:
_logger.exception('Failed to process mail from %s server %s.', server.type, server.name)
failed += 1
if res_id and server.action_id:
action_pool.run(cr, uid, [server.action_id.id], {'active_id': res_id, 'active_ids': [res_id], 'active_model': context.get("thread_model", server.object_id.model)})
cr.commit()
if numMsgs < MAX_POP_MESSAGES:
break
pop_server.quit()
_logger.info("Fetched %d email(s) on %s server %s; %d succeeded, %d failed.", numMsgs, server.type, server.name, (numMsgs - failed), failed)
except Exception:
_logger.exception("General failure when trying to fetch mail from %s server %s.", server.type, server.name)
finally:
if pop_server:
pop_server.quit()
server.write({'date': time.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)})
return True
def _update_cron(self, cr, uid, context=None):
if context and context.get('fetchmail_cron_running'):
return
try:
cron = self.pool['ir.model.data'].get_object(
cr, uid, 'fetchmail', 'ir_cron_mail_gateway_action', context=context)
except ValueError:
# Nevermind if default cron cannot be found
return
# Enabled/Disable cron based on the number of 'done' server of type pop or imap
cron.toggle(model=self._name, domain=[('state','=','done'), ('type','in',['pop','imap'])])
def create(self, cr, uid, values, context=None):
res = super(fetchmail_server, self).create(cr, uid, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def write(self, cr, uid, ids, values, context=None):
res = super(fetchmail_server, self).write(cr, uid, ids, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(fetchmail_server, self).unlink(cr, uid, ids, context=context)
self._update_cron(cr, uid, context=context)
return res
class mail_mail(osv.osv):
_inherit = "mail.mail"
_columns = {
'fetchmail_server_id': fields.many2one('fetchmail.server', "Inbound Mail Server",
readonly=True,
select=True,
oldname='server_id'),
}
def create(self, cr, uid, values, context=None):
if context is None:
context = {}
fetchmail_server_id = context.get('fetchmail_server_id')
if fetchmail_server_id:
values['fetchmail_server_id'] = fetchmail_server_id
res = super(mail_mail, self).create(cr, uid, values, context=context)
return res
def write(self, cr, uid, ids, values, context=None):
if context is None:
context = {}
fetchmail_server_id = context.get('fetchmail_server_id')
if fetchmail_server_id:
values['fetchmail_server_id'] = fetchmail_server_id
res = super(mail_mail, self).write(cr, uid, ids, values, context=context)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pumaking/hackracer | lib/flask/_compat.py | 783 | 2164 | # -*- coding: utf-8 -*-
"""
flask._compat
~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
PY2 = sys.version_info[0] == 2
_identity = lambda x: x
if not PY2:
text_type = str
string_types = (str,)
integer_types = (int, )
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
from io import StringIO
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
implements_to_string = _identity
else:
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
from cStringIO import StringIO
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a
# dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
# __init__ comes back from type etc.).
#
# This has the advantage over six.with_metaclass in that it does not
# introduce dummy classes into the final MRO.
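    # Illustrative usage (the names below are placeholders, not part of this module):
    #
    #   class Meta(type):
    #       pass
    #
    #   class Base(object):
    #       pass
    #
    #   class MyClass(with_metaclass(Meta, Base)):
    #       pass
    #
    # MyClass ends up with Meta as its metaclass and Base among its bases, while
    # the temporary helper class never appears in MyClass.__mro__.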
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
| apache-2.0 |
vladikr/nova_drafts | nova/tests/integrated/api/client.py | 3 | 11942 | # Copyright (c) 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib
import urllib
import six.moves.urllib.parse as urlparse
from nova.i18n import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.tests.image import fake
LOG = logging.getLogger(__name__)
class OpenStackApiException(Exception):
def __init__(self, message=None, response=None):
self.response = response
if not message:
message = 'Unspecified error'
if response:
_status = response.status
_body = response.read()
message = (_('%(message)s\nStatus Code: %(_status)s\n'
'Body: %(_body)s') %
{'message': message, '_status': _status,
'_body': _body})
super(OpenStackApiException, self).__init__(message)
class OpenStackApiAuthenticationException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = _("Authentication error")
super(OpenStackApiAuthenticationException, self).__init__(message,
response)
class OpenStackApiAuthorizationException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = _("Authorization error")
super(OpenStackApiAuthorizationException, self).__init__(message,
response)
class OpenStackApiNotFoundException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = _("Item not found")
super(OpenStackApiNotFoundException, self).__init__(message, response)
class TestOpenStackClient(object):
"""Simple OpenStack API Client.
This is a really basic OpenStack API client that is under our control,
so we can make changes / insert hooks for testing
"""
def __init__(self, auth_user, auth_key, auth_uri):
super(TestOpenStackClient, self).__init__()
self.auth_result = None
self.auth_user = auth_user
self.auth_key = auth_key
self.auth_uri = auth_uri
# default project_id
self.project_id = 'openstack'
def request(self, url, method='GET', body=None, headers=None):
_headers = {'Content-Type': 'application/json'}
_headers.update(headers or {})
parsed_url = urlparse.urlparse(url)
port = parsed_url.port
hostname = parsed_url.hostname
scheme = parsed_url.scheme
if scheme == 'http':
conn = httplib.HTTPConnection(hostname,
port=port)
elif scheme == 'https':
conn = httplib.HTTPSConnection(hostname,
port=port)
else:
raise OpenStackApiException("Unknown scheme: %s" % url)
relative_url = parsed_url.path
if parsed_url.query:
relative_url = relative_url + "?" + parsed_url.query
LOG.info(_("Doing %(method)s on %(relative_url)s") %
{'method': method, 'relative_url': relative_url})
if body:
LOG.info(_("Body: %s") % body)
conn.request(method, relative_url, body, _headers)
response = conn.getresponse()
return response
def _authenticate(self):
if self.auth_result:
return self.auth_result
auth_uri = self.auth_uri
headers = {'X-Auth-User': self.auth_user,
'X-Auth-Key': self.auth_key,
'X-Auth-Project-Id': self.project_id}
response = self.request(auth_uri,
headers=headers)
http_status = response.status
LOG.debug("%(auth_uri)s => code %(http_status)s",
{'auth_uri': auth_uri, 'http_status': http_status})
if http_status == 401:
raise OpenStackApiAuthenticationException(response=response)
auth_headers = {}
for k, v in response.getheaders():
auth_headers[k] = v
self.auth_result = auth_headers
return self.auth_result
def api_request(self, relative_uri, check_response_status=None,
strip_version=False, **kwargs):
auth_result = self._authenticate()
# NOTE(justinsb): httplib 'helpfully' converts headers to lower case
base_uri = auth_result['x-server-management-url']
if strip_version:
# NOTE(vish): cut out version number and tenant_id
base_uri = '/'.join(base_uri.split('/', 3)[:-1])
full_uri = '%s/%s' % (base_uri, relative_uri)
headers = kwargs.setdefault('headers', {})
headers['X-Auth-Token'] = auth_result['x-auth-token']
response = self.request(full_uri, **kwargs)
http_status = response.status
LOG.debug("%(relative_uri)s => code %(http_status)s",
{'relative_uri': relative_uri, 'http_status': http_status})
if check_response_status:
if http_status not in check_response_status:
if http_status == 404:
raise OpenStackApiNotFoundException(response=response)
elif http_status == 401:
raise OpenStackApiAuthorizationException(response=response)
else:
raise OpenStackApiException(
message=_("Unexpected status code"),
response=response)
return response
def _decode_json(self, response):
body = response.read()
LOG.debug("Decoding JSON: %s", body)
if body:
return jsonutils.loads(body)
else:
return ""
def api_get(self, relative_uri, **kwargs):
kwargs.setdefault('check_response_status', [200])
response = self.api_request(relative_uri, **kwargs)
return self._decode_json(response)
def api_post(self, relative_uri, body, **kwargs):
kwargs['method'] = 'POST'
if body:
headers = kwargs.setdefault('headers', {})
headers['Content-Type'] = 'application/json'
kwargs['body'] = jsonutils.dumps(body)
kwargs.setdefault('check_response_status', [200, 202])
response = self.api_request(relative_uri, **kwargs)
return self._decode_json(response)
def api_put(self, relative_uri, body, **kwargs):
kwargs['method'] = 'PUT'
if body:
headers = kwargs.setdefault('headers', {})
headers['Content-Type'] = 'application/json'
kwargs['body'] = jsonutils.dumps(body)
kwargs.setdefault('check_response_status', [200, 202, 204])
response = self.api_request(relative_uri, **kwargs)
return self._decode_json(response)
def api_delete(self, relative_uri, **kwargs):
kwargs['method'] = 'DELETE'
kwargs.setdefault('check_response_status', [200, 202, 204])
return self.api_request(relative_uri, **kwargs)
def get_server(self, server_id):
return self.api_get('/servers/%s' % server_id)['server']
def get_servers(self, detail=True, search_opts=None):
rel_url = '/servers/detail' if detail else '/servers'
if search_opts is not None:
qparams = {}
for opt, val in search_opts.iteritems():
qparams[opt] = val
if qparams:
query_string = "?%s" % urllib.urlencode(qparams)
rel_url += query_string
return self.api_get(rel_url)['servers']
def post_server(self, server):
response = self.api_post('/servers', server)
if 'reservation_id' in response:
return response
else:
return response['server']
def put_server(self, server_id, server):
return self.api_put('/servers/%s' % server_id, server)
def post_server_action(self, server_id, data):
return self.api_post('/servers/%s/action' % server_id, data)
def delete_server(self, server_id):
return self.api_delete('/servers/%s' % server_id)
def get_image(self, image_id):
return self.api_get('/images/%s' % image_id)['image']
def get_images(self, detail=True):
rel_url = '/images/detail' if detail else '/images'
return self.api_get(rel_url)['images']
def post_image(self, image):
return self.api_post('/images', image)['image']
def delete_image(self, image_id):
return self.api_delete('/images/%s' % image_id)
def get_flavor(self, flavor_id):
return self.api_get('/flavors/%s' % flavor_id)['flavor']
def get_flavors(self, detail=True):
rel_url = '/flavors/detail' if detail else '/flavors'
return self.api_get(rel_url)['flavors']
def post_flavor(self, flavor):
return self.api_post('/flavors', flavor)['flavor']
def delete_flavor(self, flavor_id):
return self.api_delete('/flavors/%s' % flavor_id)
def get_volume(self, volume_id):
return self.api_get('/volumes/%s' % volume_id)['volume']
def get_volumes(self, detail=True):
rel_url = '/volumes/detail' if detail else '/volumes'
return self.api_get(rel_url)['volumes']
def post_volume(self, volume):
return self.api_post('/volumes', volume)['volume']
def delete_volume(self, volume_id):
return self.api_delete('/volumes/%s' % volume_id)
def get_server_volume(self, server_id, attachment_id):
return self.api_get('/servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id))['volumeAttachment']
def get_server_volumes(self, server_id):
return self.api_get('/servers/%s/os-volume_attachments' %
(server_id))['volumeAttachments']
def post_server_volume(self, server_id, volume_attachment):
return self.api_post('/servers/%s/os-volume_attachments' %
(server_id), volume_attachment
)['volumeAttachment']
def delete_server_volume(self, server_id, attachment_id):
return self.api_delete('/servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id))
class TestOpenStackClientV3(TestOpenStackClient):
"""Simple OpenStack v3 API Client.
This is a really basic OpenStack API client that is under our control,
so we can make changes / insert hooks for testing.
Note that the V3 API does not have an image API and so it is
not possible to query the api for the image information.
So instead we just access the fake image service used by the unittests
directly.
"""
def get_image(self, image_id):
return fake._fakeImageService.show(None, image_id)
def get_images(self, detail=True):
return fake._fakeImageService.detail(None)
def post_image(self, image):
raise NotImplementedError
def delete_image(self, image_id):
return fake._fakeImageService.delete(None, image_id)
class TestOpenStackClientV3Mixin(object):
def _get_test_client(self):
return TestOpenStackClientV3('fake', 'fake', self.auth_url)
| apache-2.0 |
marcoarruda/MissionPlanner | Lib/lib2to3/main.py | 50 | 7057 | """
Main program for 2to3.
"""
from __future__ import with_statement
import sys
import os
import difflib
import logging
import shutil
import optparse
from . import refactor
def diff_texts(a, b, filename):
"""Return a unified diff of two strings."""
a = a.splitlines()
b = b.splitlines()
return difflib.unified_diff(a, b, filename, filename,
"(original)", "(refactored)",
lineterm="")
class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
"""
Prints output to stdout.
"""
def __init__(self, fixers, options, explicit, nobackups, show_diffs):
self.nobackups = nobackups
self.show_diffs = show_diffs
super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)
def log_error(self, msg, *args, **kwargs):
self.errors.append((msg, args, kwargs))
self.logger.error(msg, *args, **kwargs)
def write_file(self, new_text, filename, old_text, encoding):
if not self.nobackups:
# Make backup
backup = filename + ".bak"
if os.path.lexists(backup):
try:
os.remove(backup)
except os.error, err:
self.log_message("Can't remove backup %s", backup)
try:
os.rename(filename, backup)
except os.error, err:
self.log_message("Can't rename %s to %s", filename, backup)
# Actually write the new file
write = super(StdoutRefactoringTool, self).write_file
write(new_text, filename, old_text, encoding)
if not self.nobackups:
shutil.copymode(backup, filename)
def print_output(self, old, new, filename, equal):
if equal:
self.log_message("No changes to %s", filename)
else:
self.log_message("Refactored %s", filename)
if self.show_diffs:
diff_lines = diff_texts(old, new, filename)
try:
if self.output_lock is not None:
with self.output_lock:
for line in diff_lines:
print line
sys.stdout.flush()
else:
for line in diff_lines:
print line
except UnicodeEncodeError:
warn("couldn't encode %s's diff for your terminal" %
(filename,))
return
def warn(msg):
print >> sys.stderr, "WARNING: %s" % (msg,)
def main(fixer_pkg, args=None):
"""Main program.
Args:
fixer_pkg: the name of a package where the fixers are located.
args: optional; a list of command line arguments. If omitted,
sys.argv[1:] is used.
Returns a suggested exit status (0, 1, 2).
"""
# Set up option parser
parser = optparse.OptionParser(usage="2to3 [options] file|dir ...")
parser.add_option("-d", "--doctests_only", action="store_true",
help="Fix up doctests only")
parser.add_option("-f", "--fix", action="append", default=[],
help="Each FIX specifies a transformation; default: all")
parser.add_option("-j", "--processes", action="store", default=1,
type="int", help="Run 2to3 concurrently")
parser.add_option("-x", "--nofix", action="append", default=[],
help="Prevent a transformation from being run")
parser.add_option("-l", "--list-fixes", action="store_true",
help="List available transformations")
parser.add_option("-p", "--print-function", action="store_true",
help="Modify the grammar so that print() is a function")
parser.add_option("-v", "--verbose", action="store_true",
help="More verbose logging")
parser.add_option("--no-diffs", action="store_true",
help="Don't show diffs of the refactoring")
parser.add_option("-w", "--write", action="store_true",
help="Write back modified files")
parser.add_option("-n", "--nobackups", action="store_true", default=False,
help="Don't write backups for modified files")
# Parse command line arguments
refactor_stdin = False
flags = {}
options, args = parser.parse_args(args)
if not options.write and options.no_diffs:
warn("not writing files and not printing diffs; that's not very useful")
if not options.write and options.nobackups:
parser.error("Can't use -n without -w")
if options.list_fixes:
print "Available transformations for the -f/--fix option:"
for fixname in refactor.get_all_fix_names(fixer_pkg):
print fixname
if not args:
return 0
if not args:
print >> sys.stderr, "At least one file or directory argument required."
print >> sys.stderr, "Use --help to show usage."
return 2
if "-" in args:
refactor_stdin = True
if options.write:
print >> sys.stderr, "Can't write to stdin."
return 2
if options.print_function:
flags["print_function"] = True
# Set up logging handler
level = logging.DEBUG if options.verbose else logging.INFO
logging.basicConfig(format='%(name)s: %(message)s', level=level)
# Initialize the refactoring tool
avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
explicit = set()
if options.fix:
all_present = False
for fix in options.fix:
if fix == "all":
all_present = True
else:
explicit.add(fixer_pkg + ".fix_" + fix)
requested = avail_fixes.union(explicit) if all_present else explicit
else:
requested = avail_fixes.union(explicit)
fixer_names = requested.difference(unwanted_fixes)
rt = StdoutRefactoringTool(sorted(fixer_names), flags, sorted(explicit),
options.nobackups, not options.no_diffs)
# Refactor all files and directories passed as arguments
if not rt.errors:
if refactor_stdin:
rt.refactor_stdin()
else:
try:
rt.refactor(args, options.write, options.doctests_only,
options.processes)
except refactor.MultiprocessingUnsupported:
assert options.processes > 1
print >> sys.stderr, "Sorry, -j isn't " \
"supported on this platform."
return 1
rt.summarize()
# Return error status (0 if rt.errors is zero)
return int(bool(rt.errors))
| gpl-3.0 |
Sarah-Alsinan/muypicky | lib/python3.6/site-packages/pip/_vendor/html5lib/filters/lint.py | 328 | 3365 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from . import base
from ..constants import namespaces, voidElements
from ..constants import spaceCharacters
spaceCharacters = "".join(spaceCharacters)
class Filter(base.Filter):
def __init__(self, source, require_matching_tags=True):
super(Filter, self).__init__(source)
self.require_matching_tags = require_matching_tags
def __iter__(self):
open_elements = []
for token in base.Filter.__iter__(self):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
namespace = token["namespace"]
name = token["name"]
assert namespace is None or isinstance(namespace, text_type)
assert namespace != ""
assert isinstance(name, text_type)
assert name != ""
assert isinstance(token["data"], dict)
if (not namespace or namespace == namespaces["html"]) and name in voidElements:
assert type == "EmptyTag"
else:
assert type == "StartTag"
if type == "StartTag" and self.require_matching_tags:
open_elements.append((namespace, name))
for (namespace, name), value in token["data"].items():
assert namespace is None or isinstance(namespace, text_type)
assert namespace != ""
assert isinstance(name, text_type)
assert name != ""
assert isinstance(value, text_type)
elif type == "EndTag":
namespace = token["namespace"]
name = token["name"]
assert namespace is None or isinstance(namespace, text_type)
assert namespace != ""
assert isinstance(name, text_type)
assert name != ""
if (not namespace or namespace == namespaces["html"]) and name in voidElements:
assert False, "Void element reported as EndTag token: %(tag)s" % {"tag": name}
elif self.require_matching_tags:
start = open_elements.pop()
assert start == (namespace, name)
elif type == "Comment":
data = token["data"]
assert isinstance(data, text_type)
elif type in ("Characters", "SpaceCharacters"):
data = token["data"]
assert isinstance(data, text_type)
assert data != ""
if type == "SpaceCharacters":
assert data.strip(spaceCharacters) == ""
elif type == "Doctype":
name = token["name"]
assert name is None or isinstance(name, text_type)
assert token["publicId"] is None or isinstance(name, text_type)
assert token["systemId"] is None or isinstance(name, text_type)
elif type == "Entity":
assert isinstance(token["name"], text_type)
elif type == "SerializerError":
assert isinstance(token["data"], text_type)
else:
assert False, "Unknown token type: %(type)s" % {"type": type}
yield token
| mit |
lanfker/tdma_imac | .waf-1.6.7-0a94702c61504c487a251b8d0a04ca9a/waflib/Node.py | 4 | 11193 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/svn/docs/wafbook/single.html#_obtaining_the_waf_file
import sys
if sys.hexversion < 0x020400f0: from sets import Set as set
import os,re,sys,shutil
from waflib import Utils,Errors
exclude_regs='''
**/*~
**/#*#
**/.#*
**/%*%
**/._*
**/CVS
**/CVS/**
**/.cvsignore
**/SCCS
**/SCCS/**
**/vssver.scc
**/.svn
**/.svn/**
**/BitKeeper
**/.git
**/.git/**
**/.gitignore
**/.bzr
**/.bzrignore
**/.bzr/**
**/.hg
**/.hg/**
**/_MTN
**/_MTN/**
**/.arch-ids
**/{arch}
**/_darcs
**/_darcs/**
**/.DS_Store'''
def split_path(path):
return path.split('/')
def split_path_cygwin(path):
if path.startswith('//'):
ret=path.split('/')[2:]
ret[0]='/'+ret[0]
return ret
return path.split('/')
re_sp=re.compile('[/\\\\]')
def split_path_win32(path):
if path.startswith('\\\\'):
ret=re.split(re_sp,path)[2:]
ret[0]='\\'+ret[0]
return ret
return re.split(re_sp,path)
if sys.platform=='cygwin':
split_path=split_path_cygwin
elif Utils.is_win32:
split_path=split_path_win32
class Node(object):
__slots__=('name','sig','children','parent','cache_abspath','cache_isdir')
def __init__(self,name,parent):
self.name=name
self.parent=parent
if parent:
if name in parent.children:
raise Errors.WafError('node %s exists in the parent files %r already'%(name,parent))
parent.children[name]=self
def __setstate__(self,data):
self.name=data[0]
self.parent=data[1]
if data[2]is not None:
self.children=data[2]
if data[3]is not None:
self.sig=data[3]
def __getstate__(self):
return(self.name,self.parent,getattr(self,'children',None),getattr(self,'sig',None))
def __str__(self):
return self.name
def __repr__(self):
return self.abspath()
def __hash__(self):
return id(self)
def __eq__(self,node):
return id(self)==id(node)
def __copy__(self):
raise Errors.WafError('nodes are not supposed to be copied')
def read(self,flags='r'):
return Utils.readf(self.abspath(),flags)
def write(self,data,flags='w'):
f=None
try:
f=open(self.abspath(),flags)
f.write(data)
finally:
if f:
f.close()
def chmod(self,val):
os.chmod(self.abspath(),val)
def delete(self):
try:
if getattr(self,'children',None):
shutil.rmtree(self.abspath())
else:
os.unlink(self.abspath())
except:
pass
try:
delattr(self,'children')
except:
pass
def suffix(self):
k=max(0,self.name.rfind('.'))
return self.name[k:]
def height(self):
d=self
val=-1
while d:
d=d.parent
val+=1
return val
def listdir(self):
lst=Utils.listdir(self.abspath())
lst.sort()
return lst
def mkdir(self):
if getattr(self,'cache_isdir',None):
return
try:
self.parent.mkdir()
except:
pass
if self.name:
try:
os.makedirs(self.abspath())
except OSError:
pass
if not os.path.isdir(self.abspath()):
raise Errors.WafError('Could not create the directory %s'%self.abspath())
try:
self.children
except:
self.children={}
self.cache_isdir=True
def find_node(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
cur=self
for x in lst:
if x=='..':
cur=cur.parent or cur
continue
try:
if x in cur.children:
cur=cur.children[x]
continue
except:
cur.children={}
cur=self.__class__(x,cur)
try:
os.stat(cur.abspath())
except:
del cur.parent.children[x]
return None
ret=cur
try:
while not getattr(cur.parent,'cache_isdir',None):
cur=cur.parent
cur.cache_isdir=True
except AttributeError:
pass
return ret
def make_node(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
cur=self
for x in lst:
if x=='..':
cur=cur.parent or cur
continue
if getattr(cur,'children',{}):
if x in cur.children:
cur=cur.children[x]
continue
else:
cur.children={}
cur=self.__class__(x,cur)
return cur
def search(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
cur=self
try:
for x in lst:
if x=='..':
cur=cur.parent or cur
else:
cur=cur.children[x]
return cur
except:
pass
def path_from(self,node):
c1=self
c2=node
c1h=c1.height()
c2h=c2.height()
lst=[]
up=0
while c1h>c2h:
lst.append(c1.name)
c1=c1.parent
c1h-=1
while c2h>c1h:
up+=1
c2=c2.parent
c2h-=1
while id(c1)!=id(c2):
lst.append(c1.name)
up+=1
c1=c1.parent
c2=c2.parent
for i in range(up):
lst.append('..')
lst.reverse()
return os.sep.join(lst)or'.'
def abspath(self):
try:
return self.cache_abspath
except:
pass
if os.sep=='/':
if not self.parent:
val=os.sep
elif not self.parent.name:
val=os.sep+self.name
else:
val=self.parent.abspath()+os.sep+self.name
else:
if not self.parent:
val=''
elif not self.parent.name:
val=self.name+os.sep
else:
val=self.parent.abspath().rstrip(os.sep)+os.sep+self.name
self.cache_abspath=val
return val
def is_child_of(self,node):
p=self
diff=self.height()-node.height()
while diff>0:
diff-=1
p=p.parent
return id(p)==id(node)
def ant_iter(self,accept=None,maxdepth=25,pats=[],dir=False,src=True,remove=True):
dircont=self.listdir()
dircont.sort()
try:
lst=set(self.children.keys())
if remove:
for x in lst-set(dircont):
del self.children[x]
except:
self.children={}
for name in dircont:
npats=accept(name,pats)
if npats and npats[0]:
accepted=[]in npats[0]
node=self.make_node([name])
isdir=os.path.isdir(node.abspath())
if accepted:
if isdir:
if dir:
yield node
else:
if src:
yield node
if getattr(node,'cache_isdir',None)or isdir:
node.cache_isdir=True
if maxdepth:
for k in node.ant_iter(accept=accept,maxdepth=maxdepth-1,pats=npats,dir=dir,src=src):
yield k
raise StopIteration
def ant_glob(self,*k,**kw):
src=kw.get('src',True)
dir=kw.get('dir',False)
excl=kw.get('excl',exclude_regs)
incl=k and k[0]or kw.get('incl','**')
def to_pat(s):
lst=Utils.to_list(s)
ret=[]
for x in lst:
x=x.replace('\\','/').replace('//','/')
if x.endswith('/'):
x+='**'
lst2=x.split('/')
accu=[]
for k in lst2:
if k=='**':
accu.append(k)
else:
k=k.replace('.','[.]').replace('*','.*').replace('?','.').replace('+','\\+')
k='^%s$'%k
try:
accu.append(re.compile(k))
except Exception ,e:
raise Errors.WafError("Invalid pattern: %s"%k,e)
ret.append(accu)
return ret
def filtre(name,nn):
ret=[]
for lst in nn:
if not lst:
pass
elif lst[0]=='**':
ret.append(lst)
if len(lst)>1:
if lst[1].match(name):
ret.append(lst[2:])
else:
ret.append([])
elif lst[0].match(name):
ret.append(lst[1:])
return ret
def accept(name,pats):
nacc=filtre(name,pats[0])
nrej=filtre(name,pats[1])
if[]in nrej:
nacc=[]
return[nacc,nrej]
ret=[x for x in self.ant_iter(accept=accept,pats=[to_pat(incl),to_pat(excl)],maxdepth=25,dir=dir,src=src,remove=kw.get('remove',True))]
if kw.get('flat',False):
return' '.join([x.path_from(self)for x in ret])
return ret
def find_nodes(self,find_dirs=True,find_files=True,match_fun=lambda x:True):
x="""
Recursively finds nodes::
def configure(cnf):
cnf.find_nodes()
:param find_dirs: whether to return directories
:param find_files: whether to return files
:param match_fun: matching function, taking a node as parameter
:rtype generator
:return: a generator that iterates over all the requested files
"""
files=self.listdir()
for f in files:
node=self.make_node([f])
if os.path.isdir(node.abspath()):
if find_dirs and match_fun(node):
yield node
gen=node.find_nodes(find_dirs,find_files,match_fun)
for g in gen:
yield g
else:
if find_files and match_fun(node):
yield node
def is_src(self):
cur=self
x=id(self.ctx.srcnode)
y=id(self.ctx.bldnode)
while cur.parent:
if id(cur)==y:
return False
if id(cur)==x:
return True
cur=cur.parent
return False
def is_bld(self):
cur=self
y=id(self.ctx.bldnode)
while cur.parent:
if id(cur)==y:
return True
cur=cur.parent
return False
def get_src(self):
cur=self
x=id(self.ctx.srcnode)
y=id(self.ctx.bldnode)
lst=[]
while cur.parent:
if id(cur)==y:
lst.reverse()
return self.ctx.srcnode.make_node(lst)
if id(cur)==x:
return self
lst.append(cur.name)
cur=cur.parent
return self
def get_bld(self):
cur=self
x=id(self.ctx.srcnode)
y=id(self.ctx.bldnode)
lst=[]
while cur.parent:
if id(cur)==y:
return self
if id(cur)==x:
lst.reverse()
return self.ctx.bldnode.make_node(lst)
lst.append(cur.name)
cur=cur.parent
return self
def find_resource(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
node=self.get_bld().search(lst)
if not node:
self=self.get_src()
node=self.search(lst)
if not node:
node=self.find_node(lst)
try:
pat=node.abspath()
if os.path.isdir(pat):
return None
except:
pass
return node
def find_or_declare(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
node=self.get_bld().search(lst)
if node:
if not os.path.isfile(node.abspath()):
node.sig=None
try:
node.parent.mkdir()
except:
pass
return node
self=self.get_src()
node=self.find_node(lst)
if node:
if not os.path.isfile(node.abspath()):
node.sig=None
try:
node.parent.mkdir()
except:
pass
return node
node=self.get_bld().make_node(lst)
node.parent.mkdir()
return node
def find_dir(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
node=self.find_node(lst)
try:
if not os.path.isdir(node.abspath()):
return None
except(OSError,AttributeError):
return None
return node
def change_ext(self,ext,ext_in=None):
name=self.name
if ext_in is None:
k=name.rfind('.')
if k>=0:
name=name[:k]+ext
else:
name=name+ext
else:
name=name[:-len(ext_in)]+ext
return self.parent.find_or_declare([name])
def nice_path(self,env=None):
return self.path_from(self.ctx.launch_node())
def bldpath(self):
return self.path_from(self.ctx.bldnode)
def srcpath(self):
return self.path_from(self.ctx.srcnode)
def relpath(self):
cur=self
x=id(self.ctx.bldnode)
while cur.parent:
if id(cur)==x:
return self.bldpath()
cur=cur.parent
return self.srcpath()
def bld_dir(self):
return self.parent.bldpath()
def bld_base(self):
s=os.path.splitext(self.name)[0]
return self.bld_dir()+os.sep+s
def get_bld_sig(self):
try:
ret=self.ctx.hash_cache[id(self)]
except KeyError:
pass
except AttributeError:
self.ctx.hash_cache={}
else:
return ret
if not self.is_bld()or self.ctx.bldnode is self.ctx.srcnode:
self.sig=Utils.h_file(self.abspath())
self.ctx.hash_cache[id(self)]=ret=self.sig
return ret
pickle_lock=Utils.threading.Lock()
class Nod3(Node):
pass
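# Illustrative sketch, not part of the stripped waf file: how build scripts
# typically drive Node objects. 'bld' is a hypothetical waf build context
# whose .path attribute is a Node rooted at the project directory.
def _example_node_usage(bld):
    sources = bld.path.ant_glob('src/**/*.c')        # recursive glob, honours exclude_regs
    header = bld.path.find_node('include/config.h')  # None if the file does not exist on disk
    out_dir = bld.path.make_node('build/generated')  # created lazily in memory...
    out_dir.mkdir()                                  # ...and here on the filesystem
    return sources, header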
| gpl-2.0 |
siutanwong/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 142 | 18692 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
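# Illustrative sketch, not part of the original test module: the basic
# fit/query pattern that the tests below exercise. Shapes and parameter
# values are arbitrary choices for the demo.
def _example_lshforest_usage():
    rng = np.random.RandomState(0)
    X = rng.rand(20, 4)
    lshf = LSHForest(n_estimators=5, n_candidates=10, random_state=0).fit(X)
    distances, indices = lshf.kneighbors(X[:2], n_neighbors=3)
    return distances.shape, indices.shape  # (2, 3) and (2, 3)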
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)]
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)]
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [1., 0.]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
    # If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
    # size of _input_array = n_samples + n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
    # size of original_indices_[0] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
    # size of trees_[1] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
AngelkPetkov/titanium_mobile | support/iphone/filetail.py | 37 | 7564 | ## {{{ http://code.activestate.com/recipes/436477/ (r1)
# filetail.py
# Copyright (C) 2005 by The Trustees of the University of Pennsylvania
# Licensed under the Python license http://www.python.org/psf/license/
# Author: Jon Moore
"""
Module to allow for reading lines from a continuously-growing file (such as
a system log). Handles log files that get rotated/truncated out from under
us. Inspired by the Perl File::Tail module.
Example:
t = filetail.Tail("log.txt")
while True:
line = t.nextline()
# do something with the line
or:
t = filetail.Tail("log.txt")
for line in t:
# do something
pass
"""
from os import stat
from os.path import abspath
from stat import ST_SIZE
from time import sleep, time
class Tail(object):
"""The Tail monitor object."""
def __init__(self, path, only_new = False,
min_sleep = 1,
sleep_interval = 1,
max_sleep = 60):
"""Initialize a tail monitor.
path: filename to open
only_new: By default, the tail monitor will start reading from
the beginning of the file when first opened. Set only_new to
True to have it skip to the end when it first opens, so that
you only get the new additions that arrive after you start
monitoring.
min_sleep: Shortest interval in seconds to sleep when waiting
for more input to arrive. Defaults to 1.0 second.
sleep_interval: The tail monitor will dynamically recompute an
appropriate sleep interval based on a sliding window of data
arrival rate. You can set sleep_interval here to seed it
initially if the default of 1.0 second doesn't work for you
and you don't want to wait for it to converge.
max_sleep: Maximum interval in seconds to sleep when waiting
for more input to arrive. Also, if this many seconds have
elapsed without getting any new data, the tail monitor will
check to see if the log got truncated (rotated) and will
quietly reopen itself if this was the case. Defaults to 60.0
seconds.
"""
# remember path to file in case I need to reopen
self.path = abspath(path)
self.f = open(self.path,"r")
self.min_sleep = min_sleep * 1.0
self.sleep_interval = sleep_interval * 1.0
self.max_sleep = max_sleep * 1.0
if only_new:
# seek to current end of file
file_len = stat(path)[ST_SIZE]
self.f.seek(file_len)
self.pos = self.f.tell() # where am I in the file?
self.last_read = time() # when did I last get some data?
self.queue = [] # queue of lines that are ready
self.window = [] # sliding window for dynamically
# adjusting the sleep_interval
def _recompute_rate(self, n, start, stop):
"""Internal function for recomputing the sleep interval. I get
called with a number of lines that appeared between the start and
stop times; this will get added to a sliding window, and I will
recompute the average interarrival rate over the last window.
"""
self.window.append((n, start, stop))
purge_idx = -1 # index of the highest old record
tot_n = 0 # total arrivals in the window
tot_start = stop # earliest time in the window
tot_stop = start # latest time in the window
for i, record in enumerate(self.window):
(i_n, i_start, i_stop) = record
if i_stop < start - self.max_sleep:
# window size is based on self.max_sleep; this record has
# fallen out of the window
purge_idx = i
else:
tot_n += i_n
if i_start < tot_start: tot_start = i_start
if i_stop > tot_stop: tot_stop = i_stop
if purge_idx >= 0:
# clean the old records out of the window (slide the window)
self.window = self.window[purge_idx+1:]
if tot_n > 0:
# recompute; stay within bounds
self.sleep_interval = (tot_stop - tot_start) / tot_n
if self.sleep_interval > self.max_sleep:
self.sleep_interval = self.max_sleep
if self.sleep_interval < self.min_sleep:
self.sleep_interval = self.min_sleep
def _fill_cache(self):
"""Internal method for grabbing as much data out of the file as is
available and caching it for future calls to nextline(). Returns
the number of lines just read.
"""
old_len = len(self.queue)
line = self.f.readline()
while line != "":
self.queue.append(line)
line = self.f.readline()
# how many did we just get?
num_read = len(self.queue) - old_len
if num_read > 0:
self.pos = self.f.tell()
now = time()
self._recompute_rate(num_read, self.last_read, now)
self.last_read = now
return num_read
def _dequeue(self):
"""Internal method; returns the first available line out of the
cache, if any."""
if len(self.queue) > 0:
line = self.queue[0]
self.queue = self.queue[1:]
return line
else:
return None
def _reset(self):
"""Internal method; reopen the internal file handle (probably
because the log file got rotated/truncated)."""
self.f.close()
self.f = open(self.path, "r")
self.pos = self.f.tell()
self.last_read = time()
def nextline(self):
"""Return the next line from the file. Blocks if there are no lines
immediately available."""
# see if we have any lines cached from the last file read
line = self._dequeue()
if line:
return line
# ok, we are out of cache; let's get some lines from the file
if self._fill_cache() > 0:
# got some
return self._dequeue()
# hmm, still no input available
while True:
sleep(self.sleep_interval)
if self._fill_cache() > 0:
return self._dequeue()
now = time()
if (now - self.last_read > self.max_sleep):
# maybe the log got rotated out from under us?
if stat(self.path)[ST_SIZE] < self.pos:
# file got truncated and/or re-created
self._reset()
if self._fill_cache() > 0:
return self._dequeue()
def close(self):
"""Close the tail monitor, discarding any remaining input."""
self.f.close()
self.f = None
self.queue = []
self.window = []
def __iter__(self):
"""Iterator interface, so you can do:
for line in filetail.Tail('log.txt'):
# do stuff
pass
"""
return self
def next(self):
"""Kick the iterator interface. Used under the covers to support:
for line in filetail.Tail('log.txt'):
# do stuff
pass
"""
return self.nextline()
## end of http://code.activestate.com/recipes/436477/ }}}
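# Illustrative sketch, not part of the original recipe: tailing a file for new
# lines only and stopping after a bounded number of lines. The path is a
# placeholder and must point to an existing file; nextline() blocks until data
# arrives, so a real caller would usually run this in its own thread.
def _example_tail(path="log.txt", max_lines=100):
    t = Tail(path, only_new=True)
    lines = []
    for line in t:
        lines.append(line)
        if len(lines) >= max_lines:
            break
    t.close()
    return lines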
| apache-2.0 |
gurgeh/data-preppy | convert_coords.py | 1 | 2602 | import math
# Code converted from JS: http://latlong.mellifica.se/
axis = 6378137.0
flattening = 1.0 / 298.257222101
central_meridian = 15.00
lat_of_origin = 0.0
scale = 0.9996
false_northing = 0.0
false_easting = 500000.0
def grid_to_geodetic(x, y):
e2 = flattening * (2.0 - flattening)
n = flattening / (2.0 - flattening)
a_roof = axis / (1.0 + n) * (1.0 + n * n / 4.0 + n * n * n * n / 64.0)
delta1 = n / 2.0 - 2.0 * n * n / 3.0 + 37.0 * n * n * n / 96.0 - n * n * n * n / 360.0
delta2 = n * n / 48.0 + n * n * n / 15.0 - 437.0 * n * n * n * n / 1440.0
delta3 = 17.0 * n * n * n / 480.0 - 37 * n * n * n * n / 840.0
delta4 = 4397.0 * n * n * n * n / 161280.0
Astar = e2 + e2 * e2 + e2 * e2 * e2 + e2 * e2 * e2 * e2
Bstar = -(7.0 * e2 * e2 + 17.0 * e2 * e2 * e2 + 30.0 * e2 * e2 * e2 * e2) / 6.0
Cstar = (224.0 * e2 * e2 * e2 + 889.0 * e2 * e2 * e2 * e2) / 120.0
Dstar = -(4279.0 * e2 * e2 * e2 * e2) / 1260.0
deg_to_rad = math.pi / 180
lambda_zero = central_meridian * deg_to_rad
xi = (x - false_northing) / (scale * a_roof)
eta = (y - false_easting) / (scale * a_roof)
xi_prim = xi - delta1 * math.sin(2.0 * xi) * math.cosh(2.0 * eta) - \
delta2 * math.sin(4.0 * xi) * math.cosh(4.0 * eta) - \
delta3 * math.sin(6.0 * xi) * math.cosh(6.0 * eta) - \
delta4 * math.sin(8.0 * xi) * math.cosh(8.0 * eta)
eta_prim = eta - \
delta1 * math.cos(2.0 * xi) * math.sinh(2.0 * eta) - \
delta2 * math.cos(4.0 * xi) * math.sinh(4.0 * eta) - \
delta3 * math.cos(6.0 * xi) * math.sinh(6.0 * eta) - \
delta4 * math.cos(8.0 * xi) * math.sinh(8.0 * eta)
phi_star = math.asin(math.sin(xi_prim) / math.cosh(eta_prim))
delta_lambda = math.atan(math.sinh(eta_prim) / math.cos(xi_prim))
lon_radian = lambda_zero + delta_lambda
lat_radian = phi_star + math.sin(phi_star) * math.cos(phi_star) * \
(Astar +
Bstar * math.pow(math.sin(phi_star), 2) +
Cstar * math.pow(math.sin(phi_star), 4) +
Dstar * math.pow(math.sin(phi_star), 6))
return lat_radian * 180.0 / math.pi, lon_radian * 180.0 / math.pi
def distance_in_km((lat1, lon1), (lat2, lon2)):
R = 6371 # Radius of the earth in km
dLat = math.radians(lat2 - lat1)
dLon = math.radians(lon2 - lon1)
a = math.sin(dLat / 2) * math.sin(dLat / 2) + \
math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * \
math.sin(dLon / 2) * math.sin(dLon / 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = R * c # Distance in km
return d
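# Illustrative sketch, not part of the original module: converting two grid
# points and measuring the distance between them. The coordinate values are
# made up; with the module-level constants above, x is the northing and y the
# easting of a SWEREF 99 TM style projection.
def _example_conversion():
    p1 = grid_to_geodetic(6580000.0, 674000.0)  # hypothetical point
    p2 = grid_to_geodetic(6581000.0, 674000.0)  # ~1 km further north
    return p1, p2, distance_in_km(p1, p2)       # distance should come out near 1 km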
| apache-2.0 |
lasershow/codecombat | scripts/devSetup/repositoryInstaller.py | 70 | 4158 | from __future__ import print_function
__author__ = u'schmatz'
import configuration
import errors
import subprocess
import os
import sys
from which import which
#git clone https://github.com/nwinter/codecombat.git coco
class RepositoryInstaller():
def __init__(self,config):
self.config = config
assert isinstance(self.config,configuration.Configuration)
if not self.checkIfGitExecutableExists():
if self.config.system.operating_system == "linux":
raise errors.CoCoError("Git is missing. Please install it (try 'sudo apt-get install git')\nIf you are not using Ubuntu then please see your Linux Distribution's documentation for help installing git.")
elif self.config.system.operating_system == "mac":
raise errors.CoCoError("Git is missing. Please install the Xcode command line tools.")
raise errors.CoCoError(u"Git is missing. Please install git.")
#http://stackoverflow.com/questions/9329243/xcode-4-4-and-later-install-command-line-tools
if not self.checkIfCurlExecutableExists():
if self.config.system.operating_system == "linux":
raise errors.CoCoError("Curl is missing. Please install it (try 'sudo apt-get install curl')\nIf you are not using Ubuntu then please see your Linux Distribution's documentation for help installing curl.")
elif self.config.system.operating_system == "mac":
raise errors.CoCoError("Curl is missing. Please install the Xcode command line tools.")
raise errors.CoCoError(u"Git is missing. Please install git.")
def checkIfGitExecutableExists(self):
gitPath = which(u"git")
if gitPath:
return True
else:
return False
#TODO: Refactor this into a more appropriate file
def checkIfCurlExecutableExists(self):
curlPath = which("curl")
if curlPath:
return True
else:
return False
def cloneRepository(self):
print(u"Cloning repository...")
#TODO: CHANGE THIS BEFORE LAUNCH
return_code = True
git_folder = self.config.directory.root_install_directory + os.sep + "coco"
print("Installing into " + git_folder)
return_code = subprocess.call("git clone " + self.config.repository_url +" coco",cwd=self.config.directory.root_install_directory,shell=True)
        #TODO: remove this on windows
subprocess.call("chown -R " +git_folder + " 0777",shell=True)
if return_code and self.config.system.operating_system != u"windows":
#raise errors.CoCoError("Failed to clone git repository")
import shutil
#import sys
#sys.stdout.flush()
raw_input(u"Copy it now")
#shutil.copytree(u"/Users/schmatz/coco",self.config.directory.root_install_directory + os.sep + u"coco")
print(u"Copied tree just for you")
#print("FAILED TO CLONE GIT REPOSITORY")
#input("Clone the repository and click any button to continue")
elif self.config.system.operating_system == u"windows":
raise errors.CoCoError(u"Windows doesn't support automated installations of npm at this point.")
else:
print(u"Cloned git repository")
def install_node_packages(self):
print(u"Installing node packages...")
#TODO: "Replace npm with more robust package
#npm_location = self.config.directory.bin_directory + os.sep + "node" + os.sep + "bin" + os.sep + "npm"
npm_location = u"npm"
if sys.version_info[0] == 2:
py_cmd = "python"
else:
py_cmd = subprocess.check_output(['which', 'python2'])
return_code = subprocess.call([npm_location, u"install",
"--python=" + py_cmd],
cwd=self.config.directory.root_dir +
os.sep + u"coco")
if return_code:
raise errors.CoCoError(u"Failed to install node packages")
else:
print(u"Installed node packages!")
| mit |
hecchi777/S3-SlaacSecuritySolution | impacket-0.9.11/examples/lookupsid.py | 1 | 5922 | #!/usr/bin/python
# Copyright (c) 2012-2014 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# $Id: lookupsid.py 1101 2014-01-14 22:15:30Z [email protected] $
#
# DCE/RPC lookup sid brute forcer example
#
# Author:
# Alberto Solino
#
# Reference for:
# DCE/RPC [MS-LSAT]
import socket
import string
import sys
import types
from impacket import uuid, ntlm, version
from impacket.dcerpc.v5 import transport, lsat, lsad
from impacket.dcerpc.v5.ndr import NULL
from impacket.dcerpc.v5.samr import SID_NAME_USE
from impacket.dcerpc.v5.dtypes import MAXIMUM_ALLOWED
import argparse
class LSALookupSid:
KNOWN_PROTOCOLS = {
'139/SMB': (r'ncacn_np:%s[\pipe\lsarpc]', 139),
'445/SMB': (r'ncacn_np:%s[\pipe\lsarpc]', 445),
'135/TCP': (r'ncacn_ip_tcp:%s', 135),
}
def __init__(self, username, password, domain, protocols = None,
hashes = None, maxRid=4000):
if not protocols:
protocols = LSALookupSid.KNOWN_PROTOCOLS.keys()
self.__username = username
self.__password = password
self.__protocols = [protocols]
self.__maxRid = int(maxRid)
self.__domain = domain
self.__lmhash = ''
self.__nthash = ''
if hashes is not None:
self.__lmhash, self.__nthash = hashes.split(':')
def dump(self, addr):
print 'Brute forcing SIDs at %s' % addr
# Try all requested protocols until one works.
entries = []
for protocol in self.__protocols:
protodef = LSALookupSid.KNOWN_PROTOCOLS[protocol]
port = protodef[1]
print "Trying protocol %s..." % protocol
stringbinding = protodef[0] % addr
rpctransport = transport.DCERPCTransportFactory(stringbinding)
rpctransport.set_dport(port)
if hasattr(rpctransport, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransport.set_credentials(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash)
try:
entries = self.__bruteForce(rpctransport, self.__maxRid)
except Exception, e:
import traceback
print traceback.print_exc()
print e
raise
else:
# Got a response. No need for further iterations.
break
def __bruteForce(self, rpctransport, maxRid):
dce = rpctransport.get_dce_rpc()
entries = []
dce.connect()
# Want encryption? Uncomment next line
# But make SIMULTANEOUS variable <= 100
#dce.set_auth_level(ntlm.NTLM_AUTH_PKT_PRIVACY)
# Want fragmentation? Uncomment next line
#dce.set_max_fragment_size(32)
dce.bind(lsat.MSRPC_UUID_LSAT)
resp = lsat.hLsarOpenPolicy2(dce, MAXIMUM_ALLOWED | lsat.POLICY_LOOKUP_NAMES)
policyHandle = resp['PolicyHandle']
resp = lsad.hLsarQueryInformationPolicy2(dce, policyHandle, lsad.POLICY_INFORMATION_CLASS.PolicyAccountDomainInformation)
domainSid = resp['PolicyInformation']['PolicyAccountDomainInfo']['DomainSid'].formatCanonical()
soFar = 0
SIMULTANEOUS = 1000
for j in range(maxRid/SIMULTANEOUS+1):
if (maxRid - soFar) / SIMULTANEOUS == 0:
sidsToCheck = (maxRid - soFar) % SIMULTANEOUS
else:
sidsToCheck = SIMULTANEOUS
sids = list()
for i in xrange(soFar, soFar+sidsToCheck):
sids.append(domainSid + '-%d' % (i))
try:
request = lsat.hLsarLookupSids(dce, policyHandle, sids,lsat.LSAP_LOOKUP_LEVEL.LsapLookupWksta)
except Exception, e:
if str(e).find('STATUS_NONE_MAPPED') >= 0:
soFar += SIMULTANEOUS
continue
elif str(e).find('STATUS_SOME_NOT_MAPPED') >= 0:
resp = e.get_packet()
else:
raise
for n, item in enumerate(resp['TranslatedNames']['Names']):
if item['Use'] != SID_NAME_USE.SidTypeUnknown:
print "%d: %s\\%s (%s)" % (soFar+n, resp['ReferencedDomains']['Domains'][item['DomainIndex']]['Name'], item['Name'], SID_NAME_USE.enumItems(item['Use']).name)
soFar += SIMULTANEOUS
dce.disconnect()
return entries
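# Illustrative sketch, not part of the original example script: driving the
# class programmatically instead of through the CLI below. The credentials
# and target address are placeholders.
def _example_programmatic_use():
    lookup = LSALookupSid('administrator', 'Password1', 'WORKGROUP',
                          protocols='445/SMB', maxRid=1000)
    lookup.dump('192.168.1.10')  # prints each resolved RID/name as it goes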
# Process command-line arguments.
if __name__ == '__main__':
print version.BANNER
parser = argparse.ArgumentParser()
parser.add_argument('target', action='store', help='[domain/][username[:password]@]<address>')
parser.add_argument('maxRid', action='store', default = '4000', nargs='?', help='max Rid to check (default 4000)')
parser.add_argument('protocol', choices=LSALookupSid.KNOWN_PROTOCOLS.keys(), nargs='?', default='445/SMB', help='transport protocol (default 445/SMB)')
group = parser.add_argument_group('authentication')
group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
options = parser.parse_args()
import re
domain, username, password, address = re.compile('(?:(?:([^/@:]*)/)?([^@:]*)(?::([^@]*))?@)?(.*)').match(options.target).groups('')
if domain is None:
domain = ''
if password == '' and username != '' and options.hashes is None:
from getpass import getpass
password = getpass("Password:")
lookup = LSALookupSid(username, password, domain, options.protocol, options.hashes, options.maxRid)
try:
lookup.dump(address)
except Exception, e:
pass
| apache-2.0 |
ambikeshwar1991/sandhi-2 | module/gr36/grc/python/convert_hier.py | 16 | 3773 | """
Copyright 2008-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from Constants import BLOCK_DTD
from .. base import ParseXML
from .. base import odict
def convert_hier(flow_graph, python_file):
#extract info from the flow graph
input_sigs = flow_graph.get_io_signaturev('in')
output_sigs = flow_graph.get_io_signaturev('out')
input_msgp = flow_graph.get_msg_pad_sources();
output_msgp = flow_graph.get_msg_pad_sinks();
parameters = flow_graph.get_parameters()
block_key = flow_graph.get_option('id')
block_name = flow_graph.get_option('title') or flow_graph.get_option('id').replace('_', ' ').title()
block_category = flow_graph.get_option('category')
block_desc = flow_graph.get_option('description')
block_author = flow_graph.get_option('author')
#build the nested data
block_n = odict()
block_n['name'] = block_name
block_n['key'] = block_key
block_n['category'] = block_category
block_n['import'] = 'execfile("%s")'%python_file
#make data
if parameters: block_n['make'] = '%s(\n\t%s,\n)'%(
block_key,
',\n\t'.join(['%s=$%s'%(param.get_id(), param.get_id()) for param in parameters]),
)
else: block_n['make'] = '%s()'%block_key
#callback data
block_n['callback'] = ['set_%s($%s)'%(param.get_id(), param.get_id()) for param in parameters]
#param data
params_n = list()
for param in parameters:
param_n = odict()
param_n['name'] = param.get_param('label').get_value() or param.get_id()
param_n['key'] = param.get_id()
param_n['value'] = param.get_param('value').get_value()
param_n['type'] = 'raw'
params_n.append(param_n)
block_n['param'] = params_n
#sink data stream ports
block_n['sink'] = list()
for input_sig in input_sigs:
sink_n = odict()
sink_n['name'] = input_sig['label']
sink_n['type'] = input_sig['type']
sink_n['vlen'] = input_sig['vlen']
if input_sig['optional']: sink_n['optional'] = '1'
block_n['sink'].append(sink_n)
#sink data msg ports
for input_sig in input_msgp:
sink_n = odict()
sink_n['name'] = input_sig.get_param("label").get_value();
sink_n['type'] = "message"
sink_n['optional'] = input_sig.get_param("optional").get_value();
block_n['sink'].append(sink_n)
#source data stream ports
block_n['source'] = list()
for output_sig in output_sigs:
source_n = odict()
source_n['name'] = output_sig['label']
source_n['type'] = output_sig['type']
source_n['vlen'] = output_sig['vlen']
if output_sig['optional']: source_n['optional'] = '1'
block_n['source'].append(source_n)
#source data msg ports
for output_sig in output_msgp:
source_n = odict()
source_n['name'] = output_sig.get_param("label").get_value();
source_n['type'] = "message"
source_n['optional'] = output_sig.get_param("optional").get_value();
block_n['source'].append(source_n)
#doc data
block_n['doc'] = "%s\n%s\n%s"%(block_author, block_desc, python_file)
block_n['grc_source'] = "%s"%(flow_graph.grc_file_path)
#write the block_n to file
xml_file = python_file + '.xml'
ParseXML.to_file({'block': block_n}, xml_file)
ParseXML.validate_dtd(xml_file, BLOCK_DTD)
| gpl-3.0 |
Pixyn25/blakecoin_p2pool | p2pool/test/test_p2p.py | 269 | 2724 | import random
from twisted.internet import defer, endpoints, protocol, reactor
from twisted.trial import unittest
from p2pool import networks, p2p
from p2pool.bitcoin import data as bitcoin_data
from p2pool.util import deferral
class Test(unittest.TestCase):
@defer.inlineCallbacks
def test_sharereq(self):
class MyNode(p2p.Node):
def __init__(self, df):
p2p.Node.__init__(self, lambda: None, 29333, networks.nets['bitcoin'], {}, set([('127.0.0.1', 9333)]), 0, 0, 0, 0)
self.df = df
def handle_share_hashes(self, hashes, peer):
peer.get_shares(
hashes=[hashes[0]],
parents=5,
stops=[],
).chainDeferred(self.df)
df = defer.Deferred()
n = MyNode(df)
n.start()
try:
yield df
finally:
yield n.stop()
@defer.inlineCallbacks
def test_tx_limit(self):
class MyNode(p2p.Node):
def __init__(self, df):
p2p.Node.__init__(self, lambda: None, 29333, networks.nets['bitcoin'], {}, set([('127.0.0.1', 9333)]), 0, 0, 0, 0)
self.df = df
self.sent_time = 0
@defer.inlineCallbacks
def got_conn(self, conn):
p2p.Node.got_conn(self, conn)
yield deferral.sleep(.5)
new_mining_txs = dict(self.mining_txs_var.value)
for i in xrange(3):
huge_tx = dict(
version=0,
tx_ins=[],
tx_outs=[dict(
value=0,
script='x'*900000,
)],
lock_time=i,
)
new_mining_txs[bitcoin_data.hash256(bitcoin_data.tx_type.pack(huge_tx))] = huge_tx
self.mining_txs_var.set(new_mining_txs)
self.sent_time = reactor.seconds()
def lost_conn(self, conn, reason):
self.df.callback(None)
try:
p2p.Protocol.max_remembered_txs_size *= 10
df = defer.Deferred()
n = MyNode(df)
n.start()
yield df
if not (n.sent_time <= reactor.seconds() <= n.sent_time + 1):
raise ValueError('node did not disconnect within 1 seconds of receiving too much tx data')
yield n.stop()
finally:
p2p.Protocol.max_remembered_txs_size //= 10
| gpl-3.0 |
EduPepperPD/pepper2013 | common/lib/capa/capa/util.py | 19 | 3128 | from calc import evaluator
from cmath import isinf
#-----------------------------------------------------------------------------
#
# Utility functions used in CAPA responsetypes
def compare_with_tolerance(v1, v2, tol):
''' Compare v1 to v2 with maximum tolerance tol
tol is relative if it ends in %; otherwise, it is absolute
- v1 : student result (number)
- v2 : instructor result (number)
- tol : tolerance (string representing a number)
'''
relative = tol.endswith('%')
if relative:
tolerance_rel = evaluator(dict(), dict(), tol[:-1]) * 0.01
tolerance = tolerance_rel * max(abs(v1), abs(v2))
else:
tolerance = evaluator(dict(), dict(), tol)
if isinf(v1) or isinf(v2):
# If an input is infinite, we can end up with `abs(v1-v2)` and
# `tolerance` both equal to infinity. Then, below we would have
# `inf <= inf` which is a fail. Instead, compare directly.
return v1 == v2
else:
return abs(v1 - v2) <= tolerance
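# Illustrative sketch, not part of the original module: how relative and
# absolute tolerances behave. The numeric values are made up for the demo.
def _tolerance_examples():
    # '2%' is relative: the allowed error is 0.02 * max(|v1|, |v2|)
    assert compare_with_tolerance(100.0, 101.0, '2%')      # off by 1, limit ~2.02
    assert not compare_with_tolerance(100.0, 103.0, '2%')  # off by 3, limit ~2.06
    # '0.5' is absolute: the allowed error is exactly 0.5
    assert compare_with_tolerance(1.0, 1.4, '0.5')
    assert not compare_with_tolerance(1.0, 1.6, '0.5')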
def contextualize_text(text, context): # private
''' Takes a string with variables. E.g. $a+$b.
Does a substitution of those variables from the context '''
if not text:
return text
for key in sorted(context, lambda x, y: cmp(len(y), len(x))):
# TODO (vshnayder): This whole replacement thing is a big hack
# right now--context contains not just the vars defined in the
# program, but also e.g. a reference to the numpy module.
# Should be a separate dict of variables that should be
# replaced.
if '$' + key in text:
try:
s = str(context[key])
except UnicodeEncodeError:
s = context[key].encode('utf8', errors='ignore')
text = text.replace('$' + key, s)
return text
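# Illustrative sketch, not part of the original module: substitution order
# matters, which is why keys are sorted longest-first above -- '$ab' must be
# replaced before '$a' can clobber it. The values here are made up.
def _contextualize_example():
    return contextualize_text("$a + $ab", {'a': '1', 'ab': '2'})  # -> "1 + 2"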
def convert_files_to_filenames(answers):
'''
Check for File objects in the dict of submitted answers,
convert File objects to their filename (string)
'''
new_answers = dict()
for answer_id in answers.keys():
answer = answers[answer_id]
# Files are stored as a list, even if one file
if is_list_of_files(answer):
new_answers[answer_id] = [f.name for f in answer]
else:
new_answers[answer_id] = answers[answer_id]
return new_answers
def is_list_of_files(files):
return isinstance(files, list) and all(is_file(f) for f in files)
def is_file(file_to_test):
'''
Duck typing to check if 'file_to_test' is a File object
'''
return all(hasattr(file_to_test, method) for method in ['read', 'name'])
def find_with_default(node, path, default):
"""
Look for a child of node using , and return its text if found.
Otherwise returns default.
Arguments:
node: lxml node
path: xpath search expression
default: value to return if nothing found
Returns:
node.find(path).text if the find succeeds, default otherwise.
"""
v = node.find(path)
if v is not None:
return v.text
else:
return default
| agpl-3.0 |
emergebtc/muddery | evennia/evennia/utils/ansi.py | 1 | 33100 | """
ANSI - Gives colour to text.
Use the codes defined in ANSIPARSER in your text
to apply colour to text according to the ANSI standard.
Examples:
This is %crRed text%cn and this is normal again.
This is {rRed text{n and this is normal again.
Mostly you should not need to call parse_ansi() explicitly;
it is run by Evennia just before returning data to/from the
user.
"""
import re
from evennia.utils import utils
from evennia.utils.utils import to_str, to_unicode
# ANSI definitions
ANSI_BEEP = "\07"
ANSI_ESCAPE = "\033"
ANSI_NORMAL = "\033[0m"
ANSI_UNDERLINE = "\033[4m"
ANSI_HILITE = "\033[1m"
ANSI_UNHILITE = "\033[22m"
ANSI_BLINK = "\033[5m"
ANSI_INVERSE = "\033[7m"
ANSI_INV_HILITE = "\033[1;7m"
ANSI_INV_BLINK = "\033[7;5m"
ANSI_BLINK_HILITE = "\033[1;5m"
ANSI_INV_BLINK_HILITE = "\033[1;5;7m"
# Foreground colors
ANSI_BLACK = "\033[30m"
ANSI_RED = "\033[31m"
ANSI_GREEN = "\033[32m"
ANSI_YELLOW = "\033[33m"
ANSI_BLUE = "\033[34m"
ANSI_MAGENTA = "\033[35m"
ANSI_CYAN = "\033[36m"
ANSI_WHITE = "\033[37m"
# Background colors
ANSI_BACK_BLACK = "\033[40m"
ANSI_BACK_RED = "\033[41m"
ANSI_BACK_GREEN = "\033[42m"
ANSI_BACK_YELLOW = "\033[43m"
ANSI_BACK_BLUE = "\033[44m"
ANSI_BACK_MAGENTA = "\033[45m"
ANSI_BACK_CYAN = "\033[46m"
ANSI_BACK_WHITE = "\033[47m"
# Formatting Characters
ANSI_RETURN = "\r\n"
ANSI_TAB = "\t"
ANSI_SPACE = " "
# Escapes
ANSI_ESCAPES = ("{{", "\\\\")
from collections import OrderedDict
_PARSE_CACHE = OrderedDict()
_PARSE_CACHE_SIZE = 10000
class ANSIParser(object):
"""
A class that parses ANSI markup
to ANSI command sequences
We also allow to escape colour codes
by prepending with a \ for xterm256,
an extra { for Merc-style codes
"""
def sub_ansi(self, ansimatch):
"""
Replacer used by `re.sub` to replace ANSI
markers with correct ANSI sequences
"""
return self.ansi_map.get(ansimatch.group(), "")
def sub_brightbg(self, ansimatch):
"""
Replacer used by `re.sub` to replace ANSI
bright background markers with Xterm256 replacement
"""
return self.ansi_bright_bgs.get(ansimatch.group(), "")
def sub_xterm256(self, rgbmatch, convert=False):
"""
This is a replacer method called by `re.sub` with the matched
tag. It must return the correct ansi sequence.
        The `convert` flag (passed in from the parser's xterm256 argument)
        determines whether to emit a true xterm256 sequence or fall back
        to the closest standard ANSI colour.
"""
if not rgbmatch:
return ""
# get tag, stripping the initial marker
rgbtag = rgbmatch.group()[1:]
background = rgbtag[0] == '['
if background:
red, green, blue = int(rgbtag[1]), int(rgbtag[2]), int(rgbtag[3])
else:
red, green, blue = int(rgbtag[0]), int(rgbtag[1]), int(rgbtag[2])
if convert:
colval = 16 + (red * 36) + (green * 6) + blue
#print "RGB colours:", red, green, blue
return "\033[%s8;5;%s%s%sm" % (3 + int(background), colval/100, (colval % 100)/10, colval%10)
else:
#print "ANSI convert:", red, green, blue
# xterm256 not supported, convert the rgb value to ansi instead
if red == green and red == blue and red < 2:
if background:
return ANSI_BACK_BLACK
elif red >= 1:
return ANSI_HILITE + ANSI_BLACK
else:
return ANSI_NORMAL + ANSI_BLACK
elif red == green and red == blue:
if background:
return ANSI_BACK_WHITE
elif red >= 4:
return ANSI_HILITE + ANSI_WHITE
else:
return ANSI_NORMAL + ANSI_WHITE
elif red > green and red > blue:
if background:
return ANSI_BACK_RED
elif red >= 3:
return ANSI_HILITE + ANSI_RED
else:
return ANSI_NORMAL + ANSI_RED
elif red == green and red > blue:
if background:
return ANSI_BACK_YELLOW
elif red >= 3:
return ANSI_HILITE + ANSI_YELLOW
else:
return ANSI_NORMAL + ANSI_YELLOW
elif red == blue and red > green:
if background:
return ANSI_BACK_MAGENTA
elif red >= 3:
return ANSI_HILITE + ANSI_MAGENTA
else:
return ANSI_NORMAL + ANSI_MAGENTA
elif green > blue:
if background:
return ANSI_BACK_GREEN
elif green >= 3:
return ANSI_HILITE + ANSI_GREEN
else:
return ANSI_NORMAL + ANSI_GREEN
elif green == blue:
if background:
return ANSI_BACK_CYAN
elif green >= 3:
return ANSI_HILITE + ANSI_CYAN
else:
return ANSI_NORMAL + ANSI_CYAN
else: # mostly blue
if background:
return ANSI_BACK_BLUE
elif blue >= 3:
return ANSI_HILITE + ANSI_BLUE
else:
return ANSI_NORMAL + ANSI_BLUE
def strip_raw_codes(self, string):
"""
Strips raw ANSI codes from a string.
"""
return self.ansi_regex.sub("", string)
def strip_mxp(self, string):
"""
Strips all MXP codes from a string.
"""
return self.mxp_sub.sub(r'\2', string)
def parse_ansi(self, string, strip_ansi=False, xterm256=False, mxp=False):
"""
Parses a string, subbing color codes according to
the stored mapping.
strip_ansi flag instead removes all ANSI markup.
"""
if hasattr(string, '_raw_string'):
if strip_ansi:
return string.clean()
else:
return string.raw()
if not string:
return ''
# check cached parsings
global _PARSE_CACHE
cachekey = "%s-%s-%s" % (string, strip_ansi, xterm256)
if cachekey in _PARSE_CACHE:
return _PARSE_CACHE[cachekey]
# pre-convert bright colors to xterm256 color tags
string = self.brightbg_sub.sub(self.sub_brightbg, string)
def do_xterm256(part):
return self.sub_xterm256(part, xterm256)
in_string = utils.to_str(string)
# do string replacement
parsed_string = ""
parts = self.ansi_escapes.split(in_string) + [" "]
for part, sep in zip(parts[::2], parts[1::2]):
pstring = self.xterm256_sub.sub(do_xterm256, part)
pstring = self.ansi_sub.sub(self.sub_ansi, pstring)
parsed_string += "%s%s" % (pstring, sep[0].strip())
if not mxp:
parsed_string = self.strip_mxp(parsed_string)
if strip_ansi:
# remove all ansi codes (including those manually
# inserted in string)
return self.strip_raw_codes(parsed_string)
# cache and crop old cache
_PARSE_CACHE[cachekey] = parsed_string
if len(_PARSE_CACHE) > _PARSE_CACHE_SIZE:
_PARSE_CACHE.popitem(last=False)
return parsed_string
# Mapping using {r {n etc
hilite = ANSI_HILITE
unhilite = ANSI_UNHILITE
ext_ansi_map = [
(r'{n', ANSI_NORMAL), # reset
(r'{/', ANSI_RETURN), # line break
(r'{-', ANSI_TAB), # tab
(r'{_', ANSI_SPACE), # space
(r'{*', ANSI_INVERSE), # invert
(r'{^', ANSI_BLINK), # blinking text (very annoying and not supported by all clients)
(r'{r', hilite + ANSI_RED),
(r'{g', hilite + ANSI_GREEN),
(r'{y', hilite + ANSI_YELLOW),
(r'{b', hilite + ANSI_BLUE),
(r'{m', hilite + ANSI_MAGENTA),
(r'{c', hilite + ANSI_CYAN),
(r'{w', hilite + ANSI_WHITE), # pure white
(r'{x', hilite + ANSI_BLACK), # dark grey
(r'{R', unhilite + ANSI_RED),
(r'{G', unhilite + ANSI_GREEN),
(r'{Y', unhilite + ANSI_YELLOW),
(r'{B', unhilite + ANSI_BLUE),
(r'{M', unhilite + ANSI_MAGENTA),
(r'{C', unhilite + ANSI_CYAN),
(r'{W', unhilite + ANSI_WHITE), # light grey
(r'{X', unhilite + ANSI_BLACK), # pure black
# hilight-able colors
(r'{h', hilite),
(r'{H', unhilite),
(r'{!R', ANSI_RED),
(r'{!G', ANSI_GREEN),
(r'{!Y', ANSI_YELLOW),
(r'{!B', ANSI_BLUE),
(r'{!M', ANSI_MAGENTA),
(r'{!C', ANSI_CYAN),
(r'{!W', ANSI_WHITE), # light grey
(r'{!X', ANSI_BLACK), # pure black
# normal ANSI backgrounds
(r'{[R', ANSI_BACK_RED),
(r'{[G', ANSI_BACK_GREEN),
(r'{[Y', ANSI_BACK_YELLOW),
(r'{[B', ANSI_BACK_BLUE),
(r'{[M', ANSI_BACK_MAGENTA),
(r'{[C', ANSI_BACK_CYAN),
(r'{[W', ANSI_BACK_WHITE), # light grey background
(r'{[X', ANSI_BACK_BLACK) # pure black background
]
ansi_bright_bgs = [
# "bright" ANSI backgrounds using xterm256 since ANSI
# standard does not support it (will
# fallback to dark ANSI background colors if xterm256
# is not supported by client)
(r'{[r', r'{[500'),
(r'{[g', r'{[050'),
(r'{[y', r'{[550'),
(r'{[b', r'{[005'),
(r'{[m', r'{[505'),
(r'{[c', r'{[055'),
(r'{[w', r'{[555'), # white background
(r'{[x', r'{[222')] # dark grey background
    # xterm256 color tags like {123 and %123. These are replaced directly by
# the sub_xterm256 method
xterm256_map = [
(r'%[0-5]{3}', ""), # %123 - foreground colour
(r'%\[[0-5]{3}', ""), # %[123 - background colour
(r'\{[0-5]{3}', ""), # {123 - foreground colour
(r'\{\[[0-5]{3}', "") # {[123 - background colour
]
mxp_re = r'\{lc(.*?)\{lt(.*?)\{le'
# prepare regex matching
brightbg_sub = re.compile(r"|".join([re.escape(tup[0]) for tup in ansi_bright_bgs]), re.DOTALL)
xterm256_sub = re.compile(r"|".join([tup[0] for tup in xterm256_map]), re.DOTALL)
ansi_sub = re.compile(r"|".join([re.escape(tup[0]) for tup in ext_ansi_map]), re.DOTALL)
mxp_sub = re.compile(mxp_re, re.DOTALL)
# used by regex replacer to correctly map ansi sequences
ansi_map = dict(ext_ansi_map)
ansi_bright_bgs = dict(ansi_bright_bgs)
# prepare matching ansi codes overall
ansi_re = r"\033\[[0-9;]+m"
ansi_regex = re.compile(ansi_re)
# escapes - these double-chars will be replaced with a single
# instance of each
ansi_escapes = re.compile(r"(%s)" % "|".join(ANSI_ESCAPES), re.DOTALL)
ANSI_PARSER = ANSIParser()
#
# Access function
#
def parse_ansi(string, strip_ansi=False, parser=ANSI_PARSER, xterm256=False, mxp=False):
"""
Parses a string, subbing color codes as needed.
"""
return parser.parse_ansi(string, strip_ansi=strip_ansi, xterm256=xterm256, mxp=mxp)
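def _example_parse_ansi():
    # Illustrative sketch (not part of the original module): {r...{n markup is
    # replaced by raw ANSI escapes, while strip_ansi=True drops all markup.
    colored = parse_ansi("{rWarning!{n")  # e.g. "\x1b[1m\x1b[31mWarning!\x1b[0m", depending on the ANSI_* constants
    plain = parse_ansi("{rWarning!{n", strip_ansi=True)  # -> "Warning!"
    return colored, plain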
def strip_raw_ansi(string, parser=ANSI_PARSER):
"""
Remove raw ansi codes from string
"""
return parser.strip_raw_codes(string)
def raw(string):
"""
Escapes a string into a form which won't be colorized by the ansi parser.
"""
return string.replace('{', '{{')
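def _example_raw_escape():
    # Illustrative sketch (not part of the original module): escape untrusted
    # input so "{r" is displayed literally instead of turning the text red.
    untrusted = "{rhello{n"
    return parse_ansi(raw(untrusted))  # renders the literal text "{rhello{n"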
def group(lst, n):
for i in range(0, len(lst), n):
val = lst[i:i+n]
if len(val) == n:
yield tuple(val)
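def _example_group():
    # Illustrative sketch (not part of the original module): chunk a flat
    # sequence into fixed-size tuples; an incomplete trailing chunk is dropped.
    return list(group([1, 2, 3, 4, 5, 6, 7], 3))  # -> [(1, 2, 3), (4, 5, 6)]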
def _spacing_preflight(func):
"""
This wrapper function is used to do some preflight checks on functions used
for padding ANSIStrings.
"""
def wrapped(self, width, fillchar=None):
if fillchar is None:
fillchar = " "
if (len(fillchar) != 1) or (not isinstance(fillchar, basestring)):
raise TypeError("must be char, not %s" % type(fillchar))
if not isinstance(width, int):
raise TypeError("integer argument expected, got %s" % type(width))
difference = width - len(self)
if difference <= 0:
return self
return func(self, width, fillchar, difference)
return wrapped
def _query_super(func_name):
"""
Have the string class handle this with the cleaned string instead of
ANSIString.
"""
def wrapped(self, *args, **kwargs):
return getattr(self.clean(), func_name)(*args, **kwargs)
return wrapped
def _on_raw(func_name):
"""
Like query_super, but makes the operation run on the raw string.
"""
def wrapped(self, *args, **kwargs):
args = list(args)
try:
string = args.pop(0)
if hasattr(string, '_raw_string'):
args.insert(0, string.raw())
else:
args.insert(0, string)
except IndexError:
pass
result = getattr(self._raw_string, func_name)(*args, **kwargs)
if isinstance(result, basestring):
return ANSIString(result, decoded=True)
return result
return wrapped
def _transform(func_name):
"""
Some string functions, like those manipulating capital letters,
return a string the same length as the original. This function
allows us to do the same, replacing all the non-coded characters
with the resulting string.
"""
def wrapped(self, *args, **kwargs):
replacement_string = _query_super(func_name)(self, *args, **kwargs)
to_string = []
char_counter = 0
for index in range(0, len(self._raw_string)):
if index in self._code_indexes:
to_string.append(self._raw_string[index])
elif index in self._char_indexes:
to_string.append(replacement_string[char_counter])
char_counter += 1
return ANSIString(
''.join(to_string), decoded=True,
code_indexes=self._code_indexes, char_indexes=self._char_indexes,
clean_string=replacement_string)
return wrapped
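def _example_transform_preserves_codes():
    # Illustrative sketch (not part of the original module): methods wrapped by
    # _transform (upper, lower, ...) replace only the readable characters, so
    # the ANSI code characters in the raw string stay in place.
    return ANSIString("{rhello{n").upper()  # clean text "HELLO", colors intact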
class ANSIMeta(type):
"""
Many functions on ANSIString are just light wrappers around the unicode
    base class. We apply them here as part of the class's construction.
"""
def __init__(cls, *args, **kwargs):
for func_name in [
'count', 'startswith', 'endswith', 'find', 'index', 'isalnum',
'isalpha', 'isdigit', 'islower', 'isspace', 'istitle', 'isupper',
'rfind', 'rindex', '__len__']:
setattr(cls, func_name, _query_super(func_name))
for func_name in [
'__mod__', 'expandtabs', 'decode', 'replace', 'format',
'encode']:
setattr(cls, func_name, _on_raw(func_name))
for func_name in [
'capitalize', 'translate', 'lower', 'upper', 'swapcase']:
setattr(cls, func_name, _transform(func_name))
super(ANSIMeta, cls).__init__(*args, **kwargs)
class ANSIString(unicode):
"""
String-like object that is aware of ANSI codes.
This isn't especially efficient, as it doesn't really have an
understanding of what the codes mean in order to eliminate
redundant characters. This could be made as an enhancement to ANSI_PARSER.
If one is going to use ANSIString, one should generally avoid converting
away from it until one is about to send information on the wire. This is
because escape sequences in the string may otherwise already be decoded,
and taken literally the second time around.
Please refer to the Metaclass, ANSIMeta, which is used to apply wrappers
for several of the methods that need not be defined directly here.
"""
__metaclass__ = ANSIMeta
def __new__(cls, *args, **kwargs):
"""
When creating a new ANSIString, you may use a custom parser that has
the same attributes as the standard one, and you may declare the
string to be handled as already decoded. It is important not to double
decode strings, as escapes can only be respected once.
        Internally, ANSIString can also pass itself precached code/character
indexes and clean strings to avoid doing extra work when combining
ANSIStrings.
"""
string = args[0]
if not isinstance(string, basestring):
string = to_str(string, force_string=True)
parser = kwargs.get('parser', ANSI_PARSER)
decoded = kwargs.get('decoded', False) or hasattr(string, '_raw_string')
code_indexes = kwargs.pop('code_indexes', None)
char_indexes = kwargs.pop('char_indexes', None)
clean_string = kwargs.pop('clean_string', None)
# All True, or All False, not just one.
checks = map(lambda x: x is None, [code_indexes, char_indexes, clean_string])
if not len(set(checks)) == 1:
raise ValueError("You must specify code_indexes, char_indexes, "
"and clean_string together, or not at all.")
if not all(checks):
decoded = True
if not decoded:
# Completely new ANSI String
clean_string = to_unicode(parser.parse_ansi(string, strip_ansi=True, mxp=True))
string = parser.parse_ansi(string, xterm256=True, mxp=True)
elif clean_string is not None:
# We have an explicit clean string.
pass
elif hasattr(string, '_clean_string'):
# It's already an ANSIString
clean_string = string._clean_string
code_indexes = string._code_indexes
char_indexes = string._char_indexes
string = string._raw_string
else:
# It's a string that has been pre-ansi decoded.
clean_string = parser.strip_raw_codes(string)
if not isinstance(string, unicode):
string = string.decode('utf-8')
ansi_string = super(ANSIString, cls).__new__(ANSIString, to_str(clean_string), "utf-8")
ansi_string._raw_string = string
ansi_string._clean_string = clean_string
ansi_string._code_indexes = code_indexes
ansi_string._char_indexes = char_indexes
return ansi_string
def __str__(self):
return self._raw_string.encode('utf-8')
def __unicode__(self):
"""
        Note that print() goes through __str__() rather than __unicode__()
        under Python 2, so this method is not used there automatically. You
        can always call unicode() or str() on the ANSIString and print that.
"""
return self._raw_string
def __repr__(self):
"""
Let's make the repr the command that would actually be used to
construct this object, for convenience and reference.
"""
return "ANSIString(%s, decoded=True)" % repr(self._raw_string)
def __init__(self, *_, **kwargs):
"""
When the ANSIString is first initialized, a few internal variables
have to be set.
The first is the parser. It is possible to replace Evennia's standard
ANSI parser with one of your own syntax if you wish, so long as it
implements the same interface.
The second is the _raw_string. It should be noted that the ANSIStrings
are unicode based. This seemed more reasonable than basing it off of
the string class, because if someone were to use a unicode character,
the benefits of knowing the indexes of the ANSI characters would be
negated by the fact that a character within the string might require
more than one byte to be represented. The raw string is, then, a
unicode object rather than a true encoded string. If you need the
encoded string for sending over the wire, try using the .encode()
method.
The third thing to set is the _clean_string. This is a unicode object
that is devoid of all ANSI Escapes.
Finally, _code_indexes and _char_indexes are defined. These are lookup
tables for which characters in the raw string are related to ANSI
escapes, and which are for the readable text.
"""
self.parser = kwargs.pop('parser', ANSI_PARSER)
super(ANSIString, self).__init__()
if self._code_indexes is None:
self._code_indexes, self._char_indexes = self._get_indexes()
@staticmethod
def _shifter(iterable, offset):
"""
Takes a list of integers, and produces a new one incrementing all
by a number.
"""
return [i + offset for i in iterable]
@classmethod
def _adder(cls, first, second):
"""
Joins two ANSIStrings, preserving calculated info.
"""
raw_string = first._raw_string + second._raw_string
clean_string = first._clean_string + second._clean_string
code_indexes = first._code_indexes[:]
char_indexes = first._char_indexes[:]
code_indexes.extend(
cls._shifter(second._code_indexes, len(first._raw_string)))
char_indexes.extend(
cls._shifter(second._char_indexes, len(first._raw_string)))
return ANSIString(raw_string, code_indexes=code_indexes,
char_indexes=char_indexes,
clean_string=clean_string)
def __add__(self, other):
"""
We have to be careful when adding two strings not to reprocess things
that don't need to be reprocessed, lest we end up with escapes being
interpreted literally.
"""
if not isinstance(other, basestring):
return NotImplemented
if not isinstance(other, ANSIString):
other = ANSIString(other)
return self._adder(self, other)
def __radd__(self, other):
"""
Likewise, if we're on the other end.
"""
if not isinstance(other, basestring):
return NotImplemented
if not isinstance(other, ANSIString):
other = ANSIString(other)
return self._adder(other, self)
def __getslice__(self, i, j):
"""
This function is deprecated, so we just make it call the proper
function.
"""
return self.__getitem__(slice(i, j))
def _slice(self, slc):
"""
This function takes a slice() object.
Slices have to be handled specially. Not only are they able to specify
a start and end with [x:y], but many forget that they can also specify
        a step with [x:y:z]. As a result, not only do we have to track
the ANSI Escapes that have played before the start of the slice, we
must also replay any in these intervals, should they exist.
Thankfully, slicing the _char_indexes table gives us the actual
indexes that need slicing in the raw string. We can check between
those indexes to figure out what escape characters need to be
replayed.
"""
slice_indexes = self._char_indexes[slc]
# If it's the end of the string, we need to append final color codes.
if not slice_indexes:
return ANSIString('')
try:
string = self[slc.start]._raw_string
except IndexError:
return ANSIString('')
last_mark = slice_indexes[0]
# Check between the slice intervals for escape sequences.
i = None
for i in slice_indexes[1:]:
for index in xrange(last_mark, i):
if index in self._code_indexes:
string += self._raw_string[index]
last_mark = i
try:
string += self._raw_string[i]
except IndexError:
pass
if i is not None:
append_tail = self._get_interleving(self._char_indexes.index(i) + 1)
else:
append_tail = ''
return ANSIString(string + append_tail, decoded=True)
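    # Illustrative note (not part of the original module): for
    # ANSIString("{rab{gcd")[1:3], the slice keeps the readable characters
    # "b" and "c", and the replay loop above re-inserts the {g escape that
    # sits between them, so "b" stays red and "c" stays green.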
def __getitem__(self, item):
"""
Gateway for slices and getting specific indexes in the ANSIString. If
this is a regexable ANSIString, it will get the data from the raw
string instead, bypassing ANSIString's intelligent escape skipping,
for reasons explained in the __new__ method's docstring.
"""
if isinstance(item, slice):
# Slices must be handled specially.
return self._slice(item)
try:
self._char_indexes[item]
except IndexError:
raise IndexError("ANSIString Index out of range")
# Get character codes after the index as well.
if self._char_indexes[-1] == self._char_indexes[item]:
append_tail = self._get_interleving(item + 1)
else:
append_tail = ''
item = self._char_indexes[item]
clean = self._raw_string[item]
result = ''
# Get the character they're after, and replay all escape sequences
# previous to it.
for index in xrange(0, item + 1):
if index in self._code_indexes:
result += self._raw_string[index]
return ANSIString(result + clean + append_tail, decoded=True)
def clean(self):
"""
Return a unicode object without the ANSI escapes.
"""
return self._clean_string
def raw(self):
"""
Return a unicode object with the ANSI escapes.
"""
return self._raw_string
def partition(self, sep, reverse=False):
"""
Similar to split, but always creates a tuple with three items:
1. The part before the separator
2. The separator itself.
3. The part after.
        We use the same techniques we used in split() to make sure each part is
colored.
"""
if hasattr(sep, '_clean_string'):
sep = sep.clean()
if reverse:
parent_result = self._clean_string.rpartition(sep)
else:
parent_result = self._clean_string.partition(sep)
current_index = 0
result = tuple()
for section in parent_result:
result += (self[current_index:current_index + len(section)],)
current_index += len(section)
return result
def _get_indexes(self):
"""
Two tables need to be made, one which contains the indexes of all
readable characters, and one which contains the indexes of all ANSI
escapes. It's important to remember that ANSI escapes require more
        than one character at a time, though no readable character needs more
than one character, since the unicode base class abstracts that away
from us. However, several readable characters can be placed in a row.
We must use regexes here to figure out where all the escape sequences
are hiding in the string. Then we use the ranges of their starts and
ends to create a final, comprehensive list of all indexes which are
dedicated to code, and all dedicated to text.
        It's possible that only one of these tables is actually needed, with
        the other derived as everything that isn't in the first.
"""
code_indexes = []
for match in self.parser.ansi_regex.finditer(self._raw_string):
code_indexes.extend(range(match.start(), match.end()))
if not code_indexes:
# Plain string, no ANSI codes.
return code_indexes, range(0, len(self._raw_string))
# all indexes not occupied by ansi codes are normal characters
char_indexes = [i for i in range(len(self._raw_string)) if i not in code_indexes]
return code_indexes, char_indexes
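    # Illustrative note (not part of the original module): for the raw string
    # "\x1b[1m\x1b[31mHi", _get_indexes() returns code indexes 0-8 (the two
    # escape sequences) and char indexes [9, 10] (the readable "Hi").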
def _get_interleving(self, index):
"""
Get the code characters from the given slice end to the next
character.
"""
try:
index = self._char_indexes[index - 1]
except IndexError:
return ''
s = ''
while True:
index += 1
if index in self._char_indexes:
break
elif index in self._code_indexes:
s += self._raw_string[index]
else:
break
return s
def split(self, by, maxsplit=-1):
"""
Stolen from PyPy's pure Python string implementation, tweaked for
ANSIString.
PyPy is distributed under the MIT licence.
http://opensource.org/licenses/MIT
"""
bylen = len(by)
if bylen == 0:
raise ValueError("empty separator")
res = []
start = 0
while maxsplit != 0:
next = self._clean_string.find(by, start)
if next < 0:
break
# Get character codes after the index as well.
res.append(self[start:next])
start = next + bylen
maxsplit -= 1 # NB. if it's already < 0, it stays < 0
res.append(self[start:len(self)])
return res
def __mul__(self, other):
"""
Multiplication method. Implemented for performance reasons.
"""
if not isinstance(other, int):
return NotImplemented
raw_string = self._raw_string * other
clean_string = self._clean_string * other
code_indexes = self._code_indexes[:]
char_indexes = self._char_indexes[:]
        for i in range(1, other):  # indexes for the first copy are already included above
code_indexes.extend(
self._shifter(self._code_indexes, i * len(self._raw_string)))
char_indexes.extend(
self._shifter(self._char_indexes, i * len(self._raw_string)))
return ANSIString(
raw_string, code_indexes=code_indexes, char_indexes=char_indexes,
clean_string=clean_string)
def __rmul__(self, other):
return self.__mul__(other)
def rsplit(self, by, maxsplit=-1):
"""
Stolen from PyPy's pure Python string implementation, tweaked for
ANSIString.
PyPy is distributed under the MIT licence.
http://opensource.org/licenses/MIT
"""
res = []
end = len(self)
bylen = len(by)
if bylen == 0:
raise ValueError("empty separator")
while maxsplit != 0:
next = self._clean_string.rfind(by, 0, end)
if next < 0:
break
# Get character codes after the index as well.
res.append(self[next+bylen:end])
end = next
maxsplit -= 1 # NB. if it's already < 0, it stays < 0
res.append(self[:end])
res.reverse()
return res
def join(self, iterable):
"""
Joins together strings in an iterable.
"""
result = ANSIString('')
last_item = None
for item in iterable:
if last_item is not None:
result += self._raw_string
if not isinstance(item, ANSIString):
item = ANSIString(item)
result += item
last_item = item
return result
def _filler(self, char, amount):
"""
Generate a line of characters in a more efficient way than just adding
ANSIStrings.
"""
if not isinstance(char, ANSIString):
line = char * amount
return ANSIString(
                line, code_indexes=[], char_indexes=range(0, len(line)),
                clean_string=line)
try:
start = char._code_indexes[0]
except IndexError:
start = None
end = char._char_indexes[0]
prefix = char._raw_string[start:end]
postfix = char._raw_string[end + 1:]
line = char._clean_string * amount
code_indexes = [i for i in range(0, len(prefix))]
length = len(prefix) + len(line)
code_indexes.extend([i for i in range(length, length + len(postfix))])
char_indexes = self._shifter(xrange(0, len(line)), len(prefix))
raw_string = prefix + line + postfix
return ANSIString(
raw_string, clean_string=line, char_indexes=char_indexes,
code_indexes=code_indexes)
@_spacing_preflight
def center(self, width, fillchar, difference):
"""
Center some text with some spaces padding both sides.
"""
remainder = difference % 2
        difference //= 2
spacing = self._filler(fillchar, difference)
result = spacing + self + spacing + self._filler(fillchar, remainder)
return result
@_spacing_preflight
def ljust(self, width, fillchar, difference):
"""
Left justify some text.
"""
return self + self._filler(fillchar, difference)
@_spacing_preflight
def rjust(self, width, fillchar, difference):
"""
Right justify some text.
"""
return self._filler(fillchar, difference) + self
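def _example_ansistring_usage():
    # Illustrative sketch (not part of the original module): ANSIString behaves
    # like its readable text for length and slicing while keeping the codes.
    banner = ANSIString("{rAlert{n")
    assert len(banner) == 5          # length of the clean text "Alert"
    first = banner[:2]               # "Al", still colored red in the raw form
    return banner.center(11, "*"), first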
| bsd-3-clause |
VOLTTRON/volttron-applications | kisensum/openadr/openadr/vtn/migrations/0005_auto_20171020_2019.py | 2 | 1165 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-20 20:19
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('vtn', '0004_auto_20171020_2019'),
]
operations = [
migrations.AlterField(
model_name='drevent',
name='last_status_time',
field=models.DateTimeField(blank=True, default=datetime.datetime(2017, 10, 20, 20, 19, 42, 780617, tzinfo=utc), null=True, verbose_name='Last Status Time'),
),
migrations.AlterField(
model_name='siteevent',
name='last_opt_in',
field=models.DateTimeField(blank=True, default=datetime.datetime(2017, 10, 20, 20, 19, 42, 781512, tzinfo=utc), null=True, verbose_name='Last opt-in'),
),
migrations.AlterField(
model_name='siteevent',
name='last_status_time',
field=models.DateTimeField(default=datetime.datetime(2017, 10, 20, 20, 19, 42, 781464, tzinfo=utc), verbose_name='Last Status Time'),
),
]
| bsd-3-clause |
botify-labs/simpleflow | simpleflow/swf/process/worker/base.py | 1 | 13859 | import json
import multiprocessing
import os
import sys
import traceback
import uuid
from base64 import b64decode
import psutil
import swf.actors
import swf.exceptions
from simpleflow import format, logger, settings
from simpleflow.dispatch import dynamic_dispatcher
from simpleflow.download import download_binaries
from simpleflow.exceptions import ExecutionError
from simpleflow.job import KubernetesJob
from simpleflow.process import Supervisor, with_state
from simpleflow.swf.constants import VALID_PROCESS_MODES
from simpleflow.swf.process import Poller
from simpleflow.swf.task import ActivityTask
from simpleflow.swf.utils import sanitize_activity_context
from simpleflow.utils import format_exc, format_exc_type, json_dumps, to_k8s_identifier
from swf.models import ActivityTask as BaseActivityTask
from swf.responses import Response
class Worker(Supervisor):
def __init__(self, poller, nb_children=None):
self._poller = poller
super(Worker, self).__init__(
payload=self._poller.start, nb_children=nb_children,
)
class ActivityPoller(Poller, swf.actors.ActivityWorker):
"""
Polls an activity and handles it in the worker.
"""
def __init__(
self,
domain,
task_list,
middlewares=None,
heartbeat=60,
process_mode=None,
poll_data=None,
):
"""
:param domain:
:type domain:
:param task_list:
:type task_list:
:param middlewares: Paths to middleware functions to execute before and after any Activity
:type middlewares: Optional[Dict[str, str]]
:param heartbeat:
:type heartbeat:
:param process_mode: Whether to process locally (default) or spawn a Kubernetes job.
:type process_mode: Optional[str]
"""
self.nb_retries = 3
# heartbeat=0 is a special value to disable heartbeating. We want to
# replace it by None because multiprocessing.Process.join() treats
# this as "no timeout"
self._heartbeat = heartbeat or None
self.middlewares = middlewares
self.process_mode = process_mode or "local"
assert (
self.process_mode in VALID_PROCESS_MODES
), 'invalid process_mode "{}"'.format(self.process_mode)
self.poll_data = poll_data
super(ActivityPoller, self).__init__(domain, task_list)
@property
def name(self):
return "{}(task_list={})".format(self.__class__.__name__, self.task_list,)
@with_state("polling")
def poll(self, task_list=None, identity=None):
if self.poll_data:
# the poll data has been passed as input
return self.fake_poll()
else:
# we need to poll SWF's PollForActivityTask
return swf.actors.ActivityWorker.poll(self, task_list, identity)
def fake_poll(self):
polled_activity_data = json.loads(b64decode(self.poll_data))
activity_task = BaseActivityTask.from_poll(
self.domain, self.task_list, polled_activity_data,
)
return Response(
task_token=activity_task.task_token,
activity_task=activity_task,
raw_response=polled_activity_data,
)
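    # Illustrative note (not part of simpleflow): poll_data is expected to hold
    # the base64-encoded JSON body of a PollForActivityTask response, e.g.
    # (hypothetically) base64.b64encode(json_dumps(raw_response).encode()),
    # so a Kubernetes job can replay a poll it did not perform itself.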
@with_state("processing")
def process(self, response):
"""
        Process a swf.actors.ActivityWorker poll response.
:param response:
:type response: swf.responses.Response
"""
token = response.task_token
task = response.activity_task
if self.process_mode == "kubernetes":
try:
spawn_kubernetes_job(self, response.raw_response)
except Exception as err:
logger.exception("spawn_kubernetes_job error")
reason = "cannot spawn kubernetes job for task {}: {} {}".format(
task.activity_id, err.__class__.__name__, err,
)
self.fail_with_retry(token, task, reason)
else:
spawn(self, token, task, self.middlewares, self._heartbeat)
@with_state("completing")
def complete(self, token, result=None):
swf.actors.ActivityWorker.complete(self, token, result)
# noinspection PyMethodOverriding
@with_state("failing")
def fail(self, token, task, reason=None, details=None):
"""
Fail the activity, log and ignore exceptions.
:param token:
:type token:
:param task:
:type task:
:param reason:
:type reason:
:param details:
:type details:
:return:
:rtype:
"""
try:
return swf.actors.ActivityWorker.fail(
self, token, reason=reason, details=details,
)
except Exception as err:
logger.error(
"cannot fail task {}: {}".format(task.activity_type.name, err,)
)
@property
def identity(self):
if self.process_mode == "kubernetes":
self.job_name = "{}--{}".format(
to_k8s_identifier(self.task_list), str(uuid.uuid4())
)
return json_dumps(
{
"cluster": os.environ["K8S_CLUSTER"],
"namespace": os.environ["K8S_NAMESPACE"],
"job": self.job_name,
}
)
else:
return super(ActivityPoller, self).identity
class ActivityWorker(object):
def __init__(self, dispatcher=None):
self._dispatcher = dispatcher or dynamic_dispatcher.Dispatcher()
def dispatch(self, task):
"""
:param task:
:type task: swf.models.ActivityTask
:return:
:rtype: simpleflow.activity.Activity
"""
name = task.activity_type.name
return self._dispatcher.dispatch_activity(name)
def process(self, poller, token, task, middlewares=None):
"""
:param poller:
:type poller: ActivityPoller
:param token:
:type token: str
:param task:
:type task: swf.models.ActivityTask
:param middlewares: Paths to middleware functions to execute before and after any Activity
:type middlewares: Optional[Dict[str, str]]
"""
logger.debug("ActivityWorker.process() pid={}".format(os.getpid()))
try:
activity = self.dispatch(task)
input = format.decode(task.input)
args = input.get("args", ())
kwargs = input.get("kwargs", {})
context = sanitize_activity_context(task.context)
context["domain_name"] = poller.domain.name
if input.get("meta", {}).get("binaries"):
download_binaries(input["meta"]["binaries"])
result = ActivityTask(
activity,
*args,
context=context,
simpleflow_middlewares=middlewares,
**kwargs
).execute()
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.exception("process error: {}".format(str(exc_value)))
if isinstance(exc_value, ExecutionError) and len(exc_value.args):
details = exc_value.args[0]
reason = format_exc(exc_value) # FIXME json.loads and rebuild?
else:
tb = traceback.format_tb(exc_traceback)
reason = format_exc(exc_value)
details = json_dumps(
{
"error": exc_type.__name__,
"error_type": format_exc_type(exc_type),
"message": str(exc_value),
"traceback": tb,
},
default=repr,
)
return poller.fail_with_retry(token, task, reason=reason, details=details)
try:
logger.info("completing activity")
poller.complete_with_retry(token, result)
except Exception as err:
logger.exception("complete error")
reason = "cannot complete task {}: {} {}".format(
task.activity_id, err.__class__.__name__, err,
)
poller.fail_with_retry(token, task, reason)
def process_task(poller, token, task, middlewares=None):
"""
:param poller:
:type poller: ActivityPoller
:param token:
:type token: str
:param task:
:type task: swf.models.ActivityTask
:param middlewares: Paths to middleware functions to execute before and after any Activity
:type middlewares: Optional[Dict[str, str]]
"""
logger.debug("process_task() pid={}".format(os.getpid()))
format.JUMBO_FIELDS_MEMORY_CACHE.clear()
worker = ActivityWorker()
worker.process(poller, token, task, middlewares)
def spawn_kubernetes_job(poller, swf_response):
logger.info("scheduling new kubernetes job name={}".format(poller.job_name))
job = KubernetesJob(poller.job_name, poller.domain.name, swf_response)
job.schedule()
def reap_process_tree(pid, wait_timeout=settings.ACTIVITY_SIGTERM_WAIT_SEC):
"""
    TERMinates (and, if necessary, KILLs) a process and its descendants.
See also: https://psutil.readthedocs.io/en/latest/#kill-process-tree.
:param pid: Process ID
:type pid: int
:param wait_timeout: Wait timeout
:type wait_timeout: float
"""
def on_terminate(p):
logger.info("process: terminated pid={} retcode={}".format(p.pid, p.returncode))
if pid == os.getpid():
raise RuntimeError("process: cannot terminate self!")
parent = psutil.Process(pid)
procs = parent.children(recursive=True)
procs.append(parent)
# Terminate
for p in procs:
try:
p.terminate()
except psutil.NoSuchProcess:
pass
_, alive = psutil.wait_procs(procs, timeout=wait_timeout, callback=on_terminate)
# Kill
for p in alive:
logger.warning(
"process: pid={} status={} did not respond to SIGTERM. Trying SIGKILL".format(
p.pid, p.status()
)
)
try:
p.kill()
except psutil.NoSuchProcess:
pass
# Check
_, alive = psutil.wait_procs(alive)
for p in alive:
logger.error(
"process: pid={} status={} still alive. Giving up!".format(
p.pid, p.status()
)
)
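def _example_reap_process_tree():
    # Illustrative sketch (not part of simpleflow): start a throwaway child
    # process and TERM/KILL it together with any descendants it spawned.
    import time
    child = multiprocessing.Process(target=time.sleep, args=(60,))
    child.start()
    reap_process_tree(child.pid, wait_timeout=5.0)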
def spawn(poller, token, task, middlewares=None, heartbeat=60):
"""
Spawn a process and wait for it to end, sending heartbeats to SWF.
On activity timeouts and termination, we reap the worker process and its
children.
:param poller:
:type poller: ActivityPoller
:param token:
:type token: str
:param task:
:type task: swf.models.ActivityTask
:param middlewares: Paths to middleware functions to execute before and after any Activity
:type middlewares: Optional[Dict[str, str]]
:param heartbeat: heartbeat delay (seconds)
:type heartbeat: int
"""
logger.info(
"spawning new activity worker pid={} heartbeat={}".format(
os.getpid(), heartbeat
)
)
worker = multiprocessing.Process(
target=process_task, args=(poller, token, task, middlewares)
)
worker.start()
def worker_alive():
return psutil.pid_exists(worker.pid)
while worker_alive():
worker.join(timeout=heartbeat)
if not worker_alive():
# Most certainly unneeded: we'll see
if worker.exitcode is None:
# race condition, try and re-join
worker.join(timeout=0)
if worker.exitcode is None:
logger.warning(
"process {} is dead but multiprocessing doesn't know it (simpleflow bug)".format(
worker.pid
)
)
if worker.exitcode != 0:
poller.fail_with_retry(
token,
task,
reason="process {} died: exit code {}".format(
worker.pid, worker.exitcode
),
)
return
try:
logger.debug("heartbeating for pid={} (token={})".format(worker.pid, token))
response = poller.heartbeat(token)
except swf.exceptions.DoesNotExistError as error:
# Either the task or the workflow execution no longer exists,
# let's kill the worker process.
logger.warning("heartbeat failed: {}".format(error))
logger.warning("killing (KILL) worker with pid={}".format(worker.pid))
reap_process_tree(worker.pid)
return
except swf.exceptions.RateLimitExceededError as error:
# ignore rate limit errors: high chances the next heartbeat will be
# ok anyway, so it would be stupid to break the task for that
logger.warning(
'got a "ThrottlingException / Rate exceeded" when heartbeating for task {}: {}'.format(
task.activity_type.name, error
)
)
continue
except Exception as error:
# Let's crash if it cannot notify the heartbeat failed. The
# subprocess will become orphan and the heartbeat timeout may
# eventually trigger on Amazon SWF side.
logger.error(
"cannot send heartbeat for task {}: {}".format(
task.activity_type.name, error
)
)
raise
# Task cancelled.
if response and response.get("cancelRequested"):
reap_process_tree(worker.pid)
return
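def _example_worker_wiring():
    # Illustrative sketch (not part of simpleflow): wire an ActivityPoller to
    # the Worker supervisor. The domain and task list names are placeholders,
    # and Supervisor.start() is assumed from simpleflow.process.
    import swf.models
    domain = swf.models.Domain("ExampleDomain")
    poller = ActivityPoller(domain, "example-task-list", heartbeat=30)
    Worker(poller, nb_children=2).start()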
| mit |
repotvsupertuga/tvsupertuga.repository | script.module.streamlink.base/resources/lib/streamlink/utils/six.py | 172 | 30888 | # Copyright (c) 2010-2017 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Utilities for writing code that runs on Python 2 and 3"""
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.11.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("getoutput", "commands", "subprocess"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("splitvalue", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
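def _example_moves_usage():
    # Illustrative sketch (not part of six): attributes on ``moves`` resolve
    # lazily to the right object for the running interpreter.
    parsed = moves.urllib_parse.urlparse("http://example.com/path?q=1")
    return parsed.netloc, list(moves.range(3))  # -> ('example.com', [0, 1, 2])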
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
try:
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
value = None
tb = None
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
try:
raise tp, value, tb
finally:
tb = None
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
try:
if from_value is None:
raise value
raise value from from_value
finally:
value = None
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
try:
raise value from from_value
finally:
value = None
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
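def _example_with_metaclass():
    # Illustrative sketch (not part of six): the temporary class returned by
    # with_metaclass replaces itself with a real Meta-built class.
    class Meta(type):
        pass
    class Base(object):
        pass
    class Widget(with_metaclass(Meta, Base)):
        pass
    return type(Widget) is Meta and issubclass(Widget, Base)  # -> True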
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
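def _example_add_metaclass():
    # Illustrative sketch (not part of six): the decorator form, equivalent to
    # ``class Widget(object, metaclass=Meta)`` on Python 3.
    class Meta(type):
        pass
    @add_metaclass(Meta)
    class Widget(object):
        pass
    return type(Widget) is Meta  # -> True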
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
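# Illustrative usage sketch (added comment):
#
#     @python_2_unicode_compatible
#     class Greeting(object):
#         def __str__(self):
#             return u'h\xe9llo'
#
# On Python 2 the text-returning __str__ becomes __unicode__ and a UTF-8
# encoding __str__ is installed; on Python 3 the class is returned unchanged.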
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
        # inserted an importer with a different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| gpl-2.0 |
lokirius/python-for-android | python3-alpha/python3-src/Lib/ctypes/test/test_loading.py | 52 | 4097 | from ctypes import *
import sys, unittest
import os
from ctypes.util import find_library
from ctypes.test import is_resource_enabled
libc_name = None
if os.name == "nt":
libc_name = find_library("c")
elif os.name == "ce":
libc_name = "coredll"
elif sys.platform == "cygwin":
libc_name = "cygwin1.dll"
else:
libc_name = find_library("c")
if is_resource_enabled("printing"):
print("libc_name is", libc_name)
class LoaderTest(unittest.TestCase):
unknowndll = "xxrandomnamexx"
if libc_name is not None:
def test_load(self):
CDLL(libc_name)
CDLL(os.path.basename(libc_name))
self.assertRaises(OSError, CDLL, self.unknowndll)
if libc_name is not None and os.path.basename(libc_name) == "libc.so.6":
def test_load_version(self):
cdll.LoadLibrary("libc.so.6")
            # Linux uses versioned sonames; libc.so.9 should not exist
self.assertRaises(OSError, cdll.LoadLibrary, "libc.so.9")
self.assertRaises(OSError, cdll.LoadLibrary, self.unknowndll)
def test_find(self):
for name in ("c", "m"):
lib = find_library(name)
if lib:
cdll.LoadLibrary(lib)
CDLL(lib)
if os.name in ("nt", "ce"):
def test_load_library(self):
self.assertFalse(libc_name is None)
if is_resource_enabled("printing"):
print(find_library("kernel32"))
print(find_library("user32"))
if os.name == "nt":
windll.kernel32.GetModuleHandleW
windll["kernel32"].GetModuleHandleW
windll.LoadLibrary("kernel32").GetModuleHandleW
WinDLL("kernel32").GetModuleHandleW
elif os.name == "ce":
windll.coredll.GetModuleHandleW
windll["coredll"].GetModuleHandleW
windll.LoadLibrary("coredll").GetModuleHandleW
WinDLL("coredll").GetModuleHandleW
def test_load_ordinal_functions(self):
import _ctypes_test
dll = WinDLL(_ctypes_test.__file__)
# We load the same function both via ordinal and name
func_ord = dll[2]
func_name = dll.GetString
# addressof gets the address where the function pointer is stored
a_ord = addressof(func_ord)
a_name = addressof(func_name)
f_ord_addr = c_void_p.from_address(a_ord).value
f_name_addr = c_void_p.from_address(a_name).value
self.assertEqual(hex(f_ord_addr), hex(f_name_addr))
self.assertRaises(AttributeError, dll.__getitem__, 1234)
if os.name == "nt":
def test_1703286_A(self):
from _ctypes import LoadLibrary, FreeLibrary
# On winXP 64-bit, advapi32 loads at an address that does
# NOT fit into a 32-bit integer. FreeLibrary must be able
# to accept this address.
# These are tests for http://www.python.org/sf/1703286
handle = LoadLibrary("advapi32")
FreeLibrary(handle)
def test_1703286_B(self):
# Since on winXP 64-bit advapi32 loads like described
# above, the (arbitrarily selected) CloseEventLog function
# also has a high address. 'call_function' should accept
# addresses so large.
from _ctypes import call_function
advapi32 = windll.advapi32
            # Calling CloseEventLog with a NULL argument should fail,
            # but the call must not segfault or crash in any other way.
self.assertEqual(0, advapi32.CloseEventLog(None))
windll.kernel32.GetProcAddress.argtypes = c_void_p, c_char_p
windll.kernel32.GetProcAddress.restype = c_void_p
proc = windll.kernel32.GetProcAddress(advapi32._handle, b"CloseEventLog")
self.assertTrue(proc)
# This is the real test: call the function via 'call_function'
self.assertEqual(0, call_function(proc, (None,)))
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
brian-yang/mozillians | vendor-local/lib/python/django_filters/filterset.py | 16 | 15216 | from __future__ import absolute_import
from __future__ import unicode_literals
import types
import copy
from django import forms
from django.core.validators import EMPTY_VALUES
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from sys import version_info
try:
from django.db.models.constants import LOOKUP_SEP
except ImportError: # pragma: nocover
# Django < 1.5 fallback
from django.db.models.sql.constants import LOOKUP_SEP # noqa
try:
from collections import OrderedDict
except ImportError: # pragma: nocover
# Django < 1.5 fallback
from django.utils.datastructures import SortedDict as OrderedDict # noqa
try:
from django.db.models.related import RelatedObject as ForeignObjectRel
except ImportError: # pragma: nocover
# Django >= 1.8 replaces RelatedObject with ForeignObjectRel
from django.db.models.fields.related import ForeignObjectRel
from .filters import (Filter, CharFilter, BooleanFilter,
ChoiceFilter, DateFilter, DateTimeFilter, TimeFilter, ModelChoiceFilter,
ModelMultipleChoiceFilter, NumberFilter)
ORDER_BY_FIELD = 'o'
# There is a bug with deepcopy in 2.6, patch if we are running python < 2.7
# http://bugs.python.org/issue1515
if version_info < (2, 7, 0):
def _deepcopy_method(x, memo):
return type(x)(x.im_func, copy.deepcopy(x.im_self, memo), x.im_class)
copy._deepcopy_dispatch[types.MethodType] = _deepcopy_method
def get_declared_filters(bases, attrs, with_base_filters=True):
filters = []
for filter_name, obj in list(attrs.items()):
if isinstance(obj, Filter):
obj = attrs.pop(filter_name)
if getattr(obj, 'name', None) is None:
obj.name = filter_name
filters.append((filter_name, obj))
filters.sort(key=lambda x: x[1].creation_counter)
if with_base_filters:
for base in bases[::-1]:
if hasattr(base, 'base_filters'):
filters = list(base.base_filters.items()) + filters
else:
for base in bases[::-1]:
if hasattr(base, 'declared_filters'):
filters = list(base.declared_filters.items()) + filters
return OrderedDict(filters)
def get_model_field(model, f):
parts = f.split(LOOKUP_SEP)
opts = model._meta
for name in parts[:-1]:
try:
rel = opts.get_field_by_name(name)[0]
except FieldDoesNotExist:
return None
if isinstance(rel, ForeignObjectRel):
model = rel.model
opts = rel.opts
else:
model = rel.rel.to
opts = model._meta
try:
rel, model, direct, m2m = opts.get_field_by_name(parts[-1])
except FieldDoesNotExist:
return None
return rel
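# Illustrative sketch (added comment; ``Book`` and ``Author`` are hypothetical
# models where Book.author is a ForeignKey to Author and Author.name is a
# CharField):
#
#     get_model_field(Book, 'author__name')     # -> the Author.name field instance
#     get_model_field(Book, 'author__missing')  # -> None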
def filters_for_model(model, fields=None, exclude=None, filter_for_field=None,
filter_for_reverse_field=None):
field_dict = OrderedDict()
opts = model._meta
if fields is None:
fields = [f.name for f in sorted(opts.fields + opts.many_to_many)
if not isinstance(f, models.AutoField)]
# Loop through the list of fields.
for f in fields:
# Skip the field if excluded.
if exclude is not None and f in exclude:
continue
field = get_model_field(model, f)
# Do nothing if the field doesn't exist.
if field is None:
field_dict[f] = None
continue
if isinstance(field, ForeignObjectRel):
filter_ = filter_for_reverse_field(field, f)
if filter_:
field_dict[f] = filter_
# If fields is a dictionary, it must contain lists.
elif isinstance(fields, dict):
# Create a filter for each lookup type.
for lookup_type in fields[f]:
filter_ = filter_for_field(field, f, lookup_type)
if filter_:
filter_name = f
# Don't add "exact" to filter names
if lookup_type != 'exact':
filter_name = f + LOOKUP_SEP + lookup_type
field_dict[filter_name] = filter_
# If fields is a list, it contains strings.
else:
filter_ = filter_for_field(field, f)
if filter_:
field_dict[f] = filter_
return field_dict
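# Illustrative sketch (added comment; ``Product`` is a hypothetical model with
# a ``price`` field). When ``fields`` is a dict, one filter is built per lookup
# type, using the FilterSet's own factory callbacks:
#
#     filters_for_model(Product, fields={'price': ['lt', 'gt']},
#                       filter_for_field=FilterSet.filter_for_field,
#                       filter_for_reverse_field=FilterSet.filter_for_reverse_field)
#     # -> OrderedDict with keys 'price__lt' and 'price__gt'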
class FilterSetOptions(object):
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.order_by = getattr(options, 'order_by', False)
self.form = getattr(options, 'form', forms.Form)
class FilterSetMetaclass(type):
def __new__(cls, name, bases, attrs):
try:
parents = [b for b in bases if issubclass(b, FilterSet)]
except NameError:
# We are defining FilterSet itself here
parents = None
declared_filters = get_declared_filters(bases, attrs, False)
new_class = super(
FilterSetMetaclass, cls).__new__(cls, name, bases, attrs)
if not parents:
return new_class
opts = new_class._meta = FilterSetOptions(
getattr(new_class, 'Meta', None))
if opts.model:
filters = filters_for_model(opts.model, opts.fields, opts.exclude,
new_class.filter_for_field,
new_class.filter_for_reverse_field)
filters.update(declared_filters)
else:
filters = declared_filters
if None in filters.values():
raise TypeError("Meta.fields contains a field that isn't defined "
"on this FilterSet")
new_class.declared_filters = declared_filters
new_class.base_filters = filters
return new_class
FILTER_FOR_DBFIELD_DEFAULTS = {
models.AutoField: {
'filter_class': NumberFilter
},
models.CharField: {
'filter_class': CharFilter
},
models.TextField: {
'filter_class': CharFilter
},
models.BooleanField: {
'filter_class': BooleanFilter
},
models.DateField: {
'filter_class': DateFilter
},
models.DateTimeField: {
'filter_class': DateTimeFilter
},
models.TimeField: {
'filter_class': TimeFilter
},
models.OneToOneField: {
'filter_class': ModelChoiceFilter,
'extra': lambda f: {
'queryset': f.rel.to._default_manager.complex_filter(
f.rel.limit_choices_to),
'to_field_name': f.rel.field_name,
}
},
models.ForeignKey: {
'filter_class': ModelChoiceFilter,
'extra': lambda f: {
'queryset': f.rel.to._default_manager.complex_filter(
f.rel.limit_choices_to),
'to_field_name': f.rel.field_name
}
},
models.ManyToManyField: {
'filter_class': ModelMultipleChoiceFilter,
'extra': lambda f: {
'queryset': f.rel.to._default_manager.complex_filter(
f.rel.limit_choices_to),
}
},
models.DecimalField: {
'filter_class': NumberFilter,
},
models.SmallIntegerField: {
'filter_class': NumberFilter,
},
models.IntegerField: {
'filter_class': NumberFilter,
},
models.PositiveIntegerField: {
'filter_class': NumberFilter,
},
models.PositiveSmallIntegerField: {
'filter_class': NumberFilter,
},
models.FloatField: {
'filter_class': NumberFilter,
},
models.NullBooleanField: {
'filter_class': BooleanFilter,
},
models.SlugField: {
'filter_class': CharFilter,
},
models.EmailField: {
'filter_class': CharFilter,
},
models.FilePathField: {
'filter_class': CharFilter,
},
models.URLField: {
'filter_class': CharFilter,
},
models.IPAddressField: {
'filter_class': CharFilter,
},
models.CommaSeparatedIntegerField: {
'filter_class': CharFilter,
},
}
class BaseFilterSet(object):
filter_overrides = {}
order_by_field = ORDER_BY_FIELD
strict = True
def __init__(self, data=None, queryset=None, prefix=None, strict=None):
self.is_bound = data is not None
self.data = data or {}
if queryset is None:
queryset = self._meta.model._default_manager.all()
self.queryset = queryset
self.form_prefix = prefix
if strict is not None:
self.strict = strict
self.filters = copy.deepcopy(self.base_filters)
# propagate the model being used through the filters
for filter_ in self.filters.values():
filter_.model = self._meta.model
        # Attach this filterset as the parent of each filter, so the filters can access it
for filter_key, filter_ in six.iteritems(self.filters):
filter_.parent = self
def __iter__(self):
for obj in self.qs:
yield obj
def __len__(self):
return len(self.qs)
def __getitem__(self, key):
return self.qs[key]
@property
def qs(self):
if not hasattr(self, '_qs'):
valid = self.is_bound and self.form.is_valid()
if self.strict and self.is_bound and not valid:
self._qs = self.queryset.none()
return self._qs
# start with all the results and filter from there
qs = self.queryset.all()
for name, filter_ in six.iteritems(self.filters):
value = None
if valid:
value = self.form.cleaned_data[name]
else:
raw_value = self.form[name].value()
try:
value = self.form.fields[name].clean(raw_value)
except forms.ValidationError:
# for invalid values either:
# strictly "apply" filter yielding no results and get outta here
if self.strict:
self._qs = self.queryset.none()
return self._qs
else: # or ignore this filter altogether
pass
if value is not None: # valid & clean data
qs = filter_.filter(qs, value)
if self._meta.order_by:
order_field = self.form.fields[self.order_by_field]
data = self.form[self.order_by_field].data
ordered_value = None
try:
ordered_value = order_field.clean(data)
except forms.ValidationError:
pass
if ordered_value in EMPTY_VALUES and self.strict:
ordered_value = self.form.fields[self.order_by_field].choices[0][0]
if ordered_value:
qs = qs.order_by(*self.get_order_by(ordered_value))
self._qs = qs
return self._qs
def count(self):
return self.qs.count()
@property
def form(self):
if not hasattr(self, '_form'):
fields = OrderedDict([
(name, filter_.field)
for name, filter_ in six.iteritems(self.filters)])
fields[self.order_by_field] = self.ordering_field
Form = type(str('%sForm' % self.__class__.__name__),
(self._meta.form,), fields)
if self.is_bound:
self._form = Form(self.data, prefix=self.form_prefix)
else:
self._form = Form(prefix=self.form_prefix)
return self._form
def get_ordering_field(self):
if self._meta.order_by:
if isinstance(self._meta.order_by, (list, tuple)):
if isinstance(self._meta.order_by[0], (list, tuple)):
# e.g. (('field', 'Display name'), ...)
choices = [(f[0], f[1]) for f in self._meta.order_by]
else:
choices = [(f, _('%s (descending)' % capfirst(f[1:])) if f[0] == '-' else capfirst(f))
for f in self._meta.order_by]
else:
# add asc and desc field names
# use the filter's label if provided
choices = []
for f, fltr in self.filters.items():
choices.extend([
(fltr.name or f, fltr.label or capfirst(f)),
("-%s" % (fltr.name or f), _('%s (descending)' % (fltr.label or capfirst(f))))
])
return forms.ChoiceField(label=_("Ordering"), required=False,
choices=choices)
@property
def ordering_field(self):
if not hasattr(self, '_ordering_field'):
self._ordering_field = self.get_ordering_field()
return self._ordering_field
def get_order_by(self, order_choice):
return [order_choice]
@classmethod
def filter_for_field(cls, f, name, lookup_type='exact'):
filter_for_field = dict(FILTER_FOR_DBFIELD_DEFAULTS)
filter_for_field.update(cls.filter_overrides)
default = {
'name': name,
'label': capfirst(f.verbose_name),
'lookup_type': lookup_type
}
if f.choices:
default['choices'] = f.choices
return ChoiceFilter(**default)
data = filter_for_field.get(f.__class__)
if data is None:
# could be a derived field, inspect parents
for class_ in f.__class__.mro():
# skip if class_ is models.Field or object
# 1st item in mro() is original class
if class_ in (f.__class__, models.Field, object):
continue
data = filter_for_field.get(class_)
if data:
break
if data is None:
return
filter_class = data.get('filter_class')
default.update(data.get('extra', lambda f: {})(f))
if filter_class is not None:
return filter_class(**default)
@classmethod
def filter_for_reverse_field(cls, f, name):
rel = f.field.rel
queryset = f.field.model._default_manager.all()
default = {
'name': name,
'label': capfirst(rel.related_name),
'queryset': queryset,
}
if rel.multiple:
return ModelMultipleChoiceFilter(**default)
else:
return ModelChoiceFilter(**default)
class FilterSet(six.with_metaclass(FilterSetMetaclass, BaseFilterSet)):
pass
def filterset_factory(model):
meta = type(str('Meta'), (object,), {'model': model})
filterset = type(str('%sFilterSet' % model._meta.object_name),
(FilterSet,), {'Meta': meta})
return filterset
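# Illustrative usage sketch (added comment; ``Product`` is a hypothetical
# model):
#
#     class ProductFilterSet(FilterSet):
#         class Meta:
#             model = Product
#             fields = ['name', 'price']
#
#     f = ProductFilterSet({'name': 'shoe'}, queryset=Product.objects.all())
#     filtered = f.qs
#
#     # or build an equivalent FilterSet class on the fly:
#     ProductFilterSet = filterset_factory(Product)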
| bsd-3-clause |
beni55/furious.js | configure.py | 3 | 4556 | #!/usr/bin/python
from __future__ import print_function
import optparse
import os
import sys
import glob
import ninja_syntax
def replace_ext(filename, ext):
return os.path.splitext(filename)[0] + ext
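# For example (added comment): replace_ext('lib/nacl/furious.c', '.bc')
# returns 'lib/nacl/furious.bc'.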
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option("--with-protoc-c", dest="protoc_c", default="protoc-c")
parser.add_option("--with-nacl-sdk", dest="nacl_sdk", default=os.getenv("NACL_SDK_ROOT"))
options, _ = parser.parse_args()
root_dir = os.path.dirname(os.path.abspath(__file__))
with open('build.ninja', 'w') as buildfile:
ninja = ninja_syntax.Writer(buildfile)
# Variables
ninja.variable('nacl_sdk_dir', options.nacl_sdk)
if sys.platform == 'win32':
ninja.variable('pnacl_toolchain_dir', '$nacl_sdk_dir/toolchain/win_pnacl')
ninja.variable('pnacl_cc', '$pnacl_toolchain_dir/bin/pnacl-clang.bat')
ninja.variable('pnacl_cxx', '$pnacl_toolchain_dir/bin/pnacl-clang++.bat')
ninja.variable('pnacl_finalize', '$pnacl_toolchain_dir/bin/pnacl-finalize.bat')
elif sys.platform == 'linux2':
ninja.variable('pnacl_toolchain_dir', '$nacl_sdk_dir/toolchain/linux_pnacl')
ninja.variable('pnacl_cc', '$pnacl_toolchain_dir/bin/pnacl-clang')
ninja.variable('pnacl_cxx', '$pnacl_toolchain_dir/bin/pnacl-clang++')
ninja.variable('pnacl_finalize', '$pnacl_toolchain_dir/bin/pnacl-finalize')
elif sys.platform == 'darwin':
ninja.variable('pnacl_toolchain_dir', '$nacl_sdk_dir/toolchain/mac_pnacl')
ninja.variable('pnacl_cc', '$pnacl_toolchain_dir/bin/pnacl-clang')
ninja.variable('pnacl_cxx', '$pnacl_toolchain_dir/bin/pnacl-clang++')
ninja.variable('pnacl_finalize', '$pnacl_toolchain_dir/bin/pnacl-finalize')
else:
print("Unsupported platform: " + sys.platform, file=sys.stderr)
exit(1)
ninja.variable('protoc_c', options.protoc_c)
# Rules
ninja.rule('COMPILE_PNACL_C', '$pnacl_cc -o $out -c $in -MMD -MF $out.d $optflags $cflags',
deps='gcc', depfile='$out.d',
description='CC[PNaCl] $in')
ninja.rule('LINK_PNACL_C', '$pnacl_cc -o $out $in $ldflags',
description='CCLD[PNaCl] $out')
ninja.rule('FINALIZE_PNACL', '$pnacl_finalize -o $out $in',
description='FINALIZE[PNaCl] $out')
ninja.rule('PROTOC_C', '$protoc_c --proto_path=$indir --c_out=$outdir $in',
description='PROTOC[CXX] $in')
# Build targets
proto_dir = os.path.join(root_dir, "protobuf")
proto_sources = [os.path.join(proto_dir, path) for path in glob.glob(os.path.join(proto_dir, "*.proto"))]
c_source_dir = os.path.join(root_dir, "lib", "nacl")
c_build_dir = os.path.join(root_dir, "build", "nacl")
c_sources = [os.path.join(root_dir, path) for path in glob.glob(os.path.join(c_source_dir, "*.c"))]
c_objects = [os.path.join(c_build_dir, replace_ext(os.path.relpath(path, c_source_dir), ".bc")) for path in c_sources]
c_proto_sources = [os.path.join(c_source_dir, replace_ext(os.path.relpath(path, proto_dir), ".pb-c.c")) for path in proto_sources]
c_proto_headers = [os.path.join(c_source_dir, replace_ext(os.path.relpath(path, proto_dir), ".pb-c.h")) for path in proto_sources]
c_proto_objects = [os.path.join(c_build_dir, replace_ext(os.path.relpath(path, c_source_dir), ".bc")) for path in c_proto_sources]
for proto_source, c_source, c_header, c_object in zip(proto_sources, c_proto_sources, c_proto_headers, c_proto_objects):
ninja.build([c_source, c_header], "PROTOC_C", proto_source,
variables={'indir': proto_dir, 'outdir': c_source_dir})
if c_source not in c_sources:
c_sources.append(c_source)
c_objects.append(c_object)
for source, object in zip(c_sources, c_objects):
ninja.build(object, 'COMPILE_PNACL_C', source,
variables={'optflags': '-O3',
'cflags': '-I$nacl_sdk_dir/include -pthread -g -std=gnu99 -Wno-long-long -Wall -Werror -Wno-unused-variable -Wno-error=unused-function'})
ninja.build(os.path.join(root_dir, 'furious.bc'), 'LINK_PNACL_C', c_objects,
variables={'ldflags': '-L$nacl_sdk_dir/lib/pnacl/Release -lppapi -lm -lprotobuf-c'})
ninja.build(os.path.join(root_dir, 'furious.pexe'), 'FINALIZE_PNACL', os.path.join(root_dir, 'furious.bc'))
| mit |
stbuehler/pdns | regression-tests.dnsdist/test_Routing.py | 1 | 12809 | #!/usr/bin/env python
import threading
import time
import dns
from dnsdisttests import DNSDistTest
class TestRoutingPoolRouting(DNSDistTest):
_config_template = """
newServer{address="127.0.0.1:%s", pool="real"}
addAction(makeRule("poolaction.routing.tests.powerdns.com"), PoolAction("real"))
"""
def testPolicyPoolAction(self):
"""
Routing: Set pool by qname via PoolAction
Send an A query to "poolaction.routing.tests.powerdns.com.",
check that dnsdist routes the query to the "real" pool.
"""
name = 'poolaction.routing.tests.powerdns.com.'
query = dns.message.make_query(name, 'A', 'IN')
response = dns.message.make_response(query)
rrset = dns.rrset.from_text(name,
60,
dns.rdataclass.IN,
dns.rdatatype.A,
'192.0.2.1')
response.answer.append(rrset)
(receivedQuery, receivedResponse) = self.sendUDPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
(receivedQuery, receivedResponse) = self.sendTCPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
def testDefaultPool(self):
"""
Routing: Set pool by qname canary
Send an A query to "notpool.routing.tests.powerdns.com.",
check that dnsdist sends no response (no servers
in the default pool).
"""
name = 'notpool.routing.tests.powerdns.com.'
query = dns.message.make_query(name, 'A', 'IN')
(_, receivedResponse) = self.sendUDPQuery(query, response=None, useQueue=False)
self.assertEquals(receivedResponse, None)
(_, receivedResponse) = self.sendTCPQuery(query, response=None, useQueue=False)
self.assertEquals(receivedResponse, None)
class TestRoutingQPSPoolRouting(DNSDistTest):
_config_template = """
newServer{address="127.0.0.1:%s", pool="regular"}
addAction(makeRule("qpspoolaction.routing.tests.powerdns.com"), QPSPoolAction(10, "regular"))
"""
def testQPSPoolAction(self):
"""
Routing: Set pool by QPS via action
Send queries to "qpspoolaction.routing.tests.powerdns.com."
check that dnsdist does not route the query to the "regular" pool
when the max QPS has been reached.
"""
maxQPS = 10
name = 'qpspoolaction.routing.tests.powerdns.com.'
query = dns.message.make_query(name, 'A', 'IN')
response = dns.message.make_response(query)
rrset = dns.rrset.from_text(name,
60,
dns.rdataclass.IN,
dns.rdatatype.A,
'192.0.2.1')
response.answer.append(rrset)
for _ in range(maxQPS):
(receivedQuery, receivedResponse) = self.sendUDPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
        # the QPS limit has now been reached, so the queries are no longer routed
        # to the "regular" pool; they fall back to the default pool, which is
        # empty, so they should be dropped
(_, receivedResponse) = self.sendUDPQuery(query, response=None, useQueue=False)
self.assertEquals(receivedResponse, None)
time.sleep(1)
# again, over TCP this time
for _ in range(maxQPS):
(receivedQuery, receivedResponse) = self.sendTCPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
(_, receivedResponse) = self.sendTCPQuery(query, response=None, useQueue=False)
self.assertEquals(receivedResponse, None)
class TestRoutingRoundRobinLB(DNSDistTest):
_testServer2Port = 5351
_config_params = ['_testServerPort', '_testServer2Port']
_config_template = """
setServerPolicy(roundrobin)
s1 = newServer{address="127.0.0.1:%s"}
s1:setUp()
s2 = newServer{address="127.0.0.1:%s"}
s2:setUp()
"""
@classmethod
def startResponders(cls):
print("Launching responders..")
cls._UDPResponder = threading.Thread(name='UDP Responder', target=cls.UDPResponder, args=[cls._testServerPort, cls._toResponderQueue, cls._fromResponderQueue])
cls._UDPResponder.setDaemon(True)
cls._UDPResponder.start()
cls._UDPResponder2 = threading.Thread(name='UDP Responder 2', target=cls.UDPResponder, args=[cls._testServer2Port, cls._toResponderQueue, cls._fromResponderQueue])
cls._UDPResponder2.setDaemon(True)
cls._UDPResponder2.start()
cls._TCPResponder = threading.Thread(name='TCP Responder', target=cls.TCPResponder, args=[cls._testServerPort, cls._toResponderQueue, cls._fromResponderQueue])
cls._TCPResponder.setDaemon(True)
cls._TCPResponder.start()
cls._TCPResponder2 = threading.Thread(name='TCP Responder 2', target=cls.TCPResponder, args=[cls._testServer2Port, cls._toResponderQueue, cls._fromResponderQueue])
cls._TCPResponder2.setDaemon(True)
cls._TCPResponder2.start()
def testRR(self):
"""
Routing: Round Robin
        Send 10 A queries to "rr.routing.tests.powerdns.com." over UDP and 10 over TCP,
        check that dnsdist routes half of them to each backend.
"""
numberOfQueries = 10
name = 'rr.routing.tests.powerdns.com.'
query = dns.message.make_query(name, 'A', 'IN')
response = dns.message.make_response(query)
rrset = dns.rrset.from_text(name,
60,
dns.rdataclass.IN,
dns.rdatatype.A,
'192.0.2.1')
response.answer.append(rrset)
# the round robin counter is shared for UDP and TCP,
# so we need to do UDP then TCP to have a clean count
for _ in range(numberOfQueries):
(receivedQuery, receivedResponse) = self.sendUDPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
for _ in range(numberOfQueries):
(receivedQuery, receivedResponse) = self.sendTCPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
for key in self._responsesCounter:
value = self._responsesCounter[key]
self.assertEquals(value, numberOfQueries / 2)
class TestRoutingRoundRobinLBOneDown(DNSDistTest):
_testServer2Port = 5351
_config_params = ['_testServerPort', '_testServer2Port']
_config_template = """
setServerPolicy(roundrobin)
s1 = newServer{address="127.0.0.1:%s"}
s1:setUp()
s2 = newServer{address="127.0.0.1:%s"}
s2:setDown()
"""
def testRRWithOneDown(self):
"""
Routing: Round Robin with one server down
        Send 10 A queries to "rr.routing.tests.powerdns.com." over UDP and 10 over TCP,
        check that dnsdist routes all of them to the only backend that is up.
"""
numberOfQueries = 10
name = 'rr.routing.tests.powerdns.com.'
query = dns.message.make_query(name, 'A', 'IN')
response = dns.message.make_response(query)
rrset = dns.rrset.from_text(name,
60,
dns.rdataclass.IN,
dns.rdatatype.A,
'192.0.2.1')
response.answer.append(rrset)
# the round robin counter is shared for UDP and TCP,
# so we need to do UDP then TCP to have a clean count
for _ in range(numberOfQueries):
(receivedQuery, receivedResponse) = self.sendUDPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
for _ in range(numberOfQueries):
(receivedQuery, receivedResponse) = self.sendTCPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
total = 0
for key in self._responsesCounter:
value = self._responsesCounter[key]
self.assertTrue(value == numberOfQueries or value == 0)
total += value
self.assertEquals(total, numberOfQueries * 2)
class TestRoutingOrder(DNSDistTest):
_testServer2Port = 5351
_config_params = ['_testServerPort', '_testServer2Port']
_config_template = """
setServerPolicy(firstAvailable)
s1 = newServer{address="127.0.0.1:%s", order=2}
s1:setUp()
s2 = newServer{address="127.0.0.1:%s", order=1}
s2:setUp()
"""
@classmethod
def startResponders(cls):
print("Launching responders..")
cls._UDPResponder = threading.Thread(name='UDP Responder', target=cls.UDPResponder, args=[cls._testServerPort, cls._toResponderQueue, cls._fromResponderQueue])
cls._UDPResponder.setDaemon(True)
cls._UDPResponder.start()
cls._UDPResponder2 = threading.Thread(name='UDP Responder 2', target=cls.UDPResponder, args=[cls._testServer2Port, cls._toResponderQueue, cls._fromResponderQueue])
cls._UDPResponder2.setDaemon(True)
cls._UDPResponder2.start()
cls._TCPResponder = threading.Thread(name='TCP Responder', target=cls.TCPResponder, args=[cls._testServerPort, cls._toResponderQueue, cls._fromResponderQueue])
cls._TCPResponder.setDaemon(True)
cls._TCPResponder.start()
cls._TCPResponder2 = threading.Thread(name='TCP Responder 2', target=cls.TCPResponder, args=[cls._testServer2Port, cls._toResponderQueue, cls._fromResponderQueue])
cls._TCPResponder2.setDaemon(True)
cls._TCPResponder2.start()
def testOrder(self):
"""
Routing: firstAvailable policy based on 'order'
Send 50 A queries to "order.routing.tests.powerdns.com.",
        check that dnsdist routes all of them to the second backend
because it has the lower order value.
"""
numberOfQueries = 50
name = 'order.routing.tests.powerdns.com.'
query = dns.message.make_query(name, 'A', 'IN')
response = dns.message.make_response(query)
rrset = dns.rrset.from_text(name,
60,
dns.rdataclass.IN,
dns.rdatatype.A,
'192.0.2.1')
response.answer.append(rrset)
for _ in range(numberOfQueries):
(receivedQuery, receivedResponse) = self.sendUDPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
(receivedQuery, receivedResponse) = self.sendTCPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
total = 0
if 'UDP Responder' in self._responsesCounter:
self.assertEquals(self._responsesCounter['UDP Responder'], 0)
self.assertEquals(self._responsesCounter['UDP Responder 2'], numberOfQueries)
if 'TCP Responder' in self._responsesCounter:
self.assertEquals(self._responsesCounter['TCP Responder'], 0)
self.assertEquals(self._responsesCounter['TCP Responder 2'], numberOfQueries)
class TestRoutingNoServer(DNSDistTest):
_config_template = """
newServer{address="127.0.0.1:%s", pool="real"}
setServFailWhenNoServer(true)
"""
def testPolicyPoolNoServer(self):
"""
Routing: No server should return ServFail
"""
name = 'noserver.routing.tests.powerdns.com.'
query = dns.message.make_query(name, 'A', 'IN')
expectedResponse = dns.message.make_response(query)
expectedResponse.set_rcode(dns.rcode.SERVFAIL)
(_, receivedResponse) = self.sendUDPQuery(query, response=None, useQueue=False)
self.assertEquals(receivedResponse, expectedResponse)
(_, receivedResponse) = self.sendTCPQuery(query, response=None, useQueue=False)
self.assertEquals(receivedResponse, expectedResponse)
| gpl-2.0 |
ixc/django-fluent-contents | fluent_contents/tests/utils.py | 2 | 4672 | from __future__ import print_function
from future.builtins import str
from functools import wraps
from django.conf import settings, UserSettingsHolder
from django.contrib.auth import get_user_model
from django.core.management import call_command
from django.contrib.sites.models import Site
from django.db.models import loading
from django.test import TestCase
from django.utils.importlib import import_module
import os
class AppTestCase(TestCase):
"""
Tests for URL resolving.
"""
user = None
install_apps = (
'fluent_contents.tests.testapp',
)
@classmethod
def setUpClass(cls):
super(AppTestCase, cls).setUpClass()
# Avoid early import, triggers AppCache
from django.template.loaders import app_directories
User = get_user_model()
if cls.install_apps:
            # When running this app via `./manage.py test fluent_contents`, auto install the test app + models.
run_syncdb = False
for appname in cls.install_apps:
if appname not in settings.INSTALLED_APPS:
print('Adding {0} to INSTALLED_APPS'.format(appname))
settings.INSTALLED_APPS += (appname,)
run_syncdb = True
# Flush caches
testapp = import_module(appname)
loading.cache.loaded = False
app_directories.app_template_dirs += (
os.path.join(os.path.dirname(testapp.__file__), 'templates'),
)
print(appname, os.path.join(os.path.dirname(testapp.__file__), 'templates'))
if run_syncdb:
call_command('syncdb', verbosity=0) # may run south's overlaid version
# Create basic objects
# 1.4 does not create site automatically with the defined SITE_ID, 1.3 does.
Site.objects.get_or_create(id=settings.SITE_ID, defaults=dict(domain='django.localhost', name='django at localhost'))
(cls.user, _) = User.objects.get_or_create(is_superuser=True, is_staff=True, username="admin")
def assert200(self, url, msg_prefix=''):
"""
Test that an URL exists.
"""
if msg_prefix:
msg_prefix += ": "
self.assertEqual(self.client.get(url).status_code, 200, str(msg_prefix) + u"Page at {0} should be found.".format(url))
def assert404(self, url, msg_prefix=''):
"""
Test that an URL does not exist.
"""
if msg_prefix:
msg_prefix += ": "
self.assertEqual(self.client.get(url).status_code, 404, str(msg_prefix) + u"Page at {0} should return 404.".format(url))
try:
from django.test.utils import override_settings # Django 1.4
except ImportError:
class override_settings(object):
"""
Acts as either a decorator, or a context manager. If it's a decorator it
takes a function and returns a wrapped function. If it's a contextmanager
it's used with the ``with`` statement. In either event entering/exiting
are called before and after, respectively, the function/block is executed.
"""
def __init__(self, **kwargs):
self.options = kwargs
self.wrapped = settings._wrapped
def __enter__(self):
self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def __call__(self, test_func):
from django.test import TransactionTestCase
if isinstance(test_func, type) and issubclass(test_func, TransactionTestCase):
original_pre_setup = test_func._pre_setup
original_post_teardown = test_func._post_teardown
def _pre_setup(innerself):
self.enable()
original_pre_setup(innerself)
def _post_teardown(innerself):
original_post_teardown(innerself)
self.disable()
test_func._pre_setup = _pre_setup
test_func._post_teardown = _post_teardown
return test_func
else:
@wraps(test_func)
def inner(*args, **kwargs):
with self:
return test_func(*args, **kwargs)
return inner
def enable(self):
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
settings._wrapped = override
def disable(self):
settings._wrapped = self.wrapped
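# Illustrative usage sketch (added comment): the fallback mirrors Django 1.4's
# override_settings and can be used as a class/function decorator or as a
# context manager, e.g.
#
#     @override_settings(SITE_ID=2)
#     class MySiteTests(AppTestCase):
#         ...
#
#     with override_settings(DEBUG=True):
#         ...  # code under test sees DEBUG = True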
| apache-2.0 |
Ichag/openerp-server | openerp/addons/base/res/res_config.py | 1 | 28694 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from operator import attrgetter
import re
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import ustr
from openerp.tools.translate import _
from openerp import exceptions
_logger = logging.getLogger(__name__)
class res_config_module_installation_mixin(object):
def _install_modules(self, cr, uid, modules, context):
"""Install the requested modules.
return the next action to execute
modules is a list of tuples
(mod_name, browse_record | None)
"""
ir_module = self.pool.get('ir.module.module')
to_install_ids = []
to_install_missing_names = []
for name, module in modules:
if not module:
to_install_missing_names.append(name)
elif module.state == 'uninstalled':
to_install_ids.append(module.id)
result = None
if to_install_ids:
result = ir_module.button_immediate_install(cr, uid, to_install_ids, context=context)
#FIXME: if result is not none, the corresponding todo will be skipped because it was just marked done
if to_install_missing_names:
return {
'type': 'ir.actions.client',
'tag': 'apps',
'params': {'modules': to_install_missing_names},
}
return result
class res_config_configurable(osv.osv_memory):
''' Base classes for new-style configuration items
Configuration items should inherit from this class, implement
the execute method (and optionally the cancel one) and have
their view inherit from the related res_config_view_base view.
'''
_name = 'res.config'
def _next_action(self, cr, uid, context=None):
Todos = self.pool['ir.actions.todo']
_logger.info('getting next %s', Todos)
active_todos = Todos.browse(cr, uid,
Todos.search(cr, uid, ['&', ('type', '=', 'automatic'), ('state','=','open')]),
context=context)
user_groups = set(map(
lambda g: g.id,
self.pool['res.users'].browse(cr, uid, [uid], context=context)[0].groups_id))
valid_todos_for_user = [
todo for todo in active_todos
if not todo.groups_id or bool(user_groups.intersection((
group.id for group in todo.groups_id)))
]
if valid_todos_for_user:
return valid_todos_for_user[0]
return None
def _next(self, cr, uid, context=None):
_logger.info('getting next operation')
next = self._next_action(cr, uid, context=context)
_logger.info('next action is %s', next)
if next:
res = next.action_launch(context=context)
res['nodestroy'] = False
return res
# reload the client; open the first available root menu
menu_obj = self.pool['ir.ui.menu']
menu_ids = menu_obj.search(cr, uid, [('parent_id', '=', False)], context=context)
return {
'type': 'ir.actions.client',
'tag': 'reload',
'params': {'menu_id': menu_ids and menu_ids[0] or False},
}
def start(self, cr, uid, ids, context=None):
return self.next(cr, uid, ids, context)
def next(self, cr, uid, ids, context=None):
""" Returns the next todo action to execute (using the default
sort order)
"""
return self._next(cr, uid, context=context)
def execute(self, cr, uid, ids, context=None):
""" Method called when the user clicks on the ``Next`` button.
Execute *must* be overloaded unless ``action_next`` is overloaded
(which is something you generally don't need to do).
If ``execute`` returns an action dictionary, that action is executed
rather than just going to the next configuration item.
"""
raise NotImplementedError(
'Configuration items need to implement execute')
def cancel(self, cr, uid, ids, context=None):
""" Method called when the user click on the ``Skip`` button.
``cancel`` should be overloaded instead of ``action_skip``. As with
``execute``, if it returns an action dictionary that action is
        executed instead of the default (going to the next configuration item)
The default implementation is a NOOP.
``cancel`` is also called by the default implementation of
``action_cancel``.
"""
pass
def action_next(self, cr, uid, ids, context=None):
""" Action handler for the ``next`` event.
Sets the status of the todo the event was sent from to
``done``, calls ``execute`` and -- unless ``execute`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.execute(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
def action_skip(self, cr, uid, ids, context=None):
""" Action handler for the ``skip`` event.
Sets the status of the todo the event was sent from to
``skip``, calls ``cancel`` and -- unless ``cancel`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.cancel(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
def action_cancel(self, cr, uid, ids, context=None):
""" Action handler for the ``cancel`` event. That event isn't
generated by the res.config.view.base inheritable view, the
inherited view has to overload one of the buttons (or add one
more).
Sets the status of the todo the event was sent from to
``cancel``, calls ``cancel`` and -- unless ``cancel`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.cancel(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
class res_config_installer(osv.osv_memory, res_config_module_installation_mixin):
""" New-style configuration base specialized for addons selection
and installation.
Basic usage
-----------
Subclasses can simply define a number of _columns as
fields.boolean objects. The keys (column names) should be the
names of the addons to install (when selected). Upon action
execution, selected boolean fields (and those only) will be
interpreted as addons to install, and batch-installed.
Additional addons
-----------------
It is also possible to require the installation of an additional
addon set when a specific preset of addons has been marked for
installation (in the basic usage only, additionals can't depend on
one another).
These additionals are defined through the ``_install_if``
property. This property is a mapping of a collection of addons (by
name) to a collection of addons (by name) [#]_, and if all the *key*
addons are selected for installation, then the *value* ones will
be selected as well. For example::
_install_if = {
('sale','crm'): ['sale_crm'],
}
This will install the ``sale_crm`` addon if and only if both the
``sale`` and ``crm`` addons are selected for installation.
You can define as many additionals as you wish, and additionals
can overlap in key and value. For instance::
_install_if = {
('sale','crm'): ['sale_crm'],
('sale','project'): ['project_mrp'],
}
will install both ``sale_crm`` and ``project_mrp`` if all of
``sale``, ``crm`` and ``project`` are selected for installation.
Hook methods
------------
Subclasses might also need to express dependencies more complex
than that provided by additionals. In this case, it's possible to
define methods of the form ``_if_%(name)s`` where ``name`` is the
name of a boolean field. If the field is selected, then the
corresponding module will be marked for installation *and* the
hook method will be executed.
Hook methods take the usual set of parameters (cr, uid, ids,
context) and can return a collection of additional addons to
install (if they return anything, otherwise they should not return
anything, though returning any "falsy" value such as None or an
empty collection will have the same effect).
Complete control
----------------
The last hook is to simply overload the ``modules_to_install``
method, which implements all the mechanisms above. This method
takes the usual set of parameters (cr, uid, ids, context) and
returns a ``set`` of addons to install (addons selected by the
above methods minus addons from the *basic* set which are already
installed) [#]_ so an overloader can simply manipulate the ``set``
returned by ``res_config_installer.modules_to_install`` to add or
remove addons.
Skipping the installer
----------------------
Unless it is removed from the view, installers have a *skip*
button which invokes ``action_skip`` (and the ``cancel`` hook from
``res.config``). Hooks and additionals *are not run* when skipping
installation, even for already installed addons.
Again, setup your hooks accordingly.
.. [#] note that since a mapping key needs to be hashable, it's
possible to use a tuple or a frozenset, but not a list or a
regular set
.. [#] because the already-installed modules are only pruned at
the very end of ``modules_to_install``, additionals and
hooks depending on them *are guaranteed to execute*. Setup
your hooks accordingly.
"""
_name = 'res.config.installer'
_inherit = 'res.config'
_install_if = {}
def already_installed(self, cr, uid, context=None):
""" For each module, check if it's already installed and if it
is return its name
:returns: a list of the already installed modules in this
installer
:rtype: [str]
"""
return map(attrgetter('name'),
self._already_installed(cr, uid, context=context))
def _already_installed(self, cr, uid, context=None):
""" For each module (boolean fields in a res.config.installer),
check if it's already installed (either 'to install', 'to upgrade'
or 'installed') and if it is return the module's browse_record
:returns: a list of all installed modules in this installer
:rtype: [browse_record]
"""
modules = self.pool['ir.module.module']
selectable = [field for field in self._columns
if type(self._columns[field]) is fields.boolean]
return modules.browse(
cr, uid,
modules.search(cr, uid,
[('name','in',selectable),
('state','in',['to install', 'installed', 'to upgrade'])],
context=context),
context=context)
def modules_to_install(self, cr, uid, ids, context=None):
""" selects all modules to install:
* checked boolean fields
* return values of hook methods. Hook methods are of the form
``_if_%(addon_name)s``, and are called if the corresponding
addon is marked for installation. They take the arguments
cr, uid, ids and context, and return an iterable of addon
names
* additionals, additionals are setup through the ``_install_if``
class variable. ``_install_if`` is a dict of {iterable:iterable}
where key and value are iterables of addon names.
If all the addons in the key are selected for installation
(warning: addons added through hooks don't count), then the
addons in the value are added to the set of modules to install
* not already installed
"""
base = set(module_name
for installer in self.read(cr, uid, ids, context=context)
for module_name, to_install in installer.iteritems()
if module_name != 'id'
if type(self._columns[module_name]) is fields.boolean
if to_install)
hooks_results = set()
for module in base:
hook = getattr(self, '_if_%s'% module, None)
if hook:
hooks_results.update(hook(cr, uid, ids, context=None) or set())
additionals = set(
module for requirements, consequences \
in self._install_if.iteritems()
if base.issuperset(requirements)
for module in consequences)
return (base | hooks_results | additionals).difference(
self.already_installed(cr, uid, context))
def default_get(self, cr, uid, fields_list, context=None):
''' If an addon is already installed, check it by default
'''
defaults = super(res_config_installer, self).default_get(
cr, uid, fields_list, context=context)
return dict(defaults,
**dict.fromkeys(
self.already_installed(cr, uid, context=context),
True))
def fields_get(self, cr, uid, fields=None, context=None, write_access=True):
""" If an addon is already installed, set it to readonly as
res.config.installer doesn't handle uninstallations of already
installed addons
"""
fields = super(res_config_installer, self).fields_get(
cr, uid, fields, context, write_access)
for name in self.already_installed(cr, uid, context=context):
if name not in fields:
continue
fields[name].update(
readonly=True,
help= ustr(fields[name].get('help', '')) +
_('\n\nThis addon is already installed on your system'))
return fields
def execute(self, cr, uid, ids, context=None):
to_install = list(self.modules_to_install(
cr, uid, ids, context=context))
_logger.info('Selecting addons %s to install', to_install)
ir_module = self.pool.get('ir.module.module')
modules = []
for name in to_install:
mod_ids = ir_module.search(cr, uid, [('name', '=', name)])
record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
modules.append((name, record))
return self._install_modules(cr, uid, modules, context=context)
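# Illustrative sketch (added comment, following the docstring above; the model
# name and columns are made up):
#
#     class my_installer(osv.osv_memory):
#         _name = 'my.installer'
#         _inherit = 'res.config.installer'
#         _install_if = {('sale', 'crm'): ['sale_crm']}
#         _columns = {
#             'sale': fields.boolean('Sales'),
#             'crm': fields.boolean('CRM'),
#         }
#
# Ticking both booleans in the wizard selects 'sale', 'crm' and, through
# _install_if, 'sale_crm' for installation.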
class res_config_settings(osv.osv_memory, res_config_module_installation_mixin):
""" Base configuration wizard for application settings. It provides support for setting
default values, assigning groups to employee users, and installing modules.
To make such a 'settings' wizard, define a model like::
class my_config_wizard(osv.osv_memory):
_name = 'my.settings'
_inherit = 'res.config.settings'
_columns = {
'default_foo': fields.type(..., default_model='my.model'),
'group_bar': fields.boolean(..., group='base.group_user', implied_group='my.group'),
'module_baz': fields.boolean(...),
'other_field': fields.type(...),
}
The method ``execute`` provides some support based on a naming convention:
* For a field like 'default_XXX', ``execute`` sets the (global) default value of
the field 'XXX' in the model named by ``default_model`` to the field's value.
* For a boolean field like 'group_XXX', ``execute`` adds/removes 'implied_group'
to/from the implied groups of 'group', depending on the field's value.
By default 'group' is the group Employee. Groups are given by their xml id.
* For a boolean field like 'module_XXX', ``execute`` triggers the immediate
installation of the module named 'XXX' if the field has value ``True``.
* For the other fields, the method ``execute`` invokes all methods with a name
that starts with 'set_'; such methods can be defined to implement the effect
of those fields.
The method ``default_get`` retrieves values that reflect the current status of the
fields like 'default_XXX', 'group_XXX' and 'module_XXX'. It also invokes all methods
with a name that starts with 'get_default_'; such methods can be defined to provide
current values for other fields.
"""
_name = 'res.config.settings'
def copy(self, cr, uid, id, values, context=None):
raise osv.except_osv(_("Cannot duplicate configuration!"), "")
def _get_classified_fields(self, cr, uid, context=None):
""" return a dictionary with the fields classified by category::
{ 'default': [('default_foo', 'model', 'foo'), ...],
'group': [('group_bar', browse_group, browse_implied_group), ...],
'module': [('module_baz', browse_module), ...],
'other': ['other_field', ...],
}
"""
ir_model_data = self.pool['ir.model.data']
ir_module = self.pool['ir.module.module']
def ref(xml_id):
mod, xml = xml_id.split('.', 1)
return ir_model_data.get_object(cr, uid, mod, xml, context)
defaults, groups, modules, others = [], [], [], []
for name, field in self._columns.items():
if name.startswith('default_') and hasattr(field, 'default_model'):
defaults.append((name, field.default_model, name[8:]))
elif name.startswith('group_') and isinstance(field, fields.boolean) and hasattr(field, 'implied_group'):
field_group = getattr(field, 'group', 'base.group_user')
groups.append((name, ref(field_group), ref(field.implied_group)))
elif name.startswith('module_') and isinstance(field, fields.boolean):
mod_ids = ir_module.search(cr, uid, [('name', '=', name[7:])])
record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
modules.append((name, record))
else:
others.append(name)
return {'default': defaults, 'group': groups, 'module': modules, 'other': others}
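    # Illustrative sketch (added comment, matching the naming convention in the
    # class docstring): for a wizard with columns 'default_foo'
    # (default_model='my.model'), 'group_bar', 'module_baz' and 'other_field',
    # the mapping returned above is roughly
    #   {'default': [('default_foo', 'my.model', 'foo')],
    #    'group': [('group_bar', <group browse record>, <implied group browse record>)],
    #    'module': [('module_baz', <ir.module.module browse record or None>)],
    #    'other': ['other_field']}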
def default_get(self, cr, uid, fields, context=None):
ir_values = self.pool['ir.values']
classified = self._get_classified_fields(cr, uid, context)
res = super(res_config_settings, self).default_get(cr, uid, fields, context)
# defaults: take the corresponding default value they set
for name, model, field in classified['default']:
value = ir_values.get_default(cr, uid, model, field)
if value is not None:
res[name] = value
# groups: which groups are implied by the group Employee
for name, group, implied_group in classified['group']:
res[name] = implied_group in group.implied_ids
# modules: which modules are installed/to install
for name, module in classified['module']:
res[name] = module and module.state in ('installed', 'to install', 'to upgrade')
# other fields: call all methods that start with 'get_default_'
for method in dir(self):
if method.startswith('get_default_'):
res.update(getattr(self, method)(cr, uid, fields, context))
return res
def execute(self, cr, uid, ids, context=None):
ir_values = self.pool['ir.values']
ir_module = self.pool['ir.module.module']
classified = self._get_classified_fields(cr, uid, context)
config = self.browse(cr, uid, ids[0], context)
# default values fields
for name, model, field in classified['default']:
ir_values.set_default(cr, uid, model, field, config[name])
# group fields: modify group / implied groups
for name, group, implied_group in classified['group']:
if config[name]:
group.write({'implied_ids': [(4, implied_group.id)]})
else:
group.write({'implied_ids': [(3, implied_group.id)]})
implied_group.write({'users': [(3, u.id) for u in group.users]})
# other fields: execute all methods that start with 'set_'
for method in dir(self):
if method.startswith('set_'):
getattr(self, method)(cr, uid, ids, context)
# module fields: install/uninstall the selected modules
to_install = []
to_uninstall_ids = []
lm = len('module_')
for name, module in classified['module']:
if config[name]:
to_install.append((name[lm:], module))
else:
if module and module.state in ('installed', 'to upgrade'):
to_uninstall_ids.append(module.id)
if to_uninstall_ids:
ir_module.button_immediate_uninstall(cr, uid, to_uninstall_ids, context=context)
action = self._install_modules(cr, uid, to_install, context=context)
if action:
return action
# After the uninstall/install calls, the self.pool is no longer valid.
# So we reach into the RegistryManager directly.
res_config = openerp.modules.registry.RegistryManager.get(cr.dbname)['res.config']
config = res_config.next(cr, uid, [], context=context) or {}
if config.get('type') not in ('ir.actions.act_window_close',):
return config
# force client-side reload (update user menu and current view)
return {
'type': 'ir.actions.client',
'tag': 'reload',
}
def cancel(self, cr, uid, ids, context=None):
# ignore the current record, and send the action to reopen the view
act_window = self.pool['ir.actions.act_window']
action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)])
if action_ids:
return act_window.read(cr, uid, action_ids[0], [], context=context)
return {}
def name_get(self, cr, uid, ids, context=None):
""" Override name_get method to return an appropriate configuration wizard
name, and not the generated name."""
if not ids:
return []
# name_get may receive int id instead of an id list
if isinstance(ids, (int, long)):
ids = [ids]
act_window = self.pool['ir.actions.act_window']
action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)], context=context)
name = self._name
if action_ids:
name = act_window.read(cr, uid, action_ids[0], ['name'], context=context)['name']
return [(record.id, name) for record in self.browse(cr, uid , ids, context=context)]
def get_option_path(self, cr, uid, menu_xml_id, context=None):
"""
Fetch the path to a specified configuration view and the action id to access it.
:param string menu_xml_id: the xml id of the menuitem where the view is located,
structured as follows: module_name.menuitem_xml_id (e.g.: "base.menu_sale_config")
:return tuple:
- t[0]: string: full path to the menuitem (e.g.: "Settings/Configuration/Sales")
- t[1]: int or long: id of the menuitem's action
"""
module_name, menu_xml_id = menu_xml_id.split('.')
dummy, menu_id = self.pool['ir.model.data'].get_object_reference(cr, uid, module_name, menu_xml_id)
ir_ui_menu = self.pool['ir.ui.menu'].browse(cr, uid, menu_id, context=context)
return (ir_ui_menu.complete_name, ir_ui_menu.action.id)
def get_option_name(self, cr, uid, full_field_name, context=None):
"""
Fetch the human readable name of a specified configuration option.
:param string full_field_name: the full name of the field, structured as follows:
model_name.field_name (e.g.: "sale.config.settings.fetchmail_lead")
:return string: human readable name of the field (e.g.: "Create leads from incoming mails")
"""
model_name, field_name = full_field_name.rsplit('.', 1)
return self.pool[model_name].fields_get(cr, uid, allfields=[field_name], context=context)[field_name]['string']
def get_config_warning(self, cr, msg, context=None):
"""
Helper: return a Warning exception with the given message where the %(field:xxx)s
and/or %(menu:yyy)s are replaced by the human readable field's name and/or menuitem's
full path.
Usage:
------
Just include in your error message %(field:model_name.field_name)s to obtain the human
readable field's name, and/or %(menu:module_name.menuitem_xml_id)s to obtain the menuitem's
full path.
Example of use:
---------------
            raise self.pool['res.config.settings'].get_config_warning(cr, _("Error: this action is prohibited. You should check the field %(field:sale.config.settings.fetchmail_lead)s in %(menu:base.menu_sale_config)s."), context=context)
This will return an exception containing the following message:
Error: this action is prohibited. You should check the field Create leads from incoming mails in Settings/Configuration/Sales.
What if there is another substitution in the message already?
-------------------------------------------------------------
You could have a situation where the error message you want to upgrade already contains a substitution. Example:
Cannot find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration\Journals\Journals.
        What you want to do here is simply to replace the path by %(menu:account.menu_account_config)s, and leave the rest alone.
In order to do that, you can use the double percent (%%) to escape your new substitution, like so:
Cannot find any account journal of %s type for this company.\n\nYou can create one in the %%(menu:account.menu_account_config)s.
"""
res_config_obj = openerp.registry(cr.dbname)['res.config.settings']
regex_path = r'%\(((?:menu|field):[a-z_\.]*)\)s'
# Process the message
# 1/ find the menu and/or field references, put them in a list
references = re.findall(regex_path, msg, flags=re.I)
# 2/ fetch the menu and/or field replacement values (full path and
# human readable field's name) and the action_id if any
values = {}
action_id = None
for item in references:
ref_type, ref = item.split(':')
if ref_type == 'menu':
values[item], action_id = res_config_obj.get_option_path(cr, SUPERUSER_ID, ref, context=context)
elif ref_type == 'field':
values[item] = res_config_obj.get_option_name(cr, SUPERUSER_ID, ref, context=context)
# 3/ substitute and return the result
        if action_id:
return exceptions.RedirectWarning(msg % values, action_id, _('Go to the configuration panel'))
return exceptions.Warning(msg % values)
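    # Illustration added for clarity (not part of the original module): how the
    # reference regex above picks up substitutions, assuming the example ids from
    # the docstring exist in the database.
    #
    #   msg = "Check %(field:sale.config.settings.fetchmail_lead)s in %(menu:base.menu_sale_config)s"
    #   re.findall(r'%\(((?:menu|field):[a-z_\.]*)\)s', msg, flags=re.I)
    #   # -> ['field:sale.config.settings.fetchmail_lead', 'menu:base.menu_sale_config']
    #
    # Each reference is then resolved with get_option_name()/get_option_path()
    # and substituted back with `msg % values`.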
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
eufarn7sp/egads-eufar | egads/thirdparty/quantities/constants/alpha.py | 4 | 1344 | # -*- coding: utf-8 -*-
"""
"""
from __future__ import absolute_import
from ._utils import _cd
from ..unitquantity import UnitConstant
m_alpha = alpha_particle_mass = UnitConstant(
'alpha_particle_mass',
_cd('alpha particle mass'),
symbol='m_alpha',
u_symbol='m_α'
)
alpha_particle_mass_energy_equivalent = UnitConstant(
'alpha_particle_mass_energy_equivalent',
_cd('alpha particle mass energy equivalent'),
symbol='(m_alpha*c**2)',
u_symbol='(m_α·c²)'
)
alpha_particle_mass_energy_equivalent_in_MeV = UnitConstant(
'alpha_particle_mass_energy_equivalent_in_MeV',
_cd('alpha particle mass energy equivalent in MeV'),
)
alpha_particle_mass_in_u = UnitConstant(
'alpha_particle_mass_in_u',
_cd('alpha particle mass in u')
)
alpha_particle_molar_mass = UnitConstant(
'alpha_particle_molar_mass',
_cd('alpha particle molar mass'),
symbol='M_alpha',
u_symbol='M_α'
)
alpha_particle_electron_mass_ratio = UnitConstant(
'alpha_particle_electron_mass_ratio',
_cd('alpha particle-electron mass ratio'),
symbol='(m_alpha/m_e)',
u_symbol='(m_α/mₑ)'
)
alpha_particle_proton_mass_ratio = UnitConstant(
'alpha_particle_proton_mass_ratio',
_cd('alpha particle-proton mass ratio'),
symbol='(m_alpha/m_p)',
u_symbol='(m_α/m_p)'
)
del UnitConstant, _cd
| bsd-3-clause |
untom/keras | tests/auto/keras/test_normalization.py | 33 | 3810 | import unittest
import numpy as np
from numpy.testing import assert_allclose
from theano import tensor as T
from keras.layers import normalization
from keras.models import Sequential
class TestBatchNormalization(unittest.TestCase):
def setUp(self):
self.input_1 = np.arange(10)
self.input_2 = np.zeros(10)
self.input_3 = np.ones((10))
self.input_shapes = [np.ones((10, 10)), np.ones((10, 10, 10))]
def test_setup(self):
norm_m0 = normalization.BatchNormalization((10, 10))
norm_m1 = normalization.BatchNormalization((10, 10), mode=1)
# mode 3 does not exist
        self.assertRaises(Exception, normalization.BatchNormalization, (10, 10), mode=3)
def test_mode_0(self):
model = Sequential()
norm_m0 = normalization.BatchNormalization((10,))
model.add(norm_m0)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
model.fit(X, X, nb_epoch=5, verbose=0)
norm_m0.input = X
out = (norm_m0.get_output(train=True) - norm_m0.beta) / norm_m0.gamma
self.assertAlmostEqual(out.mean().eval(), 0.0, places=1)
self.assertAlmostEqual(out.std().eval(), 1.0, places=1)
def test_mode_1(self):
norm_m1 = normalization.BatchNormalization((10,), mode=1)
norm_m1.init_updates()
for inp in [self.input_1, self.input_2, self.input_3]:
norm_m1.input = inp
out = (norm_m1.get_output(train=True) - norm_m1.beta) / norm_m1.gamma
self.assertAlmostEqual(out.mean().eval(), 0.0)
if inp.std() > 0.:
self.assertAlmostEqual(out.std().eval(), 1.0, places=2)
else:
self.assertAlmostEqual(out.std().eval(), 0.0, places=2)
def test_shapes(self):
"""
Test batch normalization with various input shapes
"""
for inp in self.input_shapes:
norm_m0 = normalization.BatchNormalization(inp.shape, mode=0)
norm_m0.init_updates()
norm_m0.input = inp
out = (norm_m0.get_output(train=True) - norm_m0.beta) / norm_m0.gamma
norm_m1 = normalization.BatchNormalization(inp.shape, mode=1)
norm_m1.input = inp
out = (norm_m1.get_output(train=True) - norm_m1.beta) / norm_m1.gamma
def test_weight_init(self):
"""
Test weight initialization
"""
norm_m1 = normalization.BatchNormalization((10,), mode=1, weights=[np.ones(10), np.ones(10), np.zeros(10), np.zeros(10)])
norm_m1.init_updates()
for inp in [self.input_1, self.input_2, self.input_3]:
norm_m1.input = inp
out = (norm_m1.get_output(train=True) - np.ones(10)) / 1.
self.assertAlmostEqual(out.mean().eval(), 0.0)
if inp.std() > 0.:
self.assertAlmostEqual(out.std().eval(), 1.0, places=2)
else:
self.assertAlmostEqual(out.std().eval(), 0.0, places=2)
assert_allclose(norm_m1.gamma.eval(), np.ones(10))
assert_allclose(norm_m1.beta.eval(), np.ones(10))
def test_config(self):
norm = normalization.BatchNormalization((10, 10), mode=1, epsilon=0.1)
conf = norm.get_config()
conf_target = {"input_shape": (10, 10), "name": normalization.BatchNormalization.__name__,
"epsilon": 0.1, "mode": 1}
self.assertDictEqual(conf, conf_target)
def test_save_weights(self):
norm = normalization.BatchNormalization((10, 10), mode=1, epsilon=0.1)
weights = norm.get_weights()
assert(len(weights) == 4)
norm.set_weights(weights)
if __name__ == '__main__':
unittest.main()
| mit |
afloren/nipype | nipype/interfaces/cmtk/convert.py | 14 | 10363 | """
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
import os, os.path as op
import datetime
import string
import warnings
import networkx as nx
from nipype.interfaces.base import (BaseInterface, BaseInterfaceInputSpec, traits,
File, TraitedSpec, InputMultiPath, isdefined)
from nipype.utils.filemanip import split_filename
from nipype.utils.misc import package_check
have_cfflib = True
try:
package_check('cfflib')
except Exception, e:
have_cfflib = False
else:
import cfflib as cf
class CFFConverterInputSpec(BaseInterfaceInputSpec):
graphml_networks = InputMultiPath(File(exists=True), desc='list of graphML networks')
gpickled_networks = InputMultiPath(File(exists=True), desc='list of gpickled Networkx graphs')
gifti_surfaces = InputMultiPath(File(exists=True), desc='list of GIFTI surfaces')
gifti_labels = InputMultiPath(File(exists=True), desc='list of GIFTI labels')
nifti_volumes = InputMultiPath(File(exists=True), desc='list of NIFTI volumes')
tract_files = InputMultiPath(File(exists=True), desc='list of Trackvis fiber files')
timeseries_files = InputMultiPath(File(exists=True), desc='list of HDF5 timeseries files')
script_files = InputMultiPath(File(exists=True), desc='list of script files to include')
data_files = InputMultiPath(File(exists=True), desc='list of external data files (i.e. Numpy, HD5, XML) ')
title = traits.Str(desc='Connectome Title')
creator = traits.Str(desc='Creator')
email = traits.Str(desc='Email address')
publisher = traits.Str(desc='Publisher')
license = traits.Str(desc='License')
rights = traits.Str(desc='Rights')
references = traits.Str(desc='References')
relation = traits.Str(desc='Relation')
species = traits.Str('Homo sapiens',desc='Species',usedefault=True)
description = traits.Str('Created with the Nipype CFF converter', desc='Description', usedefault=True)
out_file = File('connectome.cff', usedefault = True, desc='Output connectome file')
class CFFConverterOutputSpec(TraitedSpec):
connectome_file = File(exists=True, desc='Output connectome file')
class CFFConverter(BaseInterface):
"""
Creates a Connectome File Format (CFF) file from input networks, surfaces, volumes, tracts, etcetera....
Example
-------
>>> import nipype.interfaces.cmtk as cmtk
>>> cvt = cmtk.CFFConverter()
>>> cvt.inputs.title = 'subject 1'
>>> cvt.inputs.gifti_surfaces = ['lh.pial_converted.gii', 'rh.pial_converted.gii']
>>> cvt.inputs.tract_files = ['streamlines.trk']
>>> cvt.inputs.gpickled_networks = ['network0.gpickle']
>>> cvt.run() # doctest: +SKIP
"""
input_spec = CFFConverterInputSpec
output_spec = CFFConverterOutputSpec
def _run_interface(self, runtime):
a = cf.connectome()
if isdefined(self.inputs.title):
a.connectome_meta.set_title(self.inputs.title)
else:
a.connectome_meta.set_title(self.inputs.out_file)
if isdefined(self.inputs.creator):
a.connectome_meta.set_creator(self.inputs.creator)
else:
#Probably only works on some OSes...
a.connectome_meta.set_creator(os.getenv('USER'))
if isdefined(self.inputs.email):
a.connectome_meta.set_email(self.inputs.email)
if isdefined(self.inputs.publisher):
a.connectome_meta.set_publisher(self.inputs.publisher)
if isdefined(self.inputs.license):
a.connectome_meta.set_license(self.inputs.license)
if isdefined(self.inputs.rights):
a.connectome_meta.set_rights(self.inputs.rights)
if isdefined(self.inputs.references):
a.connectome_meta.set_references(self.inputs.references)
if isdefined(self.inputs.relation):
a.connectome_meta.set_relation(self.inputs.relation)
if isdefined(self.inputs.species):
a.connectome_meta.set_species(self.inputs.species)
if isdefined(self.inputs.description):
a.connectome_meta.set_description(self.inputs.description)
a.connectome_meta.set_created(datetime.date.today())
count = 0
if isdefined(self.inputs.graphml_networks):
for ntwk in self.inputs.graphml_networks:
# There must be a better way to deal with the unique name problem
#(i.e. tracks and networks can't use the same name, and previously we were pulling them both from the input files)
ntwk_name = 'Network {cnt}'.format(cnt=count)
a.add_connectome_network_from_graphml(ntwk_name, ntwk)
count += 1
if isdefined(self.inputs.gpickled_networks):
unpickled = []
for ntwk in self.inputs.gpickled_networks:
_, ntwk_name, _ = split_filename(ntwk)
unpickled = nx.read_gpickle(ntwk)
cnet = cf.CNetwork(name = ntwk_name)
cnet.set_with_nxgraph(unpickled)
a.add_connectome_network(cnet)
count += 1
count = 0
if isdefined(self.inputs.tract_files):
for trk in self.inputs.tract_files:
_, trk_name, _ = split_filename(trk)
ctrack = cf.CTrack(trk_name, trk)
a.add_connectome_track(ctrack)
count += 1
count = 0
if isdefined(self.inputs.gifti_surfaces):
for surf in self.inputs.gifti_surfaces:
_, surf_name, _ = split_filename(surf)
csurf = cf.CSurface.create_from_gifti("Surface %d - %s" % (count,surf_name), surf)
csurf.fileformat='Gifti'
csurf.dtype='Surfaceset'
a.add_connectome_surface(csurf)
count += 1
count = 0
if isdefined(self.inputs.gifti_labels):
for label in self.inputs.gifti_labels:
_, label_name, _ = split_filename(label)
csurf = cf.CSurface.create_from_gifti("Surface Label %d - %s" % (count,label_name), label)
csurf.fileformat='Gifti'
csurf.dtype='Labels'
a.add_connectome_surface(csurf)
count += 1
if isdefined(self.inputs.nifti_volumes):
for vol in self.inputs.nifti_volumes:
_, vol_name, _ = split_filename(vol)
cvol = cf.CVolume.create_from_nifti(vol_name,vol)
a.add_connectome_volume(cvol)
if isdefined(self.inputs.script_files):
for script in self.inputs.script_files:
_, script_name, _ = split_filename(script)
cscript = cf.CScript.create_from_file(script_name, script)
a.add_connectome_script(cscript)
if isdefined(self.inputs.data_files):
for data in self.inputs.data_files:
_, data_name, _ = split_filename(data)
cda = cf.CData(name=data_name, src=data, fileformat='NumPy')
if not string.find(data_name,'lengths') == -1:
cda.dtype = 'FinalFiberLengthArray'
if not string.find(data_name,'endpoints') == -1:
cda.dtype = 'FiberEndpoints'
if not string.find(data_name,'labels') == -1:
cda.dtype = 'FinalFiberLabels'
a.add_connectome_data(cda)
a.print_summary()
_, name, ext = split_filename(self.inputs.out_file)
if not ext == '.cff':
ext = '.cff'
cf.save_to_cff(a,op.abspath(name + ext))
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
_, name, ext = split_filename(self.inputs.out_file)
if not ext == '.cff':
ext = '.cff'
outputs['connectome_file'] = op.abspath(name + ext)
return outputs
class MergeCNetworksInputSpec(BaseInterfaceInputSpec):
in_files = InputMultiPath(File(exists=True), mandatory=True, desc='List of CFF files to extract networks from')
out_file = File('merged_network_connectome.cff', usedefault = True, desc='Output CFF file with all the networks added')
class MergeCNetworksOutputSpec(TraitedSpec):
connectome_file = File(exists=True, desc='Output CFF file with all the networks added')
class MergeCNetworks(BaseInterface):
""" Merges networks from multiple CFF files into one new CFF file.
Example
-------
>>> import nipype.interfaces.cmtk as cmtk
>>> mrg = cmtk.MergeCNetworks()
>>> mrg.inputs.in_files = ['subj1.cff','subj2.cff']
>>> mrg.run() # doctest: +SKIP
"""
input_spec = MergeCNetworksInputSpec
output_spec = MergeCNetworksOutputSpec
def _run_interface(self, runtime):
extracted_networks = []
for i, con in enumerate(self.inputs.in_files):
mycon = cf.load(con)
nets = mycon.get_connectome_network()
for ne in nets:
# here, you might want to skip networks with a given
# metadata information
ne.load()
contitle = mycon.get_connectome_meta().get_title()
ne.set_name( str(i) + ': ' + contitle + ' - ' + ne.get_name() )
ne.set_src(ne.get_name())
extracted_networks.append(ne)
# Add networks to new connectome
newcon = cf.connectome(title = 'All CNetworks', connectome_network = extracted_networks)
# Setting additional metadata
metadata = newcon.get_connectome_meta()
metadata.set_creator('My Name')
metadata.set_email('My Email')
_, name, ext = split_filename(self.inputs.out_file)
if not ext == '.cff':
ext = '.cff'
cf.save_to_cff(newcon, op.abspath(name + ext))
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
_, name, ext = split_filename(self.inputs.out_file)
if not ext == '.cff':
ext = '.cff'
outputs['connectome_file'] = op.abspath(name + ext)
return outputs
| bsd-3-clause |
autosportlabs/RaceCapture_App | autosportlabs/racecapture/views/configuration/rcp/configview.py | 1 | 20903 | #
# Race Capture App
#
# Copyright (C) 2014-2017 Autosport Labs
#
# This file is part of the Race Capture App
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details. You should
# have received a copy of the GNU General Public License along with
# this code. If not, see <http://www.gnu.org/licenses/>.
import os
import kivy
kivy.require('1.10.0')
from kivy.app import Builder
from kivy.uix.treeview import TreeViewLabel
from kivy.properties import ObjectProperty, BooleanProperty
from kivy.uix.popup import Popup
from kivy.uix.screenmanager import Screen
from kivy.clock import Clock
from kivy import platform
from kivy.logger import Logger
from autosportlabs.help.helpmanager import HelpInfo
from autosportlabs.racecapture.views.configuration.rcp.autocontrolconfigview import AutoControlConfigView
from autosportlabs.racecapture.views.configuration.rcp.analogchannelsview import *
from autosportlabs.racecapture.views.configuration.rcp.imuchannelsview import *
from autosportlabs.racecapture.views.configuration.rcp.gpschannelsview import *
from autosportlabs.racecapture.views.configuration.rcp.lapstatsview import *
from autosportlabs.racecapture.views.configuration.rcp.timerchannelsview import *
from autosportlabs.racecapture.views.configuration.rcp.gpiochannelsview import *
from autosportlabs.racecapture.views.configuration.rcp.pwmchannelsview import *
from autosportlabs.racecapture.views.configuration.rcp.trackconfigview import *
from autosportlabs.racecapture.views.configuration.rcp.canchannelsview import *
from autosportlabs.racecapture.views.configuration.rcp.obd2channelsview import *
from autosportlabs.racecapture.views.configuration.rcp.canconfigview import *
from autosportlabs.racecapture.views.configuration.rcp.telemetry.telemetryconfigview import *
from autosportlabs.racecapture.views.configuration.rcp.wirelessconfigview import *
from autosportlabs.racecapture.views.configuration.rcp.scriptview import *
from autosportlabs.racecapture.views.file.loaddialogview import LoadDialog
from autosportlabs.racecapture.views.file.savedialogview import SaveDialog
from autosportlabs.racecapture.views.util.alertview import alertPopup, confirmPopup
from autosportlabs.racecapture.config.rcpconfig import *
from autosportlabs.uix.button.featurebutton import FeatureButton
from autosportlabs.racecapture.theme.color import ColorScheme
RCP_CONFIG_FILE_EXTENSION = '.rcp'
class LinkedTreeViewLabel(TreeViewLabel):
Builder.load_string("""
<LinkedTreeViewLabel>:
font_size: dp(16)
font_name: 'resource/fonts/Roboto-Light.ttf'
""")
view = None
view_builder = None
class ConfigView(Screen):
Builder.load_string("""
<InfoFieldLabel@FieldLabel>:
color: ColorScheme.get_dark_primary_text()
halign: 'center'
font_size: min(50, sp(50))
shorten: False
<ConfigFeatureButton@FeatureButton>:
tile_color: ColorScheme.get_dark_accent()
icon_color: ColorScheme.get_accent()
title_color: ColorScheme.get_accent()
<ConfigView>:
BoxLayout:
orientation: 'horizontal'
BoxLayout:
size_hint_x: None
width: max(dp(150), 200)
orientation: 'vertical'
ScrollContainer:
id: scroller
do_scroll_x:False
TreeView:
height: max(self.minimum_height, scroller.height)
id: menu
size_hint_y: None
hide_root: True
indent_level: dp(0)
indent_start: dp(5)
BoxLayout:
id: button_panel
padding: (dp(5), dp(0))
size_hint_y: None
height: -10
BoxLayout:
orientation: 'vertical'
padding: (dp(0), dp(10))
BoxLayout:
orientation: 'horizontal'
size_hint_y: 0.45
spacing: dp(7)
LabelIconButton:
id: open
title: 'Open'
icon_size: self.height * 0.5
title_font_size: self.height * 0.35
icon: '\357\204\225'
on_press: root.openConfig()
LabelIconButton:
id: save
title: 'Save'
icon_size: self.height * 0.5
title_font_size: self.height * 0.35
icon: '\357\203\207'
on_press: root.saveConfig()
BoxLayout:
size_hint_y: 0.1
BoxLayout:
orientation: 'horizontal'
size_hint_y: 0.45
spacing: dp(7)
LabelIconButton:
id: read
title: 'Read'
icon_size: self.height * 0.5
title_font_size: self.height * 0.35
icon: '\357\202\223'
on_press: root.readConfig()
LabelIconButton:
id: write
title: 'Write'
icon_size: self.height * 0.5
title_font_size: self.height * 0.35
icon: '\357\200\231'
on_press: root.writeConfig()
disabled: True
BoxLayout:
size_hint_x: 0.8
padding: [sp(10), sp(10), 0, 0]
id: content
orientation: 'vertical'
Widget:
size_hint_y: 0.15
BoxLayout:
orientation: 'horizontal'
size_hint_y: 0.3
padding: (dp(30), dp(15))
spacing: dp(30)
ConfigFeatureButton:
id: read
title: 'Read'
icon: u'\uf019'
on_press: root.readConfig()
ConfigFeatureButton:
id: open
title: 'Open'
icon: u'\uf07c'
on_press: root.openConfig()
Widget:
size_hint_x: None
width: max(dp(150), 200)
BoxLayout:
orientation: 'horizontal'
size_hint_y: 0.3
padding: (dp(30), dp(15))
spacing: dp(30)
ConfigFeatureButton:
id: first_time_setup
title: 'First time Setup'
icon: u'\uf138'
on_press: root._on_first_time_setup()
Widget:
size_hint_x: None
width: max(dp(150), 200)
Widget:
size_hint_y: 0.15
""")
# file save/load
loaded = BooleanProperty(False)
writeStale = BooleanProperty(False)
loadfile = ObjectProperty()
savefile = ObjectProperty()
text_input = ObjectProperty()
track_manager = ObjectProperty()
preset_manager = ObjectProperty()
# List of config views
configViews = []
menu = None
rc_config = None
script_view = None
_settings = None
base_dir = None
_databus = None
def __init__(self, **kwargs):
super(ConfigView, self).__init__(**kwargs)
self._status_pump = kwargs.get('status_pump')
self._databus = kwargs.get('databus')
self.rc_config = kwargs.get('rcpConfig', None)
self.rc_api = kwargs.get('rc_api', None)
self._settings = kwargs.get('settings')
self.base_dir = kwargs.get('base_dir')
self.register_event_type('on_config_updated')
self.register_event_type('on_channels_updated')
self.register_event_type('on_config_written')
self.register_event_type('on_tracks_updated')
self.register_event_type('on_config_modified')
self.register_event_type('on_read_config')
self.register_event_type('on_write_config')
self.register_event_type('on_show_main_view')
self._sn = ''
if self.rc_config:
self._sn = self.rc_config.versionConfig.serial
self.ids.menu.bind(selected_node=self.on_select_node)
def on_show_main_view(self, name):
pass
def on_config_written(self, *args):
self.writeStale = False
def on_config_modified(self, *args):
self.writeStale = True
def update_runtime_channels(self, system_channels):
for view in self.configViews:
channelWidgets = list(kvquery(view, __class__=ChannelNameSpinner))
for channelWidget in channelWidgets:
channelWidget.dispatch('on_channels_updated', system_channels)
def on_channels_updated(self, runtime_channels):
self.update_runtime_channels(runtime_channels)
def on_config_updated(self, config, force_reload=False):
if config.versionConfig.serial != self._sn or force_reload:
# New device or we need to redraw, reload everything
# Our config object is the same object with new values, so we need to copy our value
self._sn = copy(config.versionConfig.serial)
self._clear()
self.init_screen()
else:
self.rc_config = config
self.update_config_views()
def _clear(self):
nodes = []
# Building an array because if we remove while iterating we end up skipping things
for node in self.ids.menu.iterate_all_nodes():
nodes.append(node)
for node in nodes:
self.ids.menu.remove_node(node)
self.ids.menu.clear_widgets()
del(self.configViews[:])
self.ids.content.clear_widgets()
def on_track_manager(self, instance, value):
self.update_tracks()
def on_enter(self):
if not self.loaded and self.rc_config.loaded == True:
self.init_screen()
def on_loaded(self, instance, value):
self.update_config_views()
self.update_tracks()
def on_writeStale(self, instance, value):
self.updateControls()
def _reset_stale(self):
self.writeStale = False
def update_config_views(self):
config = self.rc_config
if config and self.loaded:
for view in self.configViews:
view.dispatch('on_config_updated', config)
self._reset_stale()
def _on_first_time_setup(self):
self.dispatch('on_show_main_view', 'setup')
def init_screen(self):
self.createConfigViews()
def createConfigViews(self):
def attach_node(text, n, view_builder):
tree = self.ids.menu
label = LinkedTreeViewLabel(text=text)
label.view_builder = view_builder
label.color_selected = ColorScheme.get_dark_primary()
return tree.add_node(label, n)
def create_scripting_view(capabilities):
script_view = LuaScriptingView(capabilities, rc_api=self.rc_api)
self.script_view = script_view
return script_view
runtime_channels = self._settings.runtimeChannels
default_node = attach_node('Race Tracks', None, lambda: TrackConfigView(status_pump=self._status_pump,
databus=self._databus,
rc_api=self.rc_api,
settings=self._settings,
track_manager=self.track_manager))
if self.rc_config.capabilities.has_gps:
attach_node('GPS', None, lambda: GPSChannelsView())
attach_node('Race Timing', None, lambda: LapStatsView())
if self.rc_config.capabilities.has_analog:
attach_node('Analog Sensors', None, lambda: AnalogChannelsView(channels=runtime_channels, preset_manager=self.preset_manager))
if self.rc_config.capabilities.has_timer:
attach_node('Pulse/RPM Sensors', None, lambda: PulseChannelsView(channels=runtime_channels))
if self.rc_config.capabilities.has_gpio:
attach_node('Digital In/Out', None, lambda: GPIOChannelsView(channels=runtime_channels))
if self.rc_config.capabilities.has_imu:
attach_node('Accel/Gyro', None, lambda: ImuChannelsView(rc_api=self.rc_api))
if self.rc_config.capabilities.has_pwm:
attach_node('Pulse/Analog Out', None, lambda: AnalogPulseOutputChannelsView(channels=runtime_channels))
attach_node('CAN Bus', None, lambda: CANConfigView())
if self.rc_config.capabilities.has_can_channel:
attach_node('CAN Mapping', None, lambda: CANChannelsView(settings=self._settings, preset_manager=self.preset_manager, channels=runtime_channels, base_dir=self.base_dir))
attach_node('OBDII', None, lambda: OBD2ChannelsView(channels=runtime_channels, base_dir=self.base_dir, preset_manager=self.preset_manager))
attach_node('Automatic Control', None, lambda: AutoControlConfigView(channels=runtime_channels))
attach_node('Wireless', None, lambda: WirelessConfigView(self.base_dir, self.rc_config, self.rc_config.capabilities))
attach_node('Telemetry', None, lambda: TelemetryConfigView(self.rc_config.capabilities))
if self.rc_config.capabilities.has_script:
node_name = 'Scripting'
else:
node_name = 'Logs'
attach_node(node_name, None, lambda: create_scripting_view(self.rc_config.capabilities))
if self.rc_api.is_firmware_update_supported():
from autosportlabs.racecapture.views.configuration.rcp.firmwareupdateview import FirmwareUpdateView
attach_node('Firmware', None, lambda: FirmwareUpdateView(rc_api=self.rc_api, settings=self._settings))
self.ids.menu.select_node(default_node)
self.update_runtime_channels(runtime_channels)
self.update_tracks()
self.ids.button_panel.height = max(dp(100), 150)
self.loaded = True
def show_node(self, node):
view = node.view
if not view:
view = node.view_builder()
self.configViews.append(view)
view.bind(on_config_modified=self.on_config_modified)
node.view = view
if self.loaded:
if self.rc_config:
view.dispatch('on_config_updated', self.rc_config)
if self.track_manager:
view.dispatch('on_tracks_updated', self.track_manager)
if view.get_parent_window() is None:
Clock.schedule_once(lambda dt: self.ids.content.add_widget(view))
def on_select_node(self, instance, value):
if not value:
return
# ensure that any keyboard is released
try:
self.ids.content.get_parent_window().release_keyboard()
except:
pass
self.ids.content.clear_widgets()
Clock.schedule_once(lambda dt: self.show_node(value))
def updateControls(self):
Logger.debug("ConfigView: data is stale: " + str(self.writeStale))
write_button = self.ids.write
write_button.disabled = not self.writeStale
write_button.pulsing = self.writeStale
Clock.schedule_once(lambda dt: HelpInfo.help_popup('rc_write_config', self, arrow_pos='left_mid'), 1.0)
def update_tracks(self):
track_manager = self.track_manager
if track_manager and self.loaded:
for view in self.configViews:
view.dispatch('on_tracks_updated', track_manager)
def on_tracks_updated(self, track_manager):
self.track_manager = track_manager
def on_read_config(self, instance, *args):
pass
def on_write_config(self, instance, *args):
pass
def readConfig(self):
        if self.writeStale:
popup = None
def _on_answer(instance, answer):
if answer:
self.dispatch('on_read_config', None)
popup.dismiss()
popup = confirmPopup('Confirm', 'Configuration Modified - Continue Loading?', _on_answer)
else:
self.dispatch('on_read_config', None)
def writeConfig(self):
if self.rc_config.loaded:
self.dispatch('on_write_config', None)
else:
alertPopup('Warning', 'Please load or read a configuration before writing')
def openConfig(self):
if self.writeStale:
popup = None
def _on_answer(instance, answer):
if answer:
self.doOpenConfig()
popup.dismiss()
popup = confirmPopup('Confirm', 'Configuration Modified - Open Configuration?', _on_answer)
else:
self.doOpenConfig()
def set_config_file_path(self, path):
self._settings.userPrefs.set_pref('preferences', 'config_file_dir', path)
def get_config_file_path(self):
return self._settings.userPrefs.get_pref('preferences', 'config_file_dir')
def doOpenConfig(self):
content = LoadDialog(ok=self.load, cancel=self.dismiss_popup, filters=['*' + RCP_CONFIG_FILE_EXTENSION], user_path=self.get_config_file_path())
self._popup = Popup(title="Load file", content=content, size_hint=(0.9, 0.9))
self._popup.open()
def saveConfig(self):
if self.rc_config.loaded:
content = SaveDialog(ok=self.save, cancel=self.dismiss_popup, filters=['*' + RCP_CONFIG_FILE_EXTENSION], user_path=self.get_config_file_path())
self._popup = Popup(title="Save file", content=content, size_hint=(0.9, 0.9))
self._popup.open()
else:
alertPopup('Warning', 'Please load or read a configuration before saving')
def load(self, instance):
self.set_config_file_path(instance.path)
self.dismiss_popup()
try:
selection = instance.selection
filename = selection[0] if len(selection) else None
if filename:
with open(filename) as stream:
rcpConfigJsonString = stream.read()
self.rc_config.fromJsonString(rcpConfigJsonString)
self.rc_config.stale = True
self.on_config_updated(self.rc_config, force_reload=True)
self.on_config_modified()
else:
alertPopup('Error Loading', 'No config file selected')
except Exception as detail:
alertPopup('Error Loading', 'Failed to Load Configuration:\n\n' + str(detail))
Logger.exception('ConfigView: Error loading config: ' + str(detail))
def save(self, instance):
def _do_save_config(filename):
if not filename.endswith(RCP_CONFIG_FILE_EXTENSION): filename += RCP_CONFIG_FILE_EXTENSION
with open(filename, 'w') as stream:
configJson = self.rc_config.toJsonString()
stream.write(configJson)
self.set_config_file_path(instance.path)
self.dismiss_popup()
config_filename = instance.filename
if len(config_filename):
try:
config_filename = os.path.join(instance.path, config_filename)
if os.path.isfile(config_filename):
def _on_answer(instance, answer):
if answer:
_do_save_config(config_filename)
popup.dismiss()
popup = confirmPopup('Confirm', 'File Exists - overwrite?', _on_answer)
else:
_do_save_config(config_filename)
except Exception as detail:
alertPopup('Error Saving', 'Failed to save:\n\n' + str(detail))
Logger.exception('ConfigView: Error Saving config: ' + str(detail))
def dismiss_popup(self, *args):
self._popup.dismiss()
| gpl-3.0 |
jetty840/ReplicatorG | skein_engines/skeinforge-35/skeinforge_application/skeinforge_plugins/craft_plugins/export_plugins/static_plugins/gcode_small.py | 6 | 4301 | """
This page is in the table of contents.
Gcode_small is an export plugin to remove the comments and the redundant z and feed rate parameters from a gcode file.
An export plugin is a script in the export_plugins folder which has the getOutput function, the globalIsReplaceable variable and if it's output is not replaceable, the writeOutput function. It is meant to be run from the export tool. To ensure that the plugin works on platforms which do not handle file capitalization properly, give the plugin a lower case name.
The getOutput function of this script takes a gcode text and returns that text without comments and redundant z and feed rate parameters. The writeOutput function of this script takes a gcode text and writes that text without comments and redundant z and feed rate parameters to a file.
Many of the functions in this script are copied from gcodec in skeinforge_utilities. They are copied rather than imported so developers making new plugins do not have to learn about gcodec, the code here is all they need to learn.
"""
from __future__ import absolute_import
import cStringIO
import os
__author__ = 'Enrique Perez ([email protected])'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GPL 3.0'
# This is true if the output is text and false if it is binary."
globalIsReplaceable = True
def getOutput(gcodeText):
'Get the exported version of a gcode file.'
return GcodeSmallSkein().getCraftedGcode(gcodeText)
def getSplitLineBeforeBracketSemicolon(line):
"Get the split line before a bracket or semicolon."
	# Truncate at whichever comment marker (';' or '(') comes first, so that
	# trailing comments are never returned as gcode words.
	semicolonIndex = line.find(';')
	if semicolonIndex >= 0:
		line = line[: semicolonIndex]
	bracketIndex = line.find('(')
	if bracketIndex >= 0:
		line = line[: bracketIndex]
	return line.split()
def getStringFromCharacterSplitLine(character, splitLine):
"Get the string after the first occurence of the character in the split line."
indexOfCharacter = getIndexOfStartingWithSecond(character, splitLine)
if indexOfCharacter < 0:
return None
return splitLine[indexOfCharacter][1 :]
def getSummarizedFileName(fileName):
"Get the fileName basename if the file is in the current working directory, otherwise return the original full name."
if os.getcwd() == os.path.dirname(fileName):
return os.path.basename(fileName)
return fileName
def getTextLines(text):
"Get the all the lines of text of a text."
return text.replace('\r', '\n').split('\n')
def getIndexOfStartingWithSecond(letter, splitLine):
"Get index of the first occurence of the given letter in the split line, starting with the second word. Return - 1 if letter is not found"
for wordIndex in xrange( 1, len(splitLine) ):
word = splitLine[ wordIndex ]
firstLetter = word[0]
if firstLetter == letter:
return wordIndex
return - 1
class GcodeSmallSkein:
"A class to remove redundant z and feed rate parameters from a skein of extrusions."
def __init__(self):
self.lastFeedRateString = None
self.lastZString = None
self.output = cStringIO.StringIO()
def getCraftedGcode( self, gcodeText ):
"Parse gcode text and store the gcode."
lines = getTextLines(gcodeText)
for line in lines:
self.parseLine(line)
return self.output.getvalue()
def parseLine(self, line):
"Parse a gcode line."
splitLine = getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if len(firstWord) < 1:
return
if firstWord[0] == '(':
return
if firstWord != 'G1':
self.output.write(line + '\n')
return
eString = getStringFromCharacterSplitLine('E', splitLine )
xString = getStringFromCharacterSplitLine('X', splitLine )
yString = getStringFromCharacterSplitLine('Y', splitLine )
zString = getStringFromCharacterSplitLine('Z', splitLine )
feedRateString = getStringFromCharacterSplitLine('F', splitLine )
self.output.write('G1')
if xString != None:
self.output.write(' X' + xString )
if yString != None:
self.output.write(' Y' + yString )
if zString != None and zString != self.lastZString:
self.output.write(' Z' + zString )
if feedRateString != None and feedRateString != self.lastFeedRateString:
self.output.write(' F' + feedRateString )
if eString != None:
self.output.write(' E' + eString )
self.lastFeedRateString = feedRateString
self.lastZString = zString
self.output.write('\n')
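# Illustrative usage sketch added for clarity (not part of the original plugin);
# the gcode text below is made up:
#
#   raw = "(generated by skeinforge)\nG1 X1.0 Y2.0 Z0.4 F1200.0\nG1 X2.0 Y2.0 Z0.4 F1200.0\n"
#   print getOutput(raw)
#   # -> "G1 X1.0 Y2.0 Z0.4 F1200.0\nG1 X2.0 Y2.0\n"
#   # the second move drops Z and F because they repeat the previous values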
| gpl-2.0 |
sencha/chromium-spacewalk | chrome/test/chromedriver/third_party/googlecode/googlecode_upload.py | 160 | 8608 | #!/usr/bin/env python
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: [email protected] (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""Google Code file uploader script.
"""
__author__ = '[email protected] (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
def upload(file, project_name, user_name, password, summary, labels=None):
"""Upload a file to a Google Code project's file server.
Args:
file: The local path to the file.
project_name: The name of your project on Google Code.
user_name: Your Google account name.
password: The googlecode.com password for your account.
Note that this is NOT your global Google Account password!
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
Returns: a tuple:
http_status: 201 if the upload succeeded, something else if an
        error occurred.
http_reason: The human-readable string associated with http_status
file_url: If the upload succeeded, the URL of the file on Google
Code, None otherwise.
"""
# The login is the user part of [email protected]. If the login provided
# is in the full user@domain form, strip it down.
if user_name.endswith('@gmail.com'):
user_name = user_name[:user_name.index('@gmail.com')]
form_fields = [('summary', summary)]
if labels is not None:
form_fields.extend([('label', l.strip()) for l in labels])
content_type, body = encode_upload_request(form_fields, file)
upload_host = '%s.googlecode.com' % project_name
upload_uri = '/files'
auth_token = base64.b64encode('%s:%s'% (user_name, password))
headers = {
'Authorization': 'Basic %s' % auth_token,
'User-Agent': 'Googlecode.com uploader v0.9.4',
'Content-Type': content_type,
}
server = httplib.HTTPSConnection(upload_host)
server.request('POST', upload_uri, body, headers)
resp = server.getresponse()
server.close()
if resp.status == 201:
location = resp.getheader('Location', None)
else:
location = None
return resp.status, resp.reason, location
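# Illustrative call added for clarity (not part of the original script); the
# project name, credentials, file and labels below are placeholders:
#
#   status, reason, url = upload('dist/app-1.0.zip', 'my-project',
#                                'someuser', 'gc-password', 'Release 1.0',
#                                labels=['Type-Archive'])
#   if status == 201:
#       print 'Uploaded to %s' % url
#   else:
#       print 'Upload failed: %s (%s)' % (reason, status)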
def encode_upload_request(fields, file_path):
"""Encode the given fields and file into a multipart form body.
fields is a sequence of (name, value) pairs. file is the path of
the file to upload. The file will be uploaded to Google Code with
the same file name.
Returns: (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
CRLF = '\r\n'
body = []
# Add the metadata about the upload first
for key, value in fields:
body.extend(
['--' + BOUNDARY,
'Content-Disposition: form-data; name="%s"' % key,
'',
value,
])
# Now add the file itself
file_name = os.path.basename(file_path)
f = open(file_path, 'rb')
file_content = f.read()
f.close()
body.extend(
['--' + BOUNDARY,
'Content-Disposition: form-data; name="filename"; filename="%s"'
% file_name,
# The upload server determines the mime-type, no need to set it.
'Content-Type: application/octet-stream',
'',
file_content,
])
# Finalize the form body
body.extend(['--' + BOUNDARY + '--', ''])
return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
def upload_find_auth(file_path, project_name, summary, labels=None,
user_name=None, password=None, tries=3):
"""Find credentials and upload a file to a Google Code project's file server.
file_path, project_name, summary, and labels are passed as-is to upload.
Args:
file_path: The local path to the file.
project_name: The name of your project on Google Code.
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
    user_name: Your Google account name.
    password: The googlecode.com password for your account.
    tries: How many attempts to make.
"""
if user_name is None or password is None:
from netrc import netrc
# Chromium edit: Works on windows without requiring HOME to be set.
netrc_path = os.path.join(os.path.expanduser('~'), '.netrc')
authenticators = netrc(netrc_path).authenticators("code.google.com")
if authenticators:
if user_name is None:
user_name = authenticators[0]
if password is None:
password = authenticators[2]
if user_name is None or password is None:
raise RuntimeError('Missing user credentials for upload')
return upload(file_path, project_name, user_name, password, summary, labels)
def main():
parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
'-p PROJECT [options] FILE')
parser.add_option('-s', '--summary', dest='summary',
help='Short description of the file')
parser.add_option('-p', '--project', dest='project',
help='Google Code project name')
parser.add_option('-u', '--user', dest='user',
help='Your Google Code username')
parser.add_option('-w', '--password', dest='password',
help='Your Google Code password')
parser.add_option('-l', '--labels', dest='labels',
help='An optional list of comma-separated labels to attach '
'to the file')
options, args = parser.parse_args()
if not options.summary:
parser.error('File summary is missing.')
elif not options.project:
parser.error('Project name is missing.')
elif len(args) < 1:
parser.error('File to upload not provided.')
elif len(args) > 1:
parser.error('Only one file may be specified.')
file_path = args[0]
if options.labels:
labels = options.labels.split(',')
else:
labels = None
status, reason, url = upload_find_auth(file_path, options.project,
options.summary, labels,
options.user, options.password)
if url:
print 'The file was uploaded successfully.'
print 'URL: %s' % url
return 0
else:
print 'An error occurred. Your file was not uploaded.'
print 'Google Code upload server said: %s (%s)' % (reason, status)
return 1
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
RCOSDP/waterbutler | waterbutler/core/streams/base64.py | 4 | 1100 | import base64
import asyncio
class Base64EncodeStream(asyncio.StreamReader):
@staticmethod
def calculate_encoded_size(size):
size = 4 * size / 3
if size % 4:
size += 4 - size % 4
return int(size)
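    # Worked example added for clarity (not part of the original): for a 10-byte
    # payload, 4 * 10 / 3 ~= 13.33, and padding up to the next multiple of 4
    # gives 16 -- the same length as base64.b64encode(b'x' * 10).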
def __init__(self, stream, **kwargs):
self.extra = b''
self.stream = stream
if stream.size is None:
self._size = None
else:
self._size = Base64EncodeStream.calculate_encoded_size(stream.size)
super().__init__(**kwargs)
@property
def size(self):
return self._size
async def read(self, n=-1):
if n < 0:
return (await super().read(n))
nog = n
padding = n % 3
if padding:
n += (3 - padding)
chunk = self.extra + base64.b64encode((await self.stream.read(n)))
if len(chunk) <= nog:
self.extra = b''
return chunk
chunk, self.extra = chunk[:nog], chunk[nog:]
return chunk
def at_eof(self):
return len(self.extra) == 0 and self.stream.at_eof()
| apache-2.0 |
Mashape/unirest-python | unirest/test/test_unirest.py | 4 | 5262 | # -*- coding:utf-8 -*-
import sys
import os
import unittest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
import unirest
class UnirestTestCase(unittest.TestCase):
def test_get(self):
response = unirest.get('http://httpbin.org/get?name=Mark', params={"nick":"thefosk"})
self.assertEqual(response.code, 200)
self.assertEqual(len(response.body['args']), 2)
self.assertEqual(response.body['args']['name'], "Mark")
self.assertEqual(response.body['args']['nick'], "thefosk")
def test_get2(self):
response = unirest.get('http://httpbin.org/get?name=Mark', params={"nick":"the fosk"})
self.assertEqual(response.code, 200)
self.assertEqual(len(response.body['args']), 2)
self.assertEqual(response.body['args']['name'], "Mark")
self.assertEqual(response.body['args']['nick'], "the fosk")
def test_get_unicode_param(self):
response = unirest.get('http://httpbin.org/get?name=Shimada', params={"nick":u"しまりん"})
self.assertEqual(response.code, 200)
self.assertEqual(len(response.body['args']), 2)
self.assertEqual(response.body['args']['name'], "Shimada")
self.assertEqual(response.body['args']['nick'], u"しまりん")
def test_get_none_param(self):
response = unirest.get('http://httpbin.org/get?name=Mark', params={"nick":"thefosk", "age": None, "third":""})
self.assertEqual(response.code, 200)
self.assertEqual(len(response.body['args']), 3)
self.assertEqual(response.body['args']['name'], "Mark")
self.assertEqual(response.body['args']['nick'], "thefosk")
self.assertEqual(response.body['args']['third'], "")
def test_post(self):
response = unirest.post('http://httpbin.org/post', params={"name":"Mark", "nick":"thefosk"})
self.assertEqual(response.code, 200)
self.assertEqual(len(response.body['args']), 0)
self.assertEqual(len(response.body['form']), 2)
self.assertEqual(response.body['form']['name'], "Mark")
self.assertEqual(response.body['form']['nick'], "thefosk")
def test_post_none_param(self):
response = unirest.post('http://httpbin.org/post', params={"name":"Mark", "nick":"thefosk", "age": None, "third":""})
self.assertEqual(response.code, 200)
self.assertEqual(len(response.body['args']), 0)
self.assertEqual(len(response.body['form']), 3)
self.assertEqual(response.body['form']['name'], "Mark")
self.assertEqual(response.body['form']['nick'], "thefosk")
self.assertEqual(response.body['form']['third'], "")
def test_delete(self):
response = unirest.delete('http://httpbin.org/delete', params={"name":"Mark", "nick":"thefosk"})
self.assertEqual(response.code, 200)
self.assertEqual(response.body['form']['name'], "Mark")
self.assertEqual(response.body['form']['nick'], "thefosk")
def test_put(self):
response = unirest.put('http://httpbin.org/put', params={"name":"Mark", "nick":"thefosk"})
self.assertEqual(response.code, 200)
self.assertEqual(len(response.body['args']), 0)
self.assertEqual(len(response.body['form']), 2)
self.assertEqual(response.body['form']['name'], "Mark")
self.assertEqual(response.body['form']['nick'], "thefosk")
def test_patch(self):
response = unirest.patch('http://httpbin.org/patch', params={"name":"Mark", "nick":"thefosk"})
self.assertEqual(response.code, 200)
self.assertEqual(len(response.body['args']), 0)
self.assertEqual(len(response.body['form']), 2)
self.assertEqual(response.body['form']['name'], "Mark")
self.assertEqual(response.body['form']['nick'], "thefosk")
def test_post_entity(self):
response = unirest.post('http://httpbin.org/post', headers={'Content-Type':'text/plain'}, params="hello this is custom data")
self.assertEqual(response.code, 200)
self.assertEqual(response.body['data'], "hello this is custom data")
def test_gzip(self):
response = unirest.get('http://httpbin.org/gzip', params={"name":"Mark"})
self.assertEqual(response.code, 200)
self.assertTrue(response.body['gzipped'])
def test_basicauth(self):
response = unirest.get('http://httpbin.org/get', auth=('marco', 'password'))
self.assertEqual(response.code, 200)
self.assertEqual(response.body['headers']['Authorization'], "Basic bWFyY286cGFzc3dvcmQ=")
def test_defaultheaders(self):
unirest.default_header('custom','custom header')
response = unirest.get('http://httpbin.org/get')
self.assertEqual(response.code, 200)
        self.assertTrue('Custom' in response.body['headers'])
self.assertEqual(response.body['headers']['Custom'], "custom header")
# Make another request
response = unirest.get('http://httpbin.org/get')
self.assertEqual(response.code, 200)
        self.assertTrue('Custom' in response.body['headers'])
        self.assertEqual(response.body['headers']['Custom'], "custom header")
# Clear the default headers
unirest.clear_default_headers()
response = unirest.get('http://httpbin.org/get')
self.assertEqual(response.code, 200)
        self.assertFalse('Custom' in response.body['headers'])
def test_timeout(self):
unirest.timeout(3)
response = unirest.get('http://httpbin.org/delay/1')
self.assertEqual(response.code, 200)
unirest.timeout(1)
try:
response = unirest.get('http://httpbin.org/delay/3')
self.fail("The timeout didn't work")
except:
pass
if __name__ == '__main__':
unittest.main()
| mit |
pabloborrego93/edx-platform | common/lib/xmodule/xmodule/annotator_token.py | 211 | 1542 | """
This file contains a function used to retrieve the token for the annotation backend
without having to create a view, but just returning a string instead.
It can be called from other files by using the following:
from xmodule.annotator_token import retrieve_token
"""
import datetime
from firebase_token_generator import create_token
def retrieve_token(userid, secret):
'''
Return a token for the backend of annotations.
It uses the course id to retrieve a variable that contains the secret
token found in inheritance.py. It also contains information of when
the token was issued. This will be stored with the user along with
the id for identification purposes in the backend.
'''
# the following five lines of code allows you to include the default timezone in the iso format
# for more information: http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
newhour, newmin = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60, 60)
newtime = "%s%+02d:%02d" % (dtnow.isoformat(), newhour, newmin)
# uses the issued time (UTC plus timezone), the consumer key and the user's email to maintain a
# federated system in the annotation backend server
custom_data = {"issuedAt": newtime, "consumerKey": secret, "userId": userid, "ttl": 86400}
newtoken = create_token(secret, custom_data)
return newtoken
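# Illustrative usage added for clarity (not part of the original module); the
# user id and secret below are placeholders:
#
#   token = retrieve_token("[email protected]", course_secret)
#
# The returned string is a signed token (built by firebase_token_generator)
# whose payload carries issuedAt, consumerKey, userId and ttl, as assembled in
# custom_data above.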
| agpl-3.0 |
pabloborrego93/edx-platform | pavelib/tests.py | 9 | 11010 | """
Unit test tasks
"""
import re
import os
import sys
from paver.easy import sh, task, cmdopts, needs
from pavelib.utils.test import suites
from pavelib.utils.envs import Env
from pavelib.utils.timer import timed
from pavelib.utils.passthrough_opts import PassthroughTask
from optparse import make_option
try:
from pygments.console import colorize
except ImportError:
colorize = lambda color, text: text
__test__ = False # do not collect
@needs(
'pavelib.prereqs.install_prereqs',
'pavelib.utils.test.utils.clean_reports_dir',
)
@cmdopts([
("system=", "s", "System to act on"),
("test-id=", "t", "Test id"),
("fail-fast", "x", "Fail suite on first failed test"),
("fasttest", "a", "Run without collectstatic"),
make_option(
'-c', '--cov-args', default='',
help='adds as args to coverage for the test run'
),
('skip-clean', 'C', 'skip cleaning repository before running tests'),
('processes=', 'p', 'number of processes to use running tests'),
make_option('-r', '--randomize', action='store_true', help='run the tests in a random order'),
make_option('--no-randomize', action='store_false', dest='randomize', help="don't run the tests in a random order"),
make_option("--verbose", action="store_const", const=2, dest="verbosity"),
make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
make_option("-v", "--verbosity", action="count", dest="verbosity", default=1),
make_option(
'--disable-migrations',
action='store_true',
dest='disable_migrations',
help="Create tables directly from apps' models. Can also be used by exporting DISABLE_MIGRATIONS=1."
),
make_option(
'--enable-migrations',
action='store_false',
dest='disable_migrations',
help="Create tables by applying migrations."
),
("fail_fast", None, "deprecated in favor of fail-fast"),
("test_id=", None, "deprecated in favor of test-id"),
('cov_args=', None, 'deprecated in favor of cov-args'),
make_option(
"-e", "--extra_args", default="",
help="deprecated, pass extra options directly in the paver commandline"
),
('skip_clean', None, 'deprecated in favor of skip-clean'),
], share_with=['pavelib.utils.test.utils.clean_reports_dir'])
@PassthroughTask
@timed
def test_system(options, passthrough_options):
"""
Run tests on our djangoapps for lms and cms
"""
system = getattr(options, 'system', None)
test_id = getattr(options, 'test_id', None)
if test_id:
if not system:
system = test_id.split('/')[0]
if system in ['common', 'openedx']:
system = 'lms'
options.test_system['test_id'] = test_id
if test_id or system:
system_tests = [suites.SystemTestSuite(
system,
passthrough_options=passthrough_options,
**options.test_system
)]
else:
system_tests = []
for syst in ('cms', 'lms'):
system_tests.append(suites.SystemTestSuite(
syst,
passthrough_options=passthrough_options,
**options.test_system
))
test_suite = suites.PythonTestSuite(
'python tests',
subsuites=system_tests,
passthrough_options=passthrough_options,
**options.test_system
)
test_suite.run()
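# Illustrative invocations added for clarity (not part of the original file);
# the test id below is a placeholder path:
#
#   paver test_system -s lms
#   paver test_system -t lms/djangoapps/courseware/tests/test_views.py
#
# When only a test id is given, the system is inferred from its first path
# component, with 'common' and 'openedx' test ids mapped to lms (see
# test_system above).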
@needs(
'pavelib.prereqs.install_prereqs',
'pavelib.utils.test.utils.clean_reports_dir',
)
@cmdopts([
("lib=", "l", "lib to test"),
("test-id=", "t", "Test id"),
("failed", "f", "Run only failed tests"),
("fail-fast", "x", "Run only failed tests"),
make_option(
'-c', '--cov-args', default='',
help='adds as args to coverage for the test run'
),
('skip-clean', 'C', 'skip cleaning repository before running tests'),
make_option("--verbose", action="store_const", const=2, dest="verbosity"),
make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
make_option("-v", "--verbosity", action="count", dest="verbosity", default=1),
('cov_args=', None, 'deprecated in favor of cov-args'),
make_option(
'-e', '--extra_args', default='',
help='deprecated, pass extra options directly in the paver commandline'
),
("fail_fast", None, "deprecated in favor of fail-fast"),
('skip_clean', None, 'deprecated in favor of skip-clean'),
("test_id=", None, "deprecated in favor of test-id"),
], share_with=['pavelib.utils.test.utils.clean_reports_dir'])
@PassthroughTask
@timed
def test_lib(options, passthrough_options):
"""
Run tests for common/lib/ and pavelib/ (paver-tests)
"""
lib = getattr(options, 'lib', None)
test_id = getattr(options, 'test_id', lib)
if test_id:
if '/' in test_id:
lib = '/'.join(test_id.split('/')[0:3])
else:
lib = 'common/lib/' + test_id.split('.')[0]
options.test_lib['test_id'] = test_id
lib_tests = [suites.LibTestSuite(
lib,
passthrough_options=passthrough_options,
**options.test_lib
)]
else:
lib_tests = [
suites.LibTestSuite(
d,
passthrough_options=passthrough_options,
**options.test_lib
) for d in Env.LIB_TEST_DIRS
]
test_suite = suites.PythonTestSuite(
'python tests',
subsuites=lib_tests,
passthrough_options=passthrough_options,
**options.test_lib
)
test_suite.run()
@needs(
'pavelib.prereqs.install_prereqs',
'pavelib.utils.test.utils.clean_reports_dir',
)
@cmdopts([
("failed", "f", "Run only failed tests"),
("fail-fast", "x", "Run only failed tests"),
make_option(
'-c', '--cov-args', default='',
help='adds as args to coverage for the test run'
),
make_option("--verbose", action="store_const", const=2, dest="verbosity"),
make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
make_option("-v", "--verbosity", action="count", dest="verbosity", default=1),
make_option(
'--disable-migrations',
action='store_true',
dest='disable_migrations',
help="Create tables directly from apps' models. Can also be used by exporting DISABLE_MIGRATIONS=1."
),
('cov_args=', None, 'deprecated in favor of cov-args'),
make_option(
'-e', '--extra_args', default='',
help='deprecated, pass extra options directly in the paver commandline'
),
("fail_fast", None, "deprecated in favor of fail-fast"),
])
@PassthroughTask
@timed
def test_python(options, passthrough_options):
"""
Run all python tests
"""
python_suite = suites.PythonTestSuite(
'Python Tests',
passthrough_options=passthrough_options,
**options.test_python
)
python_suite.run()
@needs(
'pavelib.prereqs.install_prereqs',
'pavelib.utils.test.utils.clean_reports_dir',
)
@cmdopts([
("suites", "s", "List of unit test suites to run. (js, lib, cms, lms)"),
make_option(
'-c', '--cov-args', default='',
help='adds as args to coverage for the test run'
),
make_option("--verbose", action="store_const", const=2, dest="verbosity"),
make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
make_option("-v", "--verbosity", action="count", dest="verbosity", default=1),
('cov_args=', None, 'deprecated in favor of cov-args'),
make_option(
'-e', '--extra_args', default='',
help='deprecated, pass extra options directly in the paver commandline'
),
])
@PassthroughTask
@timed
def test(options, passthrough_options):
"""
Run all tests
"""
# Subsuites to be added to the main suite
python_suite = suites.PythonTestSuite(
'Python Tests',
passthrough_options=passthrough_options,
**options.test
)
js_suite = suites.JsTestSuite('JS Tests', mode='run', with_coverage=True)
# Main suite to be run
all_unittests_suite = suites.TestSuite('All Tests', subsuites=[js_suite, python_suite])
all_unittests_suite.run()
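# Illustrative command-line invocations of the tasks above (exact syntax depends on the
# local paver setup; the option values are examples only, not requirements):
#   paver test_python --disable-migrations
#   paver test_lib --fail-fast
#   paver test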
@task
@needs('pavelib.prereqs.install_coverage_prereqs')
@cmdopts([
("compare-branch=", "b", "Branch to compare against, defaults to origin/master"),
("compare_branch=", None, "deprecated in favor of compare-branch"),
])
@timed
def coverage():
"""
Build the html, xml, and diff coverage reports
"""
report_dir = Env.REPORT_DIR
rcfile = Env.PYTHON_COVERAGERC
if not (report_dir / '.coverage').isfile():
# This may be that the coverage files were generated using -p,
# try to combine them to the one file that we need.
sh("coverage combine --rcfile={}".format(rcfile))
if not os.path.getsize(report_dir / '.coverage') > 50:
# Make sure the combined .coverage data file contains more than just the
# "empty" file header (~50 bytes), because coverage combine will create an
# essentially empty data file even when there is no data to combine.
err_msg = colorize(
'red',
"No coverage info found. Run `paver test` before running "
"`paver coverage`.\n"
)
sys.stderr.write(err_msg)
return
# Generate the coverage.py XML report
sh("coverage xml --rcfile={}".format(rcfile))
# Generate the coverage.py HTML report
sh("coverage html --rcfile={}".format(rcfile))
diff_coverage() # pylint: disable=no-value-for-parameter
@task
@needs('pavelib.prereqs.install_coverage_prereqs')
@cmdopts([
("compare-branch=", "b", "Branch to compare against, defaults to origin/master"),
("compare_branch=", None, "deprecated in favor of compare-branch"),
], share_with=['coverage'])
@timed
def diff_coverage(options):
"""
Build the diff coverage reports
"""
compare_branch = options.get('compare_branch', 'origin/master')
# Find all coverage XML files (both Python and JavaScript)
xml_reports = []
for filepath in Env.REPORT_DIR.walk():
if bool(re.match(r'^coverage.*\.xml$', filepath.basename())):
xml_reports.append(filepath)
if not xml_reports:
err_msg = colorize(
'red',
"No coverage info found. Run `paver test` before running "
"`paver coverage`.\n"
)
sys.stderr.write(err_msg)
else:
xml_report_str = ' '.join(xml_reports)
diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html')
# Generate the diff coverage reports (HTML and console)
sh(
"diff-cover {xml_report_str} --compare-branch={compare_branch} "
"--html-report {diff_html_path}".format(
xml_report_str=xml_report_str,
compare_branch=compare_branch,
diff_html_path=diff_html_path,
)
)
print "\n"
| agpl-3.0 |
demiangomez/Parallel.GAMIT | com/ScanArchive.py | 1 | 66663 | """
Project: Parallel.Archive
Date: 02/16/2017
Author: Demian D. Gomez
Main routines to load the RINEX files to the database, load station information, run PPP on the archive files and obtain
the OTL coefficients
usage: pyScanArchive.py [-h] [-rinex] [-otl]
[-stninfo [argument [argument ...]]]
[-ppp [argument [argument ...]]]
[-rehash [argument [argument ...]]] [-np]
all|net.stnm [all|net.stnm ...]
Archive operations Main Program
positional arguments:
all|net.stnm List of networks/stations to process given in
[net].[stnm] format or just [stnm] (separated by
spaces; if [stnm] is not unique in the database, all
stations with that name will be processed). Use
keyword 'all' to process all stations in the database.
If [net].all is given, all stations from network [net]
will be processed. Alternatively, a file with the
station list can be provided.
optional arguments:
-h, --help show this help message and exit
-rinex, --rinex Scan the current archive for RINEX 2/3 files.
-otl, --ocean_loading
Calculate ocean loading coefficients.
-stninfo [argument [argument ...]], --station_info [argument [argument ...]]
Insert station information to the database. If no
arguments are given, then scan the archive for station
info files and use their location (folder) to
determine the network to use during insertion. Only
stations in the station list will be processed. If a
filename is provided, then scan that file only, in
which case a second argument specifies the network to
use during insertion. Eg: -stninfo ~/station.info arg.
In cases where multiple networks are being processed,
the network argument will be used to disambiguate
station code conflicts. Eg: pyScanArchive all -stninfo
~/station.info arg -> if a station named igm1 exists
in networks 'igs' and 'arg', only 'arg.igm1' will get
the station information insert. Use keyword 'stdin' to
read the station information data from the pipeline.
-ppp [argument [argument ...]], --ppp [argument [argument ...]]
Run ppp on the rinex files in the database. Append
[date_start] and (optionally) [date_end] to limit the
range of the processing. Allowed formats are yyyy.doy
or yyyy/mm/dd. Append keyword 'hash' to the end to
check the PPP hash values against the station
information records. If hash doesn't match,
recalculate the PPP solutions.
-rehash [argument [argument ...]], --rehash [argument [argument ...]]
Check PPP hash against station information hash.
Rehash PPP solutions to match the station information
hash without recalculating the PPP solution.
Optionally append [date_start] and (optionally)
[date_end] to limit the rehashing time window. Allowed
formats are yyyy.doy or yyyy/mm/dd.
-np, --noparallel Execute command without parallelization.
"""
import pyArchiveStruct
import dbConnection
import pyDate
import pyRinex
import pyRinexName
import traceback
import datetime
import os
import pyOTL
import pyStationInfo
import sys
import pySp3
import pyBrdc
import pyClk
import pyPPP
from tqdm import tqdm
import argparse
import numpy
import pyOptions
import Utils
import platform
import pyJobServer
from Utils import print_columns
from Utils import process_date
from Utils import ecef2lla
import pyEvents
import scandir
import json
import shutil
import glob
import uuid
from decimal import Decimal
import zipfile
error_message = False
class Encoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, Decimal):
return float(o)
if isinstance(o, datetime.datetime):
return datetime.datetime.strftime(o, '%Y-%m-%d %H:%M:%S')
return super(Encoder, self).default(o)
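# Minimal illustration of the encoder above (values are made-up examples); it mirrors the
# json.dump(..., cls=Encoder) call used later in export_station:
#   json.dumps({'height': Decimal('10.5'), 'seen': datetime.datetime(2017, 1, 1)}, cls=Encoder)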
class callback_class():
def __init__(self, pbar):
self.errors = None
self.pbar = pbar
def callbackfunc(self, args):
msg = args
self.errors = msg
self.pbar.update(1)
def callback_handle(job):
global error_message
if job.result is not None or job.exception:
error_message = True
if job.result:
msg = job.result
else:
msg = job.exception
tqdm.write(' -- There were unhandled errors during this batch. '
'Please check errors_pyScanArchive.log for details')
f = open('errors_pyScanArchive.log', 'a')
f.write('ON ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ' an unhandled error occurred:\n')
f.write(msg + '\n')
f.write('END OF ERROR =================== \n\n')
f.close()
def verify_rinex_date_multiday(cnn, date, rinexinfo, Config):
# function to verify if rinex is multiday or if the file is from the date it was stored in the archive
# returns true if parent process can continue with insert
# returns false if file had to be moved from the archive (date != rinex.date or multiday file)
# check if rinex is a multiday file (rinex with more than one day of observations)
if rinexinfo.multiday:
# move all the files to the repository, delete the crinex from the archive, log the event
rnxlist = []
for rnx in rinexinfo.multiday_rnx_list:
rnxlist.append(rnx.rinex)
# some other file, move it to the repository
retry_folder = os.path.join(Config.repository_data_in_retry, 'multidays_found/' + rnx.date.yyyy() + '/' + rnx.date.ddd())
rnx.compress_local_copyto(retry_folder)
# if the file corresponding to this session is found, assign its object to rinexinfo
event = pyEvents.Event(
Description='%s was a multi-day rinex file. The following rinex files were generated and moved to the repository/data_in_retry: %s.' % (
rinexinfo.origin_file, ','.join(rnxlist)),
NetworkCode=rinexinfo.NetworkCode,
EventType='warn',
StationCode=rinexinfo.StationCode,
Year=int(rinexinfo.date.year),
DOY=int(rinexinfo.date.doy))
cnn.insert_event(event)
# remove crinex from archive
os.remove(rinexinfo.origin_file)
return False
# compare the date of the rinex with the date in the archive
if not date == rinexinfo.date:
# move the file out of the archive because it's in the wrong spot (wrong folder, wrong name, etc)
# let pyArchiveService fix the issue
retry_folder = os.path.join(Config.repository_data_in_retry, 'wrong_date_found/' + date.yyyy() + '/' + date.ddd())
# move the crinex out of the archive
rinexinfo.move_origin_file(retry_folder)
event = pyEvents.Event(
Description='The date in the archive for ' + rinexinfo.rinex + ' (' + date.yyyyddd() + ') does not agree with the mean session date (' +
rinexinfo.date.yyyyddd() + '). The file was moved to the repository/data_in_retry.',
NetworkCode=rinexinfo.NetworkCode,
EventType='warn',
StationCode=rinexinfo.StationCode,
Year=int(rinexinfo.date.year),
DOY=int(rinexinfo.date.doy))
cnn.insert_event(event)
return False
return True
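# Typical call pattern (as used in try_insert and execute_ppp below): only continue with the
# database insert when the check returns True; otherwise the file has already been moved out
# of the archive:
#   if verify_rinex_date_multiday(cnn, date, rinexinfo, Config):
#       cnn.insert('rinex', rinexinfo.record)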
def try_insert(NetworkCode, StationCode, year, doy, rinex):
try:
# try to open a connection to the database
cnn = dbConnection.Cnn("gnss_data.cfg")
Config = pyOptions.ReadOptions("gnss_data.cfg")
# get the rejection directory ready
data_reject = os.path.join(Config.repository_data_reject, 'bad_rinex/%i/%03i' % (year, doy))
# get the rinex file name
rnx_name = pyRinexName.RinexNameFormat(rinex)
except Exception:
return traceback.format_exc() + ' processing rinex: %s (%s.%s %s %s) using node %s' \
% (rinex, NetworkCode, StationCode, str(year), str(doy), platform.node())
try:
filename = rnx_name.to_rinex_format(pyRinexName.TYPE_RINEX, no_path=True)
# build the archive level sql string
# the file has not to exist in the RINEX table (check done using filename)
rs = cnn.query(
'SELECT * FROM rinex WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' AND "Filename" = \'%s\''
% (NetworkCode, StationCode, filename))
if rs.ntuples() == 0:
# no record found, possible new rinex file for this day
with pyRinex.ReadRinex(NetworkCode, StationCode, rinex) as rinexinfo:
date = pyDate.Date(year=year, doy=doy)
# verify that the rinex is from this date and that is not a multiday file
if verify_rinex_date_multiday(cnn, date, rinexinfo, Config):
try:
# create the insert statement
cnn.insert('rinex', rinexinfo.record)
event = pyEvents.Event(
Description='Archived crinex file %s added to the database.' % (rinex),
EventType='info',
StationCode=StationCode,
NetworkCode=NetworkCode,
Year=date.year,
DOY=date.doy)
cnn.insert_event(event)
except dbConnection.dbErrInsert:
# insert duplicate values: a rinex file with different name but same interval and completion %
# discard file
cnn.begin_transac()
event = pyEvents.Event(
Description='Crinex file %s was removed from the archive (and not added to db) because '
'it matched the interval and completion of an already existing file.' % rinex,
EventType='info',
StationCode=StationCode,
NetworkCode=NetworkCode,
Year=date.year,
DOY=date.doy)
cnn.insert_event(event)
rinexinfo.move_origin_file(os.path.join(Config.repository_data_reject,
'duplicate_insert/%i/%03i' % (year, doy)))
cnn.commit_transac()
except (pyRinex.pyRinexExceptionBadFile, pyRinex.pyRinexExceptionSingleEpoch) as e:
try:
filename = Utils.move(rinex, os.path.join(data_reject, os.path.basename(rinex)))
except OSError:
# permission denied: could not move file out of the archive->return error in an orderly fashion
return traceback.format_exc() + ' processing rinex: %s (%s.%s %s %s) using node %s' \
% (rinex, NetworkCode, StationCode, str(year), str(doy), platform.node())
e.event['Description'] = 'During %s, file moved to %s: %s' \
% (os.path.basename(rinex), filename, e.event['Description'])
e.event['StationCode'] = StationCode
e.event['NetworkCode'] = NetworkCode
e.event['Year'] = year
e.event['DOY'] = doy
cnn.insert_event(e.event)
return
except pyRinex.pyRinexException as e:
if cnn.active_transaction:
cnn.rollback_transac()
e.event['Description'] = e.event['Description'] + ' during ' + rinex
e.event['StationCode'] = StationCode
e.event['NetworkCode'] = NetworkCode
e.event['Year'] = year
e.event['DOY'] = doy
cnn.insert_event(e.event)
return
except Exception:
if cnn.active_transaction:
cnn.rollback_transac()
return traceback.format_exc() + ' processing rinex: %s (%s.%s %s %s) using node %s' \
% (rinex, NetworkCode, StationCode, str(year), str(doy), platform.node())
def obtain_otl(NetworkCode, StationCode):
errors = ''
x = []
y = []
z = []
try:
cnn = dbConnection.Cnn("gnss_data.cfg")
Config = pyOptions.ReadOptions("gnss_data.cfg")
pyArchive = pyArchiveStruct.RinexStruct(cnn)
# assumes that the files in the db are correct. We take 10 records from the time span (evenly spaced)
count = cnn.query('SELECT count(*) as cc FROM rinex_proc as r WHERE "NetworkCode" = \'%s\' AND '
'"StationCode" = \'%s\'' % (NetworkCode, StationCode))
count = count.dictresult()
if count[0]['cc'] >= 10:
stn = cnn.query('SELECT * FROM (SELECT *, row_number() OVER '
'(PARTITION BY "NetworkCode", "StationCode") as rnum, '
'count(*) OVER (PARTITION BY "NetworkCode", "StationCode") as cc FROM rinex_proc) as rinex '
'WHERE rinex."NetworkCode" = \'%s\' AND rinex."StationCode" = \'%s\' '
'AND rinex.rnum %% (rinex.cc/10) = 0 ORDER BY rinex."ObservationSTime"'
% (NetworkCode, StationCode))
elif 0 < count[0]['cc'] < 10:
stn = cnn.query('SELECT * FROM (SELECT row_number() OVER() as rnum, r.* FROM rinex_proc as r '
'WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' '
'ORDER BY "ObservationSTime") AS rr ' % (NetworkCode, StationCode))
else:
return 'Station %s.%s had no rinex files in the archive. Please check the database for problems.' \
% (NetworkCode, StationCode)
tblrinex = stn.dictresult()
for dbRinex in tblrinex:
# obtain the path to the crinex
file = pyArchive.build_rinex_path(NetworkCode, StationCode, dbRinex['ObservationYear'],
dbRinex['ObservationDOY'])
with pyRinex.ReadRinex(dbRinex['NetworkCode'], dbRinex['StationCode'],
os.path.join(Config.archive_path, file)) as Rinex:
# read the crinex
try:
# run ppp without otl and met and in non-strict mode
with pyPPP.RunPPP(Rinex, '', Config.options, Config.sp3types, Config.sp3altrn,
Rinex.antOffset, strict=False, apply_met=False,
clock_interpolation=True) as ppp:
ppp.exec_ppp()
x.append(ppp.x)
y.append(ppp.y)
z.append(ppp.z)
errors = errors + 'PPP -> %s.%s: %.3f %.3f %.3f\n' \
% (NetworkCode, StationCode, ppp.x, ppp.y, ppp.z)
except (pySp3.pySp3Exception, pyClk.pyClkException, pyPPP.pyRunPPPException):
# try autonomous solution
try:
brdc = pyBrdc.GetBrdcOrbits(Config.brdc_path, Rinex.date, Rinex.rootdir)
Rinex.auto_coord(brdc, chi_limit=1000)
x.append(Rinex.x)
y.append(Rinex.y)
z.append(Rinex.z)
except Exception as e:
errors = errors + str(e) + '\n'
continue
except (IOError, pyRinex.pyRinexException, pyRinex.pyRinexExceptionBadFile) as e:
# problem loading this file, try another one
errors = errors + str(e) + '\n'
continue
except Exception:
return traceback.format_exc() + ' processing: %s.%s using node %s' \
% (NetworkCode, StationCode, platform.node())
# average the x y z values
if len(x) > 0:
if len(x) > 1:
x = numpy.array(x)
y = numpy.array(y)
z = numpy.array(z)
x = numpy.mean(x[abs(x - numpy.mean(x)) < 2 * numpy.std(x)])
y = numpy.mean(y[abs(y - numpy.mean(y)) < 2 * numpy.std(y)])
z = numpy.mean(z[abs(z - numpy.mean(z)) < 2 * numpy.std(z)])
else:
x = x[0]
y = y[0]
z = z[0]
lat, lon, h = ecef2lla([x,y,z])
# calculate the otl parameters if the auto_coord returned a valid position
errors = errors + 'Mean -> %s.%s: %.3f %.3f %.3f\n' % (NetworkCode, StationCode, x, y, z)
otl = pyOTL.OceanLoading(StationCode, Config.options['grdtab'], Config.options['otlgrid'])
coeff = otl.calculate_otl_coeff(x=x, y=y, z=z)
# update record in the database
cnn.query('UPDATE stations SET "auto_x" = %.3f, "auto_y" = %.3f, "auto_z" = %.3f, '
'"lat" = %.8f, "lon" = %.8f, "height" = %.3f, '
'"Harpos_coeff_otl" = \'%s\' '
'WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\''
% (x, y, z, lat[0], lon[0], h[0], coeff, NetworkCode, StationCode))
else:
outmsg = 'Could not obtain a coordinate/otl coefficients for ' + NetworkCode + ' ' + StationCode + \
' after processing the sampled RINEX files. Maybe there were too few valid RINEX files or no ephemeris file could be found. ' \
'Debug info and errors follow:\n' + errors
return outmsg
except pyOTL.pyOTLException as e:
return "Error while calculating OTL for %s.%s: %s\n" % (NetworkCode, StationCode, str(e)) + \
'Debug info and errors follow: \n' + errors
except Exception:
# print 'problem!' + traceback.format_exc()
outmsg = traceback.format_exc() + ' processing otl: %s.%s using node %s\n' \
% (NetworkCode, StationCode, platform.node()) \
+ 'Debug info and errors follow: \n' + errors
return outmsg
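# Note on the averaging step in obtain_otl above: the per-file coordinates are combined
# after a simple 2-sigma clip. A standalone sketch of that step (a numpy array is assumed):
#   x = numpy.array(x)
#   x_mean = numpy.mean(x[abs(x - numpy.mean(x)) < 2 * numpy.std(x)])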
def insert_stninfo(NetworkCode, StationCode, stninfofile):
errors = []
try:
cnn = dbConnection.Cnn("gnss_data.cfg")
except Exception:
return traceback.format_exc() + ' insert_stninfo: ' + NetworkCode + ' ' + StationCode + \
' using node ' + platform.node()
try:
stnInfo = pyStationInfo.StationInfo(cnn, NetworkCode, StationCode, allow_empty=True)
stninfo = stnInfo.parse_station_info(stninfofile)
except pyStationInfo.pyStationInfoException:
return traceback.format_exc() + ' insert_stninfo: ' + NetworkCode + ' ' + StationCode + \
' using node ' + platform.node()
# DDG: 18-Feb-2019 used to have some code here to force the insertion of receivers and antennas
# this is not done anymore, and receivers and antennas should exist in the corresponding tables before inserting
# otherwise, a python exception will be raised.
# ready to insert stuff to station info table
for stn in stninfo:
if stn.get('StationCode').lower() == StationCode:
try:
stnInfo.InsertStationInfo(stn)
except pyStationInfo.pyStationInfoException as e:
errors.append(str(e))
except Exception:
errors.append(traceback.format_exc() + ' insert_stninfo: ' + NetworkCode + ' ' + StationCode +
' using node ' + platform.node())
continue
if not errors:
return
else:
return '\n\n'.join(errors)
def remove_from_archive(cnn, record, Rinex, Config):
# do not make very complex things here, just move it out from the archive
retry_folder = os.path.join(Config.repository_data_in_retry,
'inconsistent_ppp_solution/' + Rinex.date.yyyy() + '/' + Rinex.date.ddd())
pyArchive = pyArchiveStruct.RinexStruct(cnn)
pyArchive.remove_rinex(record, retry_folder)
event = pyEvents.Event(
Description='After running PPP it was found that the rinex file %s does not belong to this station. '
'This file was removed from the rinex table and moved to the repository/data_in_retry to add it '
'to the corresponding station.' % Rinex.origin_file,
NetworkCode=record['NetworkCode'],
StationCode=record['StationCode'],
EventType='warn',
Year=int(Rinex.date.year),
DOY=int(Rinex.date.doy))
cnn.insert_event(event)
def execute_ppp(record, rinex_path, h_tolerance):
NetworkCode = record['NetworkCode']
StationCode = record['StationCode']
year = record['ObservationYear']
doy = record['ObservationDOY']
try:
# try to open a connection to the database
cnn = dbConnection.Cnn("gnss_data.cfg")
Config = pyOptions.ReadOptions("gnss_data.cfg")
except Exception:
return traceback.format_exc() + ' processing rinex: %s.%s %s %s using node %s' \
% (NetworkCode, StationCode, str(year), str(doy), platform.node())
# create a temp folder in production to put the orbit in
# we need to check the RF of the orbit to see if we have this solution in the DB
try:
# check to see if record exists for this file in ppp_soln
# DDG: now read the frame from the config file
frame, _ = Utils.determine_frame(Config.options['frames'], pyDate.Date(year=year, doy=doy))
ppp_soln = cnn.query('SELECT * FROM ppp_soln WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' AND '
'"Year" = %s AND "DOY" = %s AND "ReferenceFrame" = \'%s\''
% (NetworkCode, StationCode, year, doy, frame))
if ppp_soln.ntuples() == 0:
# load the stations record to get the OTL params
rs_stn = cnn.query('SELECT * FROM stations WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\''
% (NetworkCode, StationCode))
stn = rs_stn.dictresult()
# RINEX FILE TO BE PROCESSED
with pyRinex.ReadRinex(NetworkCode, StationCode, rinex_path) as Rinex:
if not verify_rinex_date_multiday(cnn, Rinex.date, Rinex, Config):
# the file is a multiday file. These files are not supposed to be in the archive, but, due to a bug
# in ScanArchive (now fixed - 2017-10-26) some multiday files are still in the rinex table
# the file is moved out of the archive (into the retry folder) and the rinex record is deleted
event = pyEvents.Event(EventType='warn',
Description='RINEX record in database belonged to a multiday file. '
'The record has been removed from the database. '
'See previous associated event.',
StationCode=StationCode,
NetworkCode=NetworkCode,
Year=int(Rinex.date.year),
DOY=int(Rinex.date.doy))
cnn.insert_event(event)
cnn.begin_transac()
cnn.query(
'DELETE FROM gamit_soln WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' '
'AND "Year" = %i AND "DOY" = %i'
% (record['NetworkCode'], record['StationCode'], record['ObservationYear'],
record['ObservationDOY']))
cnn.query(
'DELETE FROM ppp_soln WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' '
'AND "Year" = %i AND "DOY" = %i'
% (record['NetworkCode'], record['StationCode'], record['ObservationYear'],
record['ObservationDOY']))
cnn.query(
'DELETE FROM rinex WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' '
'AND "ObservationYear" = %i AND "ObservationDOY" = %i AND "Filename" = \'%s\''
% (record['NetworkCode'], record['StationCode'], record['ObservationYear'],
record['ObservationDOY'], record['Filename']))
cnn.commit_transac()
return
stninfo = pyStationInfo.StationInfo(cnn, NetworkCode, StationCode, Rinex.date, h_tolerance=h_tolerance)
Rinex.normalize_header(stninfo, x=stn[0]['auto_x'], y=stn[0]['auto_y'], z=stn[0]['auto_z'])
with pyPPP.RunPPP(Rinex, stn[0]['Harpos_coeff_otl'], Config.options, Config.sp3types, Config.sp3altrn,
stninfo.to_dharp(stninfo.currentrecord).AntennaHeight,
hash=stninfo.currentrecord.hash) as ppp:
ppp.exec_ppp()
# verify that the solution is from the station it claims to be
Result, match, _ = ppp.verify_spatial_coherence(cnn, StationCode)
if Result:
if match[0]['NetworkCode'] == NetworkCode and match[0]['StationCode'] == StationCode:
# the match agrees with the station-day that we THINK we are processing
# this check should not be necessary if the rinex went through Archive Service, since we
# already match rinex vs station
# but it's still here to prevent that a rinex imported by ScanArchive (which assumes the
# rinex files belong to the network/station of the folder) doesn't get into the PPP table
# if it's not of the station it claims to be.
# insert record in DB
cnn.insert('ppp_soln', ppp.record)
# DDG: Eric's request to record an event with the date of each new PPP solution
event = pyEvents.Event(Description='A new PPP solution was created for frame ' + ppp.frame,
NetworkCode=NetworkCode,
StationCode=StationCode,
Year=int(year),
DOY=int(doy))
cnn.insert_event(event)
else:
remove_from_archive(cnn, record, Rinex, Config)
else:
remove_from_archive(cnn, record, Rinex, Config)
except (pyRinex.pyRinexException, pyRinex.pyRinexExceptionBadFile, pyRinex.pyRinexExceptionSingleEpoch) as e:
e.event['StationCode'] = StationCode
e.event['NetworkCode'] = NetworkCode
e.event['Year'] = int(year)
e.event['DOY'] = int(doy)
cnn.insert_event(e.event)
except pyPPP.pyRunPPPException as e:
e.event['StationCode'] = StationCode
e.event['NetworkCode'] = NetworkCode
e.event['Year'] = int(year)
e.event['DOY'] = int(doy)
cnn.insert_event(e.event)
except pyStationInfo.pyStationInfoException as e:
e.event['StationCode'] = StationCode
e.event['NetworkCode'] = NetworkCode
e.event['Year'] = int(year)
e.event['DOY'] = int(doy)
cnn.insert_event(e.event)
except Exception:
return traceback.format_exc() + ' processing rinex: %s.%s %s %s using node %s' \
% (NetworkCode, StationCode, str(year), str(doy), platform.node())
def post_scan_rinex_job(cnn, Archive, rinex_file, rinexpath, master_list, JobServer, ignore):
valid, result = Archive.parse_archive_keys(rinex_file, key_filter=('network', 'station', 'year', 'doy'))
if valid:
NetworkCode = result['network']
StationCode = result['station']
year = result['year']
doy = result['doy']
# check the master_list
if NetworkCode + '.' + StationCode in master_list or ignore:
# check existence of network in the db
rs = cnn.query('SELECT * FROM networks WHERE "NetworkCode" = \'%s\'' % NetworkCode)
if rs.ntuples() == 0:
cnn.insert('networks', NetworkCode=NetworkCode, NetworkName='UNK')
# check existence of station in the db
rs = cnn.query('SELECT * FROM stations WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\''
% (NetworkCode, StationCode))
if rs.ntuples() == 0:
# run grdtab to get the OTL parameters in HARPOS format and insert them in the db
# use the current rinex to get an approximate coordinate
cnn.insert('stations', NetworkCode=NetworkCode, StationCode=StationCode)
JobServer.submit(NetworkCode, StationCode, year, doy, rinexpath)
def scan_rinex(cnn, JobServer, pyArchive, archive_path, master_list, ignore):
master_list = [item['NetworkCode'] + '.' + item['StationCode'] for item in master_list]
print " >> Analyzing the archive's structure..."
pbar = tqdm(ncols=80, unit='crz', disable=None)
depfuncs = (verify_rinex_date_multiday,)
modules = ('dbConnection', 'pyDate', 'pyRinex', 'shutil', 'platform', 'datetime',
'traceback', 'pyOptions', 'pyEvents', 'Utils', 'os', 'pyRinexName')
JobServer.create_cluster(try_insert, dependencies=depfuncs, modules=modules, callback=callback_handle)
if ignore[0] == 1:
ignore = True
else:
ignore = False
for path, _, files in scandir.walk(archive_path):
for sfile in files:
# DDG issue #15: match the name of the file to a valid rinex filename
try:
_ = pyRinexName.RinexNameFormat(sfile)
# only examine valid rinex compressed files
rnx = os.path.join(path, sfile).rsplit(archive_path + '/')[1]
path2rnx = os.path.join(path, sfile)
pbar.set_postfix(crinex=rnx)
pbar.update()
post_scan_rinex_job(cnn, pyArchive, rnx, path2rnx, master_list, JobServer, ignore)
except pyRinexName.RinexNameException:
pass
JobServer.wait()
# handle any output messages during this batch
if error_message:
tqdm.write(' -- There were unhandled errors. Please check errors_pyScanArchive.log for details')
pbar.close()
def process_otl(cnn, JobServer, master_list):
print ""
print " >> Calculating coordinates and OTL for new stations..."
master_list = [item['NetworkCode'] + '.' + item['StationCode'] for item in master_list]
rs = cnn.query('SELECT "NetworkCode", "StationCode" FROM stations '
'WHERE auto_x is null OR auto_y is null OR auto_z is null OR "Harpos_coeff_otl" is null '
'AND "NetworkCode" not like \'?%\' '
'AND "NetworkCode" || \'.\' || "StationCode" IN (\'' + '\',\''.join(master_list) + '\')')
records = rs.dictresult()
pbar = tqdm(total=len(records), ncols=80, disable=None)
depfuncs = (ecef2lla,)
modules = ('dbConnection', 'pyRinex', 'pyArchiveStruct', 'pyOTL', 'pyPPP', 'numpy', 'platform', 'pySp3',
'traceback', 'pyOptions', 'pyBrdc', 'pyClk')
JobServer.create_cluster(obtain_otl, depfuncs, callback_handle, progress_bar=pbar, modules=modules)
for record in records:
NetworkCode = record['NetworkCode']
StationCode = record['StationCode']
JobServer.submit(NetworkCode, StationCode)
JobServer.wait()
pbar.close()
def scan_station_info(JobServer, pyArchive, archive_path, master_list):
print " >> Searching for station info files in the archive..."
stninfo, path2stninfo = pyArchive.scan_archive_struct_stninfo(archive_path)
print " >> Processing Station Info files..."
master_list = [item['NetworkCode'] + '.' + item['StationCode'] for item in master_list]
pbar = tqdm(total=len(stninfo), ncols=80, disable=None)
modules = ('dbConnection', 'pyStationInfo', 'sys', 'datetime', 'pyDate', 'platform', 'traceback')
JobServer.create_cluster(insert_stninfo, callback=callback_handle, progress_bar=pbar, modules=modules)
for stninfofile, stninfopath in zip(stninfo,path2stninfo):
valid, result = pyArchive.parse_archive_keys(stninfofile, key_filter=('network', 'station'))
if valid:
NetworkCode = result['network']
StationCode = result['station']
if NetworkCode + '.' + StationCode in master_list:
# we were able to get the network and station code, add it to the database
JobServer.submit(NetworkCode, StationCode, stninfopath)
JobServer.wait()
pbar.close()
def scan_station_info_man(cnn, pyArchive, stn_info_path, stations, stn_info_net, stdin=None):
# input "stations" has a list in net.stnm format
print " >> Manual scan of station info files in " + stn_info_path
NetworkCode = stn_info_net
if stdin:
stn_info_obj = pyStationInfo.StationInfo(cnn)
stn_list = stn_info_obj.parse_station_info(stdin)
for Station in tqdm(stations, total=len(stations), disable=None):
# input "stations" has a list in net.stnm format
if Station['StationCode'] in [stn['StationCode'].lower() for stn in stn_list]:
tqdm.write(" >> Processing %s using network code %s" % (Station['StationCode'], NetworkCode))
out = insert_stninfo(NetworkCode, Station['StationCode'], stdin)
if out:
tqdm.write(out)
else:
tqdm.write(' >> Station %s.%s was not found in the station info file %s' %
(Station['NetworkCode'], Station['StationCode'], 'standard input'))
else:
if os.path.isfile(stn_info_path):
path2stninfo = [stn_info_path]
else:
_, path2stninfo = pyArchive.scan_archive_struct_stninfo(stn_info_path)
print " >> Found %i station information files." % (len(path2stninfo))
for stninfopath in path2stninfo:
stn_info_obj = pyStationInfo.StationInfo(cnn)
stn_list = stn_info_obj.parse_station_info(stninfopath)
for Station in tqdm(stations, total=len(stations), disable=None):
# input "stations" has a list in net.stnm format
if Station['StationCode'] in [stn['StationCode'].lower() for stn in stn_list]:
tqdm.write(" >> Processing %s using network code %s" % (Station['StationCode'], NetworkCode))
out = insert_stninfo(NetworkCode, Station['StationCode'], stninfopath)
if out:
tqdm.write(out)
else:
tqdm.write(' >> Station %s.%s was not found in the station info file %s' %
(Station['NetworkCode'], Station['StationCode'], stninfopath))
return
def hash_check(cnn, master_list, sdate, edate, rehash=False, h_tolerant=0):
print " >> Running hash check to the PPP solutions..."
master_list = [item['NetworkCode'] + '.' + item['StationCode'] for item in master_list]
ppp_soln = cnn.query('SELECT * FROM ppp_soln '
'WHERE "NetworkCode" || \'.\' || "StationCode" IN (\'' + '\',\''.join(master_list) + '\') '
'AND "Year" || \' \' || to_char("DOY", \'fm000\') '
'BETWEEN \'' + sdate.yyyyddd() + '\' AND \'' + (edate+1).yyyyddd() + '\' '
'ORDER BY "Year", "DOY", "NetworkCode", "StationCode"')
tbl = ppp_soln.dictresult()
archive = pyArchiveStruct.RinexStruct(cnn)
# check the hash values if specified
if not rehash:
print ' -- Checking hash values.'
else:
print ' -- Rehashing all records. This may take a while...'
for soln in tqdm(tbl, ncols=80, disable=None):
# load station info object
try:
# lookup for the rinex_proc record
rinex = archive.get_rinex_record(NetworkCode=soln['NetworkCode'], StationCode=soln['StationCode'],
ObservationYear=soln['Year'], ObservationDOY=soln['DOY'])
if not rinex:
# if no records, print warning
tqdm.write(" -- Could not find RINEX for %s.%s %i %03i. PPP solution will be deleted."
% (soln['NetworkCode'], soln['StationCode'], soln['Year'], soln['DOY']))
cnn.delete('ppp_soln', soln)
else:
# select the first record
rinex = rinex[0]
dd = rinex['ObservationSTime'] + (rinex['ObservationETime'] - rinex['ObservationSTime']) / 2
stninfo = pyStationInfo.StationInfo(cnn, soln['NetworkCode'], soln['StationCode'],
pyDate.Date(datetime=dd),
h_tolerance=h_tolerant)
if stninfo.currentrecord.hash != soln['hash']:
if not rehash:
tqdm.write(" -- Hash value for %s.%s %i %03i does not match with Station Information hash. "
"PPP coordinate will be recalculated."
% (soln['NetworkCode'], soln['StationCode'], soln['Year'], soln['DOY']))
cnn.delete('ppp_soln', soln)
else:
tqdm.write(" -- %s.%s %i %03i has been rehashed."
% (soln['NetworkCode'], soln['StationCode'], soln['Year'], soln['DOY']))
cnn.update('ppp_soln', soln, hash=stninfo.currentrecord.hash)
except pyStationInfo.pyStationInfoException as e:
tqdm.write(str(e))
except Exception:
raise
if not rehash:
print ' -- Done checking hash values.'
else:
print ' -- Done rehashing PPP records.'
def process_ppp(cnn, Config, pyArchive, archive_path, JobServer, master_list, sdate, edate, h_tolerance):
print " >> Running PPP on the RINEX files in the archive..."
master_list = [item['NetworkCode'] + '.' + item['StationCode'] for item in master_list]
# for each rinex in the db, run PPP and get a coordinate
rs_rnx = cnn.query('SELECT rinex.* FROM rinex_proc as rinex '
'LEFT JOIN ppp_soln ON '
'rinex."NetworkCode" = ppp_soln."NetworkCode" AND '
'rinex."StationCode" = ppp_soln."StationCode" AND '
'rinex."ObservationYear" = ppp_soln."Year" AND '
'rinex."ObservationDOY" = ppp_soln."DOY" '
'WHERE ppp_soln."NetworkCode" is null AND '
'rinex."NetworkCode" || \'.\' || rinex."StationCode" IN (\'' + '\',\''.join(master_list) + '\') '
'AND rinex."ObservationSTime" BETWEEN \''
+ sdate.yyyymmdd() + '\' AND \'' + (edate+1).yyyymmdd() + '\' '
'ORDER BY "ObservationSTime"')
tblrinex = rs_rnx.dictresult()
pbar = tqdm(total=len(tblrinex), ncols=80, disable=None)
modules = ('dbConnection', 'pyRinex', 'pyPPP', 'pyStationInfo', 'pyDate', 'pySp3', 'os', 'platform',
'pyArchiveStruct', 'traceback', 'pyOptions', 'pyEvents', 'Utils')
depfuncs = (remove_from_archive, verify_rinex_date_multiday)
JobServer.create_cluster(execute_ppp, depfuncs, callback=callback_handle, progress_bar=pbar, modules=modules)
for record in tblrinex:
rinex_path = pyArchive.build_rinex_path(record['NetworkCode'], record['StationCode'],
record['ObservationYear'], record['ObservationDOY'])
# add the base dir
rinex_path = os.path.join(archive_path, rinex_path)
JobServer.submit(record, rinex_path, h_tolerance)
JobServer.wait()
pbar.close()
# print a summary of the events generated by the run
print_scan_archive_summary(cnn)
def print_scan_archive_summary(cnn):
# find the last event in the executions table
exec_date = cnn.query_float('SELECT max(exec_date) as mx FROM executions WHERE script = \'ScanArchive.py\'')
info = cnn.query_float('SELECT count(*) as cc FROM events WHERE "EventDate" >= \'%s\' AND "EventType" = \'info\''
% exec_date[0][0])
erro = cnn.query_float('SELECT count(*) as cc FROM events WHERE "EventDate" >= \'%s\' AND "EventType" = \'error\''
% exec_date[0][0])
warn = cnn.query_float('SELECT count(*) as cc FROM events WHERE "EventDate" >= \'%s\' AND "EventType" = \'warn\''
% exec_date[0][0])
print ' >> Summary of events for this run:'
print ' -- info : %i' % info[0][0]
print ' -- errors : %i' % erro[0][0]
print ' -- warnings: %i' % warn[0][0]
def export_station(cnn, stnlist, pyArchive, archive_path, dataless):
# loop collecting the necessary information
print " >> Collecting the information for each station in the list..."
pbar1 = tqdm(total=len(stnlist), ncols=160, position=0, disable=None)
for stn in tqdm(stnlist, ncols=80, disable=None):
NetworkCode = stn['NetworkCode']
StationCode = stn['StationCode']
rs_stn = cnn.query('SELECT * FROM stations WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\''
% (NetworkCode, StationCode))
stn = rs_stn.dictresult()[0]
pbar1.set_postfix(Station='%s.%s' % (NetworkCode, StationCode))
pbar1.update()
export_dic = dict()
# list of rinex files
rinex_lst = cnn.query('SELECT * FROM rinex WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' '
'ORDER BY "ObservationYear", "ObservationDOY"' % (NetworkCode, StationCode))
rinex_lst = rinex_lst.dictresult()
# rinex_lst = pyArchive.get_rinex_record(NetworkCode=NetworkCode, StationCode=StationCode)
# list of metadata
stninfo = pyStationInfo.StationInfo(cnn, NetworkCode, StationCode, allow_empty=True)
export_dic['NetworkCode'] = NetworkCode
export_dic['StationCode'] = StationCode
export_dic['StationInfo'] = stninfo
if stn['lat'] and stn['auto_x'] and stn['Harpos_coeff_otl']:
export_dic['lat'] = stn['lat']
export_dic['lon'] = stn['lon']
export_dic['height'] = stn['height']
export_dic['x'] = stn['auto_x']
export_dic['y'] = stn['auto_y']
export_dic['z'] = stn['auto_z']
export_dic['otl'] = stn['Harpos_coeff_otl']
export_dic['dome'] = stn['dome']
export_dic['max_dist'] = stn['max_dist']
else:
tqdm.write(' -- Warning! %s.%s has incomplete station data' % (NetworkCode, StationCode))
# create dir for the rinex files
dest = 'production/export/%s.%s' % (NetworkCode, StationCode)
if not os.path.isdir(dest):
os.makedirs(dest)
rinex_dict = []
pbar2 = tqdm(total=len(rinex_lst), ncols=160, position=1, disable=None)
for rnx in rinex_lst:
# make a copy of each file
rnx_path = pyArchive.build_rinex_path(NetworkCode=NetworkCode, StationCode=StationCode,
ObservationYear=rnx['ObservationYear'],
ObservationDOY=rnx['ObservationDOY'], filename=rnx['Filename'])
try:
if not dataless:
# only copy the files if dataless == False
shutil.copy(os.path.join(archive_path, rnx_path), os.path.join(dest, os.path.basename(rnx_path)))
rinex_dict = rinex_dict + [rnx]
except IOError:
tqdm.write(' -- Warning! File not found in archive: %s' % (os.path.join(archive_path, rnx_path)))
pbar2.set_postfix(Date='%s %03s' % (rnx['ObservationYear'], rnx['ObservationDOY']))
pbar2.update()
pbar2.close()
export_dic['files'] = len(rinex_dict)
export_dic['rinex'] = rinex_dict
with open(os.path.join(dest, '%s.%s.json') % (NetworkCode, StationCode), 'w') as file:
json.dump(export_dic, file, indent=4, sort_keys=True, cls=Encoder)
# make the zip file with the station
with zipfile.ZipFile('%s.%s.zip' % (NetworkCode, StationCode),
"w", zipfile.ZIP_DEFLATED, allowZip64=True) as zf:
for root, _, filenames in os.walk(dest):
for name in filenames:
name = os.path.join(root, name)
name = os.path.normpath(name)
zf.write(name, os.path.basename(name))
shutil.rmtree(dest)
pbar1.close()
print ""
def import_station(cnn, args):
files = args[1:]
network = args[0]
archive = pyArchiveStruct.RinexStruct(cnn)
print " >> Processing input files..."
for ff in tqdm(files, ncols=160, disable=None):
filename = os.path.basename(ff)
if filename.endswith('.zip'):
fileparts = filename.split('.')
NetworkCode = fileparts[0].lower()
StationCode = fileparts[1].lower()
path = 'production/archive/' + str(uuid.uuid4())
try:
# process each station file
zipfile.ZipFile(ff).extractall(path)
jfile = glob.glob(os.path.join(path, '*.json'))
# DDG: may want to consider other compression formats
rnx_files = [f for f_ in [glob.glob(os.path.join(path, e)) for e in ['*.gz', '*d.Z']] for f in f_]
station = json.load(open(jfile[0], 'r'))
spatial = pyPPP.PPPSpatialCheck([station['lat']], [station['lon']], [station['height']])
result, match, closest_stn = spatial.verify_spatial_coherence(cnn, StationCode)
if result:
tqdm.write(' -- Found external station %s.%s in network %s (distance %.3f)'
% (NetworkCode, StationCode, match[0]['NetworkCode'], match[0]['distance']))
# ask the user what to do with the data
r = raw_input('\n Insert data to this station?: y/n ')
if r.lower() == 'y':
try_insert_files(cnn, archive, station, match[0]['NetworkCode'], StationCode, rnx_files)
else:
if len(match) == 1:
tqdm.write(' -- External station %s.%s not found. Possible match is %s.%s: %.3f m'
% (NetworkCode, StationCode, match[0]['NetworkCode'],
match[0]['StationCode'], match[0]['distance']))
# ask the user what to do with the data
r = raw_input('\n Insert new station %s with network code %s '
'or add this data to station %s.%s?: (i)nsert new/(a)dd '
% (StationCode, network, match[0]['NetworkCode'], match[0]['StationCode']))
if r.lower() == 'i':
if insert_station(cnn, network, station):
try_insert_files(cnn, archive, station, network, StationCode, rnx_files)
else:
# if data is added to existing station, replace the StationCode with the matched
# StationCode the rinexobj will apply the naming convention to the file
try_insert_files(cnn, archive, station, match[0]['NetworkCode'],
match[0]['StationCode'], rnx_files)
elif len(match) > 1:
tqdm.write(' -- External station %s.%s not found. Possible matches are %s'
% (NetworkCode, StationCode,
', '.join(['%s.%s: %.3f m' %
(m['NetworkCode'], m['StationCode'], m['distance']) for m in match])))
options = ', '.join(['%s.%s (%i)' % (m['NetworkCode'], m['StationCode'], i+1)
for i, m in enumerate(match)])
r = raw_input('\n Insert new station %s with network code %s '
'or add this data as %s: (i)nsert new/(number)' % (StationCode, network, options))
if r.lower() == 'i':
if insert_station(cnn, network, station):
try_insert_files(cnn, archive, station, network, StationCode, rnx_files)
else:
try:
i = int(r)
try_insert_files(cnn, archive, station, match[i]['NetworkCode'],
match[i]['StationCode'], rnx_files)
except ValueError:
tqdm.write(' -- Selected value is not numeric!')
else:
tqdm.write(' -- External station %s.%s not found. Closest station is %s.%s: %.3f m'
% (NetworkCode, StationCode, closest_stn[0]['NetworkCode'],
closest_stn[0]['StationCode'], closest_stn[0]['distance']))
# ask the user what to do with the data
r = raw_input('\n Insert new station with default station network %s?: y/n ' % network)
if r.lower() == 'y':
if insert_station(cnn, network, station):
# now that station was created, insert files
try_insert_files(cnn, archive, station, network,
StationCode, rnx_files)
# delete all files once we're done.
shutil.rmtree(path)
except zipfile.BadZipfile:
tqdm.write(' -- Bad zipfile detected: %s' % ff)
def insert_station(cnn, network, station):
# check that another station with same name doesn't exist in this network
rstn = cnn.query_float('SELECT * FROM stations WHERE "NetworkCode" = \'%s\' AND '
'"StationCode" = \'%s\'' % (network, station['StationCode']))
if len(rstn) > 0:
tqdm.write(' -- Station code %s already exists in network %s. Cannot insert station'
% (station['StationCode'], network))
return False
else:
# check if network exists
if not cnn.query_float('SELECT * FROM networks WHERE "NetworkCode" = \'%s\'' % network):
cnn.insert('networks', NetworkCode=network)
# insert the station and metadata in the json file
cnn.insert('stations',
NetworkCode=network,
StationCode=station['StationCode'],
auto_x=station['x'], auto_y=station['y'], auto_z=station['z'],
Harpos_coeff_otl=station['otl'],
lat=station['lat'],
lon=station['lon'],
height=station['height'],
max_dist=station['max_dist'] if 'max_dist' in station.keys() else None,
dome=station['dome'] if 'dome' in station.keys() else None)
return True
def try_insert_files(cnn, archive, station, NetworkCode, StationCode, rinex):
import_stninfo = station['StationInfo']
stninfo = pyStationInfo.StationInfo(cnn, NetworkCode, StationCode, allow_empty=True)
if rinex:
# a station file with rinex data in it. Attempt to insert the data and the associated station information
for rnx in rinex:
with pyRinex.ReadRinex(NetworkCode, StationCode, rnx) as rinexinfo:
inserted = archive.insert_rinex(rinexobj=rinexinfo)
if not inserted:
# display an error message
tqdm.write(' -- %s.%s (%s) not imported: already existed in database.'
% (NetworkCode, StationCode, os.path.basename(rnx)))
else:
tqdm.write(' -- %s.%s (%s) successfully imported into database.'
% (NetworkCode, StationCode, os.path.basename(rnx)))
try:
pyStationInfo.StationInfo(cnn, NetworkCode, StationCode, rinexinfo.date)
except pyStationInfo.pyStationInfoException:
# station info not in db! import the corresponding station info
stninfo_inserted = False
for record in import_stninfo:
import_record = pyStationInfo.StationInfoRecord(NetworkCode, StationCode, record)
# DDG: to avoid problems with files that contain two different station info records, we check
# that import_record.DateEnd.datetime() is not less than the first observation of the rinex
if rinexinfo.datetime_firstObs >= import_record.DateStart.datetime() and \
not import_record.DateEnd.datetime() <= rinexinfo.datetime_firstObs:
if rinexinfo.datetime_lastObs > import_record.DateEnd.datetime():
tqdm.write(' WARNING! RINEX file %s has an end date past the station info record. '
'Maybe this file has a receiver/antenna change in the middle.'
% os.path.basename(rnx))
# the record we are looking for
try:
stninfo.InsertStationInfo(import_record)
stninfo_inserted = True
except pyStationInfo.pyStationInfoException as e:
tqdm.write(' ' + str(e))
if not stninfo_inserted:
tqdm.write(' Could not find a valid station info in the database or in the station '
'package. File remains in database without metadata.')
else:
# a station file without rinex data
# attempt to merge the station information
for record in import_stninfo:
import_record = pyStationInfo.StationInfoRecord(NetworkCode, StationCode, record)
try:
stninfo.InsertStationInfo(import_record)
tqdm.write(' -- Successful insert: %s -> %s' %
(str(import_record['DateStart']), str(import_record['DateEnd'])))
except pyStationInfo.pyStationInfoException as e:
tqdm.write(' -- ' + str(e))
def get_rinex_file(cnn, stnlist, date, Archive_path):
archive = pyArchiveStruct.RinexStruct(cnn)
print " >> Getting stations from db..."
for stn in tqdm(stnlist, ncols=80, disable=None):
NetworkCode = stn['NetworkCode']
StationCode = stn['StationCode']
rinex = archive.build_rinex_path(NetworkCode, StationCode, date.year, date.doy)
if rinex is not None:
rinex = os.path.join(Archive_path, rinex)
with pyRinex.ReadRinex(NetworkCode, StationCode, rinex, False) as Rinex: # type: pyRinex.ReadRinex
StationInfo = pyStationInfo.StationInfo(cnn, NetworkCode, StationCode, Rinex.date)
Rinex.normalize_header(StationInfo)
Rinex.compress_local_copyto('./')
else:
tqdm.write(" -- %s not found for %s.%s" % (date.yyyyddd(), NetworkCode, StationCode))
def main():
parser = argparse.ArgumentParser(description='Archive operations Main Program')
parser.add_argument('stnlist', type=str, nargs='+', metavar='all|net.stnm',
help="List of networks/stations to process given in [net].[stnm] format or just [stnm] "
"(separated by spaces; if [stnm] is not unique in the database, all stations with that "
"name will be processed). Use keyword 'all' to process all stations in the database. "
"If [net].all is given, all stations from network [net] will be processed. "
"Alternatively, a file with the station list can be provided.")
parser.add_argument('-rinex', '--rinex', metavar='{ignore_stnlist}', type=int, nargs=1, default=None,
help="Scan the current archive for RINEX 2/3 files and add them to the database if missing. "
"Station list will be used to filter specific networks and stations if {ignore_stnlist} = "
"0. For example: ScanArchive [net].all -rinex 0 will process all the stations in network "
"[net], but networks and stations have to exist in the database. "
"If ScanArchive [net].all -rinex 1 the station list will be ignored and everything in the "
"archive will be checked (and added to the db if missing) even if networks and stations "
"don't exist. Networks and stations will be added if they don't exist.")
parser.add_argument('-otl', '--ocean_loading', action='store_true',
help="Calculate ocean loading coefficients using FES2004. To calculate FES2014b coefficients, "
"use OTL_FES2014b.py")
parser.add_argument('-stninfo', '--station_info', nargs='*', metavar='argument',
help="Insert station information to the database. "
"If no arguments are given, then scan the archive for station info files and use their "
"location (folder) to determine the network to use during insertion. "
"Only stations in the station list will be processed. "
"If a filename is provided, then scan that file only, in which case a second argument "
"specifies the network to use during insertion. Eg: -stninfo ~/station.info arg. "
"In cases where multiple networks are being processed, the network argument will be used "
"to desambiguate station code conflicts. "
"Eg: ScanArchive all -stninfo ~/station.info arg -> if a station named igm1 exists in "
"networks 'igs' and 'arg', only 'arg.igm1' will get the station information insert. "
"Use keyword 'stdin' to read the station information data from the pipeline.")
parser.add_argument('-export', '--export_station', nargs='?', metavar='[dataless seed]', default=None, const=False,
help="Export a station from the local database that can be imported into another "
"Parallel.GAMIT system using the -import option."
"One file is created per station in the current directory. If the [dataless seed] switch "
"is passed (e.g. -export true), then the export seed is created without data "
"(only metadata included, i.e. station info, station record, etc).")
parser.add_argument('-import', '--import_station', nargs='+', type=str, metavar=('{default net}', '{zipfiles}'),
help="Import a station from zipfiles produced by another Parallel.GAMIT system. "
"Wildcards are accepted to import multiple zipfiles. If station does not exist, use "
"{default net} to specify the network where station should be added to. If {default net} "
"does not exit, it will be created. Station list is ignored.")
parser.add_argument('-get', '--get_from_archive', nargs=1, metavar='{date}',
help="Get the specified station from the archive and copy it to the current directory. Fix it "
"to match the station information in the database.")
parser.add_argument('-ppp', '--ppp', nargs='*', metavar='argument',
help="Run ppp on the rinex files in the database. Append [date_start] and (optionally) "
"[date_end] to limit the range of the processing. Allowed formats are yyyy_doy, wwww-d, "
"fyear or yyyy/mm/dd. Append keyword 'hash' to the end to check the PPP hash values "
"against the station information records. If hash doesn't match, recalculate the PPP "
"solutions.")
parser.add_argument('-rehash', '--rehash', nargs='*', metavar='argument',
help="Check PPP hash against station information hash. Rehash PPP solutions to match the "
"station information hash without recalculating the PPP solution. Optionally append "
"[date_start] and (optionally) [date_end] to limit the rehashing time window. "
"Allowed formats are yyyy.doy or yyyy/mm/dd.")
parser.add_argument('-tol', '--stninfo_tolerant', nargs=1, type=int, metavar='{hours}', default=[0],
help="Specify a tolerance (in hours) for station information gaps (only use for early "
"survey data). Default is zero.")
parser.add_argument('-np', '--noparallel', action='store_true', help="Execute command without parallelization.")
args = parser.parse_args()
if args.station_info is not None and (not len(args.station_info) in (0, 2)):
parser.error('-stninfo requires 0 or 2 arguments. {} given.'.format(len(args.station_info)))
Config = pyOptions.ReadOptions("gnss_data.cfg") # type: pyOptions.ReadOptions
cnn = dbConnection.Cnn("gnss_data.cfg")
# create the execution log
cnn.insert('executions', script='ScanArchive.py')
# get the station list
stnlist = Utils.process_stnlist(cnn, args.stnlist)
pyArchive = pyArchiveStruct.RinexStruct(cnn)
JobServer = pyJobServer.JobServer(Config, run_parallel=not args.noparallel,
software_sync=[Config.options['ppp_remote_local']]) # type: pyJobServer.JobServer
#########################################
if args.rinex is not None:
scan_rinex(cnn, JobServer, pyArchive, Config.archive_path, stnlist, args.rinex)
#########################################
if args.ocean_loading:
process_otl(cnn, JobServer, stnlist)
#########################################
if args.station_info is not None:
if len(args.station_info) == 0:
scan_station_info(JobServer, pyArchive, Config.archive_path, stnlist)
else:
stn_info_stdin = []
if args.station_info[0] == 'stdin':
for line in sys.stdin:
stn_info_stdin.append(line)
scan_station_info_man(cnn, pyArchive, args.station_info[0], stnlist, args.station_info[1], stn_info_stdin)
#########################################
if args.rehash is not None:
dates = []
try:
dates = process_date(args.rehash)
except ValueError as e:
parser.error(str(e))
hash_check(cnn, stnlist, dates[0], dates[1], rehash=True, h_tolerant=args.stninfo_tolerant[0])
#########################################
if args.ppp is not None:
# check other possible arguments
dates = []
do_hash = True if 'hash' in args.ppp else False
date_args = [date for date in args.ppp if date != 'hash']
try:
dates = process_date(date_args)
except ValueError as e:
parser.error(str(e))
if do_hash:
hash_check(cnn, stnlist, dates[0], dates[1], rehash=False, h_tolerant=args.stninfo_tolerant[0])
process_ppp(cnn, Config, pyArchive, Config.archive_path, JobServer, stnlist, dates[0], dates[1],
args.stninfo_tolerant[0])
#########################################
if args.export_station is not None:
export_station(cnn, stnlist, pyArchive, Config.archive_path, args.export_station)
#########################################
if args.import_station:
import_station(cnn, args.import_station)
#########################################
if args.get_from_archive:
dates = process_date(args.get_from_archive)
get_rinex_file(cnn, stnlist, dates[0], Config.archive_path)
# remove the production dir
# if os.path.isdir('production'):
# rmtree('production')
JobServer.close_cluster()
if __name__ == '__main__':
main() | gpl-3.0 |
ekasitk/sahara | sahara/utils/hacking/logging_checks.py | 6 | 4253 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
# NOTE(Kezar): these checks were copied from cinder/nova and should one day
# appear in the general hacking checks, so we need to remember that and remove
# them when that happens.
# FIXME(Kezar): maybe it would be better to write this the way it is done in
# keystone, but that would need additional work and a full refactoring of the checks.
log_translation_LI = re.compile(
r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
log_translation_LE = re.compile(
r"(.)*LOG\.(exception)\(\s*(_\(|'|\")")
log_translation_LW = re.compile(
r"(.)*LOG\.(warning)\(\s*(_\(|'|\")")
log_translation_LC = re.compile(
r"(.)*LOG\.(critical)\(\s*('|\")")
accepted_log_level = re.compile(
r"^LOG\.(debug|info|exception|warning|error|critical)\(")
def validate_log_translations(logical_line, filename):
"""Check if log levels has translations and if it's correct.
S369
S370
S371
S372
"""
# NOTE(Kezar): sahara/tests is included because we don't require translations
# in tests. sahara/openstack/common is included because it's code imported
# from oslo that we never change. sahara/db/templates provides a separate
# cli interface, so we don't want to translate it.
ignore_dirs = ["sahara/db/templates",
"sahara/tests",
"sahara/openstack/common"]
for directory in ignore_dirs:
if directory in filename:
return
# Translations are not required in the test directory.
# This will not catch all instances of violations, just direct
# misuse of the form LOG.info('Message').
msg = "S369: LOG.info messages require translations `_LI()`!"
if log_translation_LI.search(logical_line):
yield (0, msg)
msg = ("S370: LOG.exception and LOG.error messages require "
"translations `_LE()`!")
if log_translation_LE.search(logical_line):
yield (0, msg)
msg = "S371: LOG.warning messages require translations `_LW()`!"
if log_translation_LW.search(logical_line):
yield (0, msg)
msg = "S372: LOG.critical messages require translations `_LC()`!"
if log_translation_LC.search(logical_line):
yield (0, msg)
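# Examples of lines that the check above would flag (illustrative only):
#   LOG.info('found %s records' % count)        -> S369, use _LI()
#   LOG.exception('failed to launch cluster')   -> S370, use _LE()
#   LOG.warning('node is gone')                 -> S371, use _LW()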
def no_translate_debug_logs(logical_line, filename):
"""Check for 'LOG.debug(_('
As per our translation policy,
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
we shouldn't translate debug level logs.
* This check assumes that 'LOG' is a logger.
* Use filename so we can start enforcing this in specific folders instead
of needing to do so all at once.
S373
"""
msg = "S373 Don't translate debug level logs"
if logical_line.startswith("LOG.debug(_("):
yield(0, msg)
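# Example of a line the debug-level check above would flag (illustrative only):
#   LOG.debug(_('refreshing cluster state'))    -> S373, debug logs should not be translated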
def accepted_log_levels(logical_line, filename):
"""In Sahara we use only 5 log levels.
This check is needed because we don't want new contributors to
use deprecated log levels.
S373
"""
# NOTE(Kezar): sahara/tests is included because we don't require translations
# in tests. sahara/openstack/common is included because it's code imported
# from oslo that we never change. sahara/db/templates provides a separate
# cli interface, so we don't want to translate it.
ignore_dirs = ["sahara/db/templates",
"sahara/tests",
"sahara/openstack/common"]
for directory in ignore_dirs:
if directory in filename:
return
msg = ("S373 You used deprecated log level. Accepted log levels are "
"debug|info|warning|error|critical")
if logical_line.startswith("LOG."):
if not accepted_log_level.search(logical_line):
yield(0, msg)
| apache-2.0 |
codasus/django-blogages | blogages/django/contrib/gis/gdal/tests/test_ds.py | 233 | 10504 | import os, os.path, unittest
from django.contrib.gis.gdal import DataSource, Envelope, OGRGeometry, OGRException, OGRIndexError, GDAL_VERSION
from django.contrib.gis.gdal.field import OFTReal, OFTInteger, OFTString
from django.contrib.gis.geometry.test_data import get_ds_file, TestDS
# List of acceptable data sources.
ds_list = (TestDS('test_point', nfeat=5, nfld=3, geom='POINT', gtype=1, driver='ESRI Shapefile',
fields={'dbl' : OFTReal, 'int' : OFTInteger, 'str' : OFTString,},
extent=(-1.35011,0.166623,-0.524093,0.824508), # Got extent from QGIS
srs_wkt='GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]]',
field_values={'dbl' : [float(i) for i in range(1, 6)], 'int' : range(1, 6), 'str' : [str(i) for i in range(1, 6)]},
fids=range(5)),
TestDS('test_vrt', ext='vrt', nfeat=3, nfld=3, geom='POINT', gtype='Point25D', driver='VRT',
                  fields={'POINT_X' : OFTString, 'POINT_Y' : OFTString, 'NUM' : OFTString}, # VRT uses CSV, so all field types are OFTString.
extent=(1.0, 2.0, 100.0, 523.5), # Min/Max from CSV
field_values={'POINT_X' : ['1.0', '5.0', '100.0'], 'POINT_Y' : ['2.0', '23.0', '523.5'], 'NUM' : ['5', '17', '23']},
fids=range(1,4)),
TestDS('test_poly', nfeat=3, nfld=3, geom='POLYGON', gtype=3,
driver='ESRI Shapefile',
fields={'float' : OFTReal, 'int' : OFTInteger, 'str' : OFTString,},
extent=(-1.01513,-0.558245,0.161876,0.839637), # Got extent from QGIS
srs_wkt='GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]]'),
)
bad_ds = (TestDS('foo'),
)
class DataSourceTest(unittest.TestCase):
def test01_valid_shp(self):
"Testing valid SHP Data Source files."
for source in ds_list:
# Loading up the data source
ds = DataSource(source.ds)
# Making sure the layer count is what's expected (only 1 layer in a SHP file)
self.assertEqual(1, len(ds))
# Making sure GetName works
self.assertEqual(source.ds, ds.name)
# Making sure the driver name matches up
self.assertEqual(source.driver, str(ds.driver))
# Making sure indexing works
try:
ds[len(ds)]
except OGRIndexError:
pass
else:
self.fail('Expected an IndexError!')
def test02_invalid_shp(self):
"Testing invalid SHP files for the Data Source."
for source in bad_ds:
self.assertRaises(OGRException, DataSource, source.ds)
def test03a_layers(self):
"Testing Data Source Layers."
print "\nBEGIN - expecting out of range feature id error; safe to ignore.\n"
for source in ds_list:
ds = DataSource(source.ds)
# Incrementing through each layer, this tests DataSource.__iter__
for layer in ds:
# Making sure we get the number of features we expect
self.assertEqual(len(layer), source.nfeat)
# Making sure we get the number of fields we expect
self.assertEqual(source.nfld, layer.num_fields)
self.assertEqual(source.nfld, len(layer.fields))
                # Testing the layer's extent (an Envelope), and its properties
if source.driver == 'VRT' and (GDAL_VERSION > (1, 7, 0) and GDAL_VERSION < (1, 7, 3)):
# There's a known GDAL regression with retrieving the extent
# of a VRT layer in versions 1.7.0-1.7.2:
# http://trac.osgeo.org/gdal/ticket/3783
pass
else:
self.assertEqual(True, isinstance(layer.extent, Envelope))
self.assertAlmostEqual(source.extent[0], layer.extent.min_x, 5)
self.assertAlmostEqual(source.extent[1], layer.extent.min_y, 5)
self.assertAlmostEqual(source.extent[2], layer.extent.max_x, 5)
self.assertAlmostEqual(source.extent[3], layer.extent.max_y, 5)
# Now checking the field names.
flds = layer.fields
for f in flds: self.assertEqual(True, f in source.fields)
# Negative FIDs are not allowed.
self.assertRaises(OGRIndexError, layer.__getitem__, -1)
self.assertRaises(OGRIndexError, layer.__getitem__, 50000)
if hasattr(source, 'field_values'):
fld_names = source.field_values.keys()
# Testing `Layer.get_fields` (which uses Layer.__iter__)
for fld_name in fld_names:
self.assertEqual(source.field_values[fld_name], layer.get_fields(fld_name))
# Testing `Layer.__getitem__`.
for i, fid in enumerate(source.fids):
feat = layer[fid]
self.assertEqual(fid, feat.fid)
# Maybe this should be in the test below, but we might as well test
# the feature values here while in this loop.
for fld_name in fld_names:
self.assertEqual(source.field_values[fld_name][i], feat.get(fld_name))
print "\nEND - expecting out of range feature id error; safe to ignore."
def test03b_layer_slice(self):
"Test indexing and slicing on Layers."
# Using the first data-source because the same slice
# can be used for both the layer and the control values.
source = ds_list[0]
ds = DataSource(source.ds)
sl = slice(1, 3)
feats = ds[0][sl]
for fld_name in ds[0].fields:
test_vals = [feat.get(fld_name) for feat in feats]
control_vals = source.field_values[fld_name][sl]
self.assertEqual(control_vals, test_vals)
def test03c_layer_references(self):
"Test to make sure Layer access is still available without the DataSource."
source = ds_list[0]
# See ticket #9448.
def get_layer():
# This DataSource object is not accessible outside this
# scope. However, a reference should still be kept alive
# on the `Layer` returned.
ds = DataSource(source.ds)
return ds[0]
# Making sure we can call OGR routines on the Layer returned.
lyr = get_layer()
self.assertEqual(source.nfeat, len(lyr))
self.assertEqual(source.gtype, lyr.geom_type.num)
def test04_features(self):
"Testing Data Source Features."
for source in ds_list:
ds = DataSource(source.ds)
# Incrementing through each layer
for layer in ds:
# Incrementing through each feature in the layer
for feat in layer:
# Making sure the number of fields, and the geometry type
# are what's expected.
self.assertEqual(source.nfld, len(list(feat)))
self.assertEqual(source.gtype, feat.geom_type)
# Making sure the fields match to an appropriate OFT type.
for k, v in source.fields.items():
# Making sure we get the proper OGR Field instance, using
# a string value index for the feature.
self.assertEqual(True, isinstance(feat[k], v))
# Testing Feature.__iter__
for fld in feat: self.assertEqual(True, fld.name in source.fields.keys())
def test05_geometries(self):
"Testing Geometries from Data Source Features."
for source in ds_list:
ds = DataSource(source.ds)
# Incrementing through each layer and feature.
for layer in ds:
for feat in layer:
g = feat.geom
# Making sure we get the right Geometry name & type
self.assertEqual(source.geom, g.geom_name)
self.assertEqual(source.gtype, g.geom_type)
# Making sure the SpatialReference is as expected.
if hasattr(source, 'srs_wkt'):
self.assertEqual(source.srs_wkt, g.srs.wkt)
def test06_spatial_filter(self):
"Testing the Layer.spatial_filter property."
ds = DataSource(get_ds_file('cities', 'shp'))
lyr = ds[0]
# When not set, it should be None.
self.assertEqual(None, lyr.spatial_filter)
        # Must be set to an OGRGeometry or a 4-tuple.
self.assertRaises(TypeError, lyr._set_spatial_filter, 'foo')
# Setting the spatial filter with a tuple/list with the extent of
# a buffer centering around Pueblo.
self.assertRaises(ValueError, lyr._set_spatial_filter, range(5))
filter_extent = (-105.609252, 37.255001, -103.609252, 39.255001)
lyr.spatial_filter = (-105.609252, 37.255001, -103.609252, 39.255001)
self.assertEqual(OGRGeometry.from_bbox(filter_extent), lyr.spatial_filter)
feats = [feat for feat in lyr]
self.assertEqual(1, len(feats))
self.assertEqual('Pueblo', feats[0].get('Name'))
# Setting the spatial filter with an OGRGeometry for buffer centering
# around Houston.
filter_geom = OGRGeometry('POLYGON((-96.363151 28.763374,-94.363151 28.763374,-94.363151 30.763374,-96.363151 30.763374,-96.363151 28.763374))')
lyr.spatial_filter = filter_geom
self.assertEqual(filter_geom, lyr.spatial_filter)
feats = [feat for feat in lyr]
self.assertEqual(1, len(feats))
self.assertEqual('Houston', feats[0].get('Name'))
# Clearing the spatial filter by setting it to None. Now
# should indicate that there are 3 features in the Layer.
lyr.spatial_filter = None
self.assertEqual(3, len(lyr))
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(DataSourceTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
| mit |
Daniex/horizon | openstack_dashboard/dashboards/admin/instances/views.py | 8 | 7555 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.instances \
import forms as project_forms
from openstack_dashboard.dashboards.admin.instances \
import tables as project_tables
from openstack_dashboard.dashboards.project.instances import views
from openstack_dashboard.dashboards.project.instances.workflows \
import update_instance
# re-use console from project.instances.views to make reflection work
def console(args, **kvargs):
return views.console(args, **kvargs)
# re-use vnc from project.instances.views to make reflection work
def vnc(args, **kvargs):
return views.vnc(args, **kvargs)
# re-use spice from project.instances.views to make reflection work
def spice(args, **kvargs):
return views.spice(args, **kvargs)
# re-use rdp from project.instances.views to make reflection work
def rdp(args, **kvargs):
return views.rdp(args, **kvargs)
class AdminUpdateView(views.UpdateView):
workflow_class = update_instance.AdminUpdateInstance
success_url = reverse_lazy("horizon:admin:instances:index")
class AdminIndexView(tables.DataTableView):
table_class = project_tables.AdminInstancesTable
template_name = 'admin/instances/index.html'
page_title = _("Instances")
def has_more_data(self, table):
return self._more
def get_data(self):
instances = []
marker = self.request.GET.get(
project_tables.AdminInstancesTable._meta.pagination_param, None)
search_opts = self.get_filters({'marker': marker, 'paginate': True})
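        # Illustrative shape (assumed): after get_filters() and the project
        # name translation below, search_opts may look like
        #     {'marker': '<server-id>', 'paginate': True, 'tenant_id': '<id>'}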
# Gather our tenants to correlate against IDs
try:
tenants, has_more = api.keystone.tenant_list(self.request)
except Exception:
tenants = []
msg = _('Unable to retrieve instance project information.')
exceptions.handle(self.request, msg)
if 'project' in search_opts:
ten_filter_ids = [t.id for t in tenants
if t.name == search_opts['project']]
del search_opts['project']
if len(ten_filter_ids) > 0:
search_opts['tenant_id'] = ten_filter_ids[0]
else:
self._more = False
return []
try:
instances, self._more = api.nova.server_list(
self.request,
search_opts=search_opts,
all_tenants=True)
except Exception:
self._more = False
exceptions.handle(self.request,
_('Unable to retrieve instance list.'))
if instances:
try:
api.network.servers_update_addresses(self.request, instances,
all_tenants=True)
except Exception:
exceptions.handle(
self.request,
message=_('Unable to retrieve IP addresses from Neutron.'),
ignore=True)
# Gather our flavors to correlate against IDs
try:
flavors = api.nova.flavor_list(self.request)
except Exception:
            # If the flavor list cannot be retrieved, fall back to an empty list.
flavors = []
full_flavors = SortedDict([(f.id, f) for f in flavors])
tenant_dict = SortedDict([(t.id, t) for t in tenants])
# Loop through instances to get flavor and tenant info.
for inst in instances:
flavor_id = inst.flavor["id"]
try:
if flavor_id in full_flavors:
inst.full_flavor = full_flavors[flavor_id]
else:
                    # If the flavor_id is not in the full_flavors dict,
                    # fetch it via the nova API.
inst.full_flavor = api.nova.flavor_get(
self.request, flavor_id)
except Exception:
msg = _('Unable to retrieve instance size information.')
exceptions.handle(self.request, msg)
tenant = tenant_dict.get(inst.tenant_id, None)
inst.tenant_name = getattr(tenant, "name", None)
return instances
def get_filters(self, filters):
filter_field = self.table.get_filter_field()
filter_action = self.table._meta._filter_action
if filter_action.is_api_filter(filter_field):
filter_string = self.table.get_filter_string()
if filter_field and filter_string:
filters[filter_field] = filter_string
return filters
class LiveMigrateView(forms.ModalFormView):
form_class = project_forms.LiveMigrateForm
template_name = 'admin/instances/live_migrate.html'
context_object_name = 'instance'
success_url = reverse_lazy("horizon:admin:instances:index")
page_title = _("Live Migrate")
def get_context_data(self, **kwargs):
context = super(LiveMigrateView, self).get_context_data(**kwargs)
context["instance_id"] = self.kwargs['instance_id']
return context
@memoized.memoized_method
def get_hosts(self, *args, **kwargs):
try:
return api.nova.host_list(self.request)
except Exception:
redirect = reverse("horizon:admin:instances:index")
msg = _('Unable to retrieve host information.')
exceptions.handle(self.request, msg, redirect=redirect)
@memoized.memoized_method
def get_object(self, *args, **kwargs):
instance_id = self.kwargs['instance_id']
try:
return api.nova.server_get(self.request, instance_id)
except Exception:
redirect = reverse("horizon:admin:instances:index")
msg = _('Unable to retrieve instance details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
initial = super(LiveMigrateView, self).get_initial()
_object = self.get_object()
if _object:
current_host = getattr(_object, 'OS-EXT-SRV-ATTR:host', '')
initial.update({'instance_id': self.kwargs['instance_id'],
'current_host': current_host,
'hosts': self.get_hosts()})
return initial
class DetailView(views.DetailView):
redirect_url = 'horizon:admin:instances:index'
| apache-2.0 |
x303597316/hue | desktop/core/ext-py/Paste-2.0.1/tests/test_auth/test_auth_cookie.py | 47 | 1527 | # (c) 2005 Clark C. Evans
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from six.moves import xrange
import six
from paste.auth import cookie
from paste.wsgilib import raw_interactive, dump_environ
from paste.response import header_value
from paste.httpexceptions import *
def build(application,setenv, *args, **kwargs):
def setter(environ, start_response):
save = environ['paste.auth.cookie'].append
for (k,v) in setenv.items():
save(k)
environ[k] = v
return application(environ, start_response)
return cookie.middleware(setter,*args,**kwargs)
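# Background note (assumed from the paste.auth.cookie API, not stated here):
# cookie.middleware() saves every environ key appended to
# environ['paste.auth.cookie'] into a signed cookie and restores those keys on
# later requests that present the cookie; test_basic() below round-trips
# exactly that.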
def test_noop():
app = build(dump_environ,{})
(status,headers,content,errors) = \
raw_interactive(app)
assert not header_value(headers,'Set-Cookie')
def test_basic(key='key', val='bingles'):
app = build(dump_environ,{key:val})
(status,headers,content,errors) = \
raw_interactive(app)
value = header_value(headers,'Set-Cookie')
assert "Path=/;" in value
assert "expires=" not in value
cookie = value.split(";")[0]
(status,headers,content,errors) = \
raw_interactive(app,{'HTTP_COOKIE': cookie})
expected = ("%s: %s" % (key,val.replace("\n","\n ")))
if six.PY3:
expected = expected.encode('utf8')
assert expected in content
def test_roundtrip():
roundtrip = str('').join(map(chr, xrange(256)))
test_basic(roundtrip,roundtrip)
| apache-2.0 |
ntddk/pemu | scripts/tracetool/format/simpletrace_stap.py | 84 | 2400 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Generate .stp file that outputs simpletrace binary traces (DTrace with SystemTAP only).
"""
__author__ = "Stefan Hajnoczi <redhat.com>"
__copyright__ = "Copyright (C) 2014, Red Hat, Inc."
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
from tracetool.backend.dtrace import binary, probeprefix
from tracetool.backend.simple import is_string
from tracetool.format.stap import stap_escape
def generate(events, backend):
out('/* This file is autogenerated by tracetool, do not edit. */',
'')
for event_id, e in enumerate(events):
if 'disable' in e.properties:
continue
out('probe %(probeprefix)s.simpletrace.%(name)s = %(probeprefix)s.%(name)s ?',
'{',
probeprefix=probeprefix(),
name=e.name)
# Calculate record size
sizes = ['24'] # sizeof(TraceRecord)
for type_, name in e.args:
name = stap_escape(name)
if is_string(type_):
out(' try {',
' arg%(name)s_str = %(name)s ? user_string_n(%(name)s, 512) : "<null>"',
' } catch {}',
' arg%(name)s_len = strlen(arg%(name)s_str)',
name=name)
sizes.append('4 + arg%s_len' % name)
else:
sizes.append('8')
sizestr = ' + '.join(sizes)
# Generate format string and value pairs for record header and arguments
fields = [('8b', str(event_id)),
('8b', 'gettimeofday_ns()'),
('4b', sizestr),
('4b', 'pid()')]
for type_, name in e.args:
name = stap_escape(name)
if is_string(type_):
fields.extend([('4b', 'arg%s_len' % name),
('.*s', 'arg%s_len, arg%s_str' % (name, name))])
else:
fields.append(('8b', name))
# Emit the entire record in a single SystemTap printf()
fmt_str = '%'.join(fmt for fmt, _ in fields)
arg_str = ', '.join(arg for _, arg in fields)
out(' printf("%%%(fmt_str)s", %(arg_str)s)',
fmt_str=fmt_str, arg_str=arg_str)
out('}')
out()
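# Illustrative output sketch (assumed, not produced verbatim by this file): for
# a trace event with a single string argument named "msg", the generated probe
# body ends in one SystemTap printf() roughly of the form
#
#     printf("%8b%8b%4b%4b%4b%.*s",
#            <event_id>, gettimeofday_ns(), 24 + 4 + argmsg_len, pid(),
#            argmsg_len, argmsg_len, argmsg_str)
#
# i.e. the TraceRecord header (event id, timestamp, record size, pid) followed
# by the length-prefixed string payload.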
| gpl-2.0 |
CSC301H-Fall2013/JuakStore | site-packages/tests/modeltests/or_lookups/tests.py | 150 | 7625 | from __future__ import absolute_import
from datetime import datetime
from operator import attrgetter
from django.db.models import Q
from django.test import TestCase
from .models import Article
class OrLookupsTests(TestCase):
def setUp(self):
self.a1 = Article.objects.create(
headline='Hello', pub_date=datetime(2005, 11, 27)
).pk
self.a2 = Article.objects.create(
headline='Goodbye', pub_date=datetime(2005, 11, 28)
).pk
self.a3 = Article.objects.create(
headline='Hello and goodbye', pub_date=datetime(2005, 11, 29)
).pk
def test_filter_or(self):
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Hello') | Article.objects.filter(headline__startswith='Goodbye'), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.filter(headline__contains='Hello') | Article.objects.filter(headline__contains='bye'), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iexact='Hello') | Article.objects.filter(headline__contains='ood'), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.filter(Q(headline__startswith='Hello') | Q(headline__startswith='Goodbye')), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline")
)
def test_stages(self):
# You can shorten this syntax with code like the following, which is
# especially useful if building the query in stages:
articles = Article.objects.all()
self.assertQuerysetEqual(
articles.filter(headline__startswith='Hello') & articles.filter(headline__startswith='Goodbye'),
[]
)
self.assertQuerysetEqual(
articles.filter(headline__startswith='Hello') & articles.filter(headline__contains='bye'), [
'Hello and goodbye'
],
attrgetter("headline")
)
def test_pk_q(self):
self.assertQuerysetEqual(
Article.objects.filter(Q(pk=self.a1) | Q(pk=self.a2)), [
'Hello',
'Goodbye'
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.filter(Q(pk=self.a1) | Q(pk=self.a2) | Q(pk=self.a3)), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline"),
)
def test_pk_in(self):
self.assertQuerysetEqual(
Article.objects.filter(pk__in=[self.a1, self.a2, self.a3]), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline"),
)
self.assertQuerysetEqual(
Article.objects.filter(pk__in=(self.a1, self.a2, self.a3)), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline"),
)
self.assertQuerysetEqual(
Article.objects.filter(pk__in=[self.a1, self.a2, self.a3, 40000]), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline"),
)
def test_q_negated(self):
# Q objects can be negated
self.assertQuerysetEqual(
Article.objects.filter(Q(pk=self.a1) | ~Q(pk=self.a2)), [
'Hello',
'Hello and goodbye'
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.filter(~Q(pk=self.a1) & ~Q(pk=self.a2)), [
'Hello and goodbye'
],
attrgetter("headline"),
)
# This allows for more complex queries than filter() and exclude()
# alone would allow
self.assertQuerysetEqual(
Article.objects.filter(Q(pk=self.a1) & (~Q(pk=self.a2) | Q(pk=self.a3))), [
'Hello'
],
attrgetter("headline"),
)
def test_complex_filter(self):
# The 'complex_filter' method supports framework features such as
# 'limit_choices_to' which normally take a single dictionary of lookup
# arguments but need to support arbitrary queries via Q objects too.
self.assertQuerysetEqual(
Article.objects.complex_filter({'pk': self.a1}), [
'Hello'
],
attrgetter("headline"),
)
self.assertQuerysetEqual(
Article.objects.complex_filter(Q(pk=self.a1) | Q(pk=self.a2)), [
'Hello',
'Goodbye'
],
attrgetter("headline"),
)
def test_empty_in(self):
# Passing "in" an empty list returns no results ...
self.assertQuerysetEqual(
Article.objects.filter(pk__in=[]),
[]
)
# ... but can return results if we OR it with another query.
self.assertQuerysetEqual(
Article.objects.filter(Q(pk__in=[]) | Q(headline__icontains='goodbye')), [
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline"),
)
def test_q_and(self):
# Q arg objects are ANDed
self.assertQuerysetEqual(
Article.objects.filter(Q(headline__startswith='Hello'), Q(headline__contains='bye')), [
'Hello and goodbye'
],
attrgetter("headline")
)
# Q arg AND order is irrelevant
self.assertQuerysetEqual(
Article.objects.filter(Q(headline__contains='bye'), headline__startswith='Hello'), [
'Hello and goodbye'
],
attrgetter("headline"),
)
self.assertQuerysetEqual(
Article.objects.filter(Q(headline__startswith='Hello') & Q(headline__startswith='Goodbye')),
[]
)
def test_q_exclude(self):
self.assertQuerysetEqual(
Article.objects.exclude(Q(headline__startswith='Hello')), [
'Goodbye'
],
attrgetter("headline")
)
def test_other_arg_queries(self):
# Try some arg queries with operations other than filter.
self.assertEqual(
Article.objects.get(Q(headline__startswith='Hello'), Q(headline__contains='bye')).headline,
'Hello and goodbye'
)
self.assertEqual(
Article.objects.filter(Q(headline__startswith='Hello') | Q(headline__contains='bye')).count(),
3
)
self.assertQuerysetEqual(
Article.objects.filter(Q(headline__startswith='Hello'), Q(headline__contains='bye')).values(), [
{"headline": "Hello and goodbye", "id": self.a3, "pub_date": datetime(2005, 11, 29)},
],
lambda o: o,
)
self.assertEqual(
Article.objects.filter(Q(headline__startswith='Hello')).in_bulk([self.a1, self.a2]),
{self.a1: Article.objects.get(pk=self.a1)}
)
| mit |
jjx02230808/project0223 | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
zibneuro/brainvispy | generators/uniformpointcloud.py | 1 | 2947 | import vtk
import math
import numpy as np
from core.settings import Settings
class UPoint:
def __init__(self, p, dist_to_closest_point):
self.p = p
self.dist_to_closest_point = dist_to_closest_point
def compute_energy(self):
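    # Penalty model (describing the implementation below): spacing larger than
    # Settings.inter_neuron_distance costs the excess linearly, while spacing
    # that is too small costs five times the shortfall, biasing the cloud
    # towards the desired minimum spacing.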
diff = self.dist_to_closest_point - Settings.inter_neuron_distance
if diff >= 0:
return diff
return -5*diff
class UniformPointCloud:
def __init__(self, target_point):
self.__target_point = np.array([target_point[0], target_point[1], target_point[2]])
self.__points = vtk.vtkPolyData()
self.__points.SetPoints(vtk.vtkPoints())
self.__point_locator = vtk.vtkPointLocator()
self.__point_locator.SetDataSet(self.__points)
def add_single_point(self, p):
self.__points.GetPoints().InsertNextPoint(p)
self.__points.Modified()
self.__point_locator.Update()
def insert_best_point(self, point_candidates):
if self.__points.GetNumberOfPoints() <= 0:
point = self.__select_point_closest_to_target(point_candidates)
else:
point = self.__select_best_point(point_candidates)
self.__points.GetPoints().InsertNextPoint(point)
self.__points.Modified()
self.__point_locator.Update()
return point
def __select_point_closest_to_target(self, points):
closest_point = points[0]
min_dist = self.__compute_distance_to_target(closest_point)
for p in points[1:]:
dist = self.__compute_distance_to_target(p)
if dist < min_dist:
min_dist = dist
closest_point = p
return closest_point
def __select_best_point(self, points):
evaluated_points = list()
for p in points:
evaluated_points.append(UPoint(p, self.__compute_distance_to_closest_point(p)))
evaluated_points.sort(key = lambda point: point.compute_energy())
min_dist_to_target = self.__compute_distance_to_target(evaluated_points[0].p)
best_point = evaluated_points[0].p
return best_point
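    # NOTE (added observation): the early return above short-circuits the
    # refinement below, leaving the remainder of this method unreachable.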
list_end = max(len(evaluated_points)//20, 1)
for evaluated_point in evaluated_points[1:list_end]:
dist_to_target = self.__compute_distance_to_target(evaluated_point.p)
if dist_to_target < min_dist_to_target:
min_dist_to_target = dist_to_target
best_point = evaluated_point.p
return best_point
def __compute_distance_to_target(self, p):
return np.linalg.norm(p - self.__target_point)
def __compute_distance_to_closest_point(self, p):
# Make sure there are points in the point cloud
if self.__points.GetNumberOfPoints() <= 0:
return float("inf")
# Find the point closest to 'p'
ids = vtk.vtkIdList()
self.__point_locator.FindClosestNPoints(1, p, ids)
closest_point = self.__points.GetPoint(ids.GetId(0))
    # Return the in-plane (XY) distance between 'p' and the closest point
x = p[0] - closest_point[0]
y = p[1] - closest_point[1]
return math.sqrt(x*x + y*y)
#return np.linalg.norm(p - closest_point)
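# Illustrative usage sketch (assumed, not part of the original module):
#
#     cloud = UniformPointCloud([0.0, 0.0, 0.0])
#     candidates = [np.array([1.0, 0.0, 0.0]), np.array([0.0, 2.0, 0.0])]
#     first = cloud.insert_best_point(candidates)   # closest to the target
#     later = cloud.insert_best_point(candidates)   # lowest-energy candidate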
| bsd-3-clause |
openmicroscopy/omero-marshal | omero_marshal/decode/decoders/screen.py | 1 | 1831 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Glencoe Software, Inc. All rights reserved.
#
# This software is distributed under the terms described by the LICENCE file
# you can find at the root of the distribution bundle.
# If the file is missing please request a copy by contacting
# [email protected].
#
from ... import SCHEMA_VERSION
from .annotation import AnnotatableDecoder
from omero.model import ScreenI
class Screen201501Decoder(AnnotatableDecoder):
TYPE = 'http://www.openmicroscopy.org/Schemas/SPW/2015-01#Screen'
OMERO_CLASS = ScreenI
def decode(self, data):
v = super(Screen201501Decoder, self).decode(data)
self.set_property(v, 'name', data.get('Name'))
self.set_property(v, 'description', data.get('Description'))
self.set_property(v, 'protocolDescription',
data.get('ProtocolDescription'))
self.set_property(v, 'protocolIdentifier',
data.get('ProtocolIdentifier'))
self.set_property(v, 'reagentSetDescription',
data.get('ReagentSetDescription'))
self.set_property(v, 'reagentSetIdentifier',
data.get('ReagentSetIdentifier'))
self.set_property(v, 'type', data.get('Type'))
for plate in data.get('Plates', list()):
plate_decoder = self.ctx.get_decoder(plate['@type'])
v.linkPlate(plate_decoder.decode(plate))
return v
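# Illustrative input sketch (assumed, not part of the original module): the
# decoder consumes a JSON-LD style dictionary keyed by schema element names,
# e.g.
#
#     {
#         '@type': Screen201501Decoder.TYPE,
#         'Name': 'screen-1',
#         'Description': 'an example screen',
#         'Plates': [],
#     }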
class Screen201606Decoder(Screen201501Decoder):
TYPE = 'http://www.openmicroscopy.org/Schemas/OME/2016-06#Screen'
if SCHEMA_VERSION == '2015-01':
decoder = (Screen201501Decoder.TYPE, Screen201501Decoder)
elif SCHEMA_VERSION == '2016-06':
decoder = (Screen201606Decoder.TYPE, Screen201606Decoder)
ScreenDecoder = decoder[1]
| gpl-2.0 |
tellesnobrega/storm_plugin | sahara/tests/unit/service/validation/test_cluster_template_create_validation.py | 5 | 8632 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.service import api
from sahara.service.validations import cluster_templates as ct
from sahara.tests.unit.service.validation import utils as u
class TestClusterTemplateCreateValidation(u.ValidationTestCase):
def setUp(self):
super(TestClusterTemplateCreateValidation, self).setUp()
self._create_object_fun = ct.check_cluster_template_create
self.scheme = ct.CLUSTER_TEMPLATE_SCHEMA
api.plugin_base.setup_plugins()
def test_cluster_template_create_v_cluster_configs(self):
self._assert_cluster_configs_validation()
def test_cluster_template_create_v_ng(self):
self._assert_create_object_validation(
data={
'name': "test-name",
'plugin_name': "vanilla",
'hadoop_version': "1.2.1",
'node_groups': [
{'name': 'a'}
]
},
bad_req_i=(1, 'VALIDATION_ERROR',
"{'name': 'a'} is not valid under "
"any of the given schemas")
)
self._assert_create_object_validation(
data={
'name': "test-name",
'plugin_name': "vanilla",
'hadoop_version': "1.2.1",
'node_groups': [
{'name': 'a',
'flavor_id': '42'}
]
},
bad_req_i=(1, "VALIDATION_ERROR",
"{'name': 'a', 'flavor_id': '42'} "
"is not valid under any of the given schemas")
)
self._assert_create_object_validation(
data={
'name': "test-name",
'plugin_name': "vanilla",
'hadoop_version': "1.2.1",
'node_groups': [
{'name': 'a',
'flavor_id': '42',
'node_processes': ['namenode']}
]
},
bad_req_i=(1, "VALIDATION_ERROR",
"{'node_processes': ['namenode'], "
"'name': 'a', "
"'flavor_id': '42'} "
"is not valid under any of the given schemas")
)
self._assert_create_object_validation(
data={
'name': "test-name",
'plugin_name': "vanilla",
'hadoop_version': "1.2.1",
'node_groups': [
{
'name': 'a',
'flavor_id': '42',
'node_processes': ['namenode'],
'count': 1
},
{
"node_group_template_id": "550e8400-e29b-41d4-a716-"
"446655440000",
"name": "a",
'count': 2
}
]
},
bad_req_i=(1, "INVALID_REFERENCE",
"Duplicates in node group names are detected")
)
def test_cluster_template_create_v_ng_templates(self):
self._assert_create_object_validation(
data={
'name': "test-name",
'plugin_name': "vanilla",
'hadoop_version': "1.2.1",
'node_groups': [
{
"node_group_template_id": "",
"name": "test",
}
]
},
bad_req_i=(1, "VALIDATION_ERROR",
"{'node_group_template_id': '', 'name': 'test'} "
"is not valid under any of the given schemas")
)
self._assert_create_object_validation(
data={
'name': "test-name",
'plugin_name': "vanilla",
'hadoop_version': "1.2.1",
'node_groups': [
{
"node_group_template_id": "test",
"name": "test",
'count': 3
}
]
},
bad_req_i=(1, "VALIDATION_ERROR",
"{'count': 3, "
"'node_group_template_id': 'test', "
"'name': 'test'} "
"is not valid under any of the given schemas")
)
def test_cluster_template_create_v_ng_templates_right(self):
self._assert_create_object_validation(
data={
'name': "test-name",
'plugin_name': "vanilla",
'hadoop_version': "1.2.1",
'node_groups': [
{
"node_group_template_id": "550e8400-e29b-41d4-a716-"
"446655440000",
"name": "test",
'count': 3
}
]
},
)
def test_cluster_template_create_v_name_base(self):
data = {
'name': "testname",
'plugin_name': "vanilla",
'hadoop_version': "1.2.1"
}
self._assert_valid_name_hostname_validation(data)
def test_cluster_template_create_v_types(self):
data = {
'name': "testname",
'plugin_name': "vanilla",
'hadoop_version': "1.2.1"
}
self._assert_types(data)
def test_cluster_template_create_v_required(self):
self._assert_create_object_validation(
data={},
bad_req_i=(1, "VALIDATION_ERROR",
u"'name' is a required property")
)
self._assert_create_object_validation(
data={
'name': 'test-name'
},
bad_req_i=(1, "VALIDATION_ERROR",
u"'plugin_name' is a required property")
)
self._assert_create_object_validation(
data={
'name': 'testname',
'plugin_name': 'vanilla'
},
bad_req_i=(1, "VALIDATION_ERROR",
u"'hadoop_version' is a required property")
)
def test_cluster_template_create_v_right(self):
self._assert_create_object_validation(
data={
'name': 'testname',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1'
})
def test_cluster_template_create_v_plugin_name_exists(self):
self._assert_create_object_validation(
data={
'name': "test-name",
'plugin_name': "wrong_plugin",
'hadoop_version': "1.2.1",
},
bad_req_i=(1, 'INVALID_REFERENCE',
"Sahara doesn't contain plugin "
"with name 'wrong_plugin'")
)
def test_cluster_template_create_v_unique_cl(self):
data = {
'name': 'test',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1'
}
self._assert_create_object_validation(
data=data,
bad_req_i=(1, 'NAME_ALREADY_EXISTS',
"Cluster template with name 'test' already exists")
)
def test_cluster_template_wrong_neutron_mngmt_net(self):
data = {
'name': 'test-template',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'neutron_management_network': '53a36917-ab9f-4589'
'-94ce-b6df85a68332'
}
self._assert_create_object_validation(
data=data,
bad_req_i=(1, 'INVALID_REFERENCE', "Network 53a36917-ab9f-4589-"
"94ce-b6df85a68332 not found")
)
def test_cluster_create_v_default_image_required_tags(self):
self._assert_cluster_default_image_tags_validation()
| apache-2.0 |
nwjs/chromium.src | tools/cr/cr/actions/runner.py | 10 | 2365 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the Runner base class."""
import cr
class Runner(cr.Action, cr.Plugin.Type):
"""Base class for implementing target runners.
Runner implementations must implement the Kill, Run and Test methods.
"""
SELECTOR_ARG = '--runner'
SELECTOR = 'CR_RUNNER'
SELECTOR_HELP = 'Sets the runner to use to execute the target.'
@classmethod
def AddArguments(cls, command, parser):
parser.add_argument(
'--test', dest='CR_TEST_TYPE',
choices=cr.Target.TEST_TYPES,
default=None,
help="""
Sets the test type to use,
defaults to choosing based on the target.
Set to 'no' to force it to not be a test.
"""
)
cls.AddSelectorArg(command, parser)
@cr.Plugin.activemethod
def Kill(self, targets, arguments):
"""Stops all running processes that match a target."""
raise NotImplementedError('Must be overridden.')
@cr.Plugin.activemethod
def Run(self, target, arguments):
"""Run a new copy of a runnable target."""
raise NotImplementedError('Must be overridden.')
@cr.Plugin.activemethod
def Test(self, target, arguments):
"""Run a test target."""
raise NotImplementedError('Must be overridden.')
@cr.Plugin.activemethod
def Invoke(self, targets, arguments):
"""Invoke a target.
This dispatches to either Test or Run depending on the target type.
"""
for target in targets:
if target.is_test:
self.Test(target, arguments)
else:
self.Run(target, arguments)
@cr.Plugin.activemethod
def Restart(self, targets, arguments):
"""Force a target to restart if it is already running.
Default implementation is to do a Kill Invoke sequence.
Do not call the base version if you implement a more efficient one.
"""
self.Kill(targets, [])
self.Invoke(targets, arguments)
class SkipRunner(Runner):
"""A Runner the user chooses to bypass the run step of a command."""
@property
def priority(self):
return super(SkipRunner, self).priority - 1
def Kill(self, targets, arguments):
pass
def Run(self, target, arguments):
pass
def Test(self, target, arguments):
pass
| bsd-3-clause |