repo_name | path | copies | size | content | license
---|---|---|---|---|---|
mjschultz/ansible-modules-extras | notification/typetalk.py | 18 | 3693 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: typetalk
version_added: "1.6"
short_description: Send a message to typetalk
description:
  - Send a message to Typetalk using the Typetalk API (http://developers.typetalk.in/)
options:
client_id:
description:
- OAuth2 client ID
required: true
client_secret:
description:
- OAuth2 client secret
required: true
topic:
description:
      - topic ID to post the message to
required: true
msg:
description:
- message body
required: true
requirements: [ json ]
author: "Takashi Someda (@tksmd)"
'''
EXAMPLES = '''
- typetalk: client_id=12345 client_secret=12345 topic=1 msg="install completed"
'''
import urllib
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
json = None
def do_request(module, url, params, headers=None):
data = urllib.urlencode(params)
if headers is None:
headers = dict()
headers = dict(headers, **{
'User-Agent': 'Ansible/typetalk module',
})
r, info = fetch_url(module, url, data=data, headers=headers)
if info['status'] != 200:
exc = ConnectionError(info['msg'])
exc.code = info['status']
raise exc
return r
def get_access_token(module, client_id, client_secret):
    params = {
        'client_id': client_id,
        'client_secret': client_secret,
        'grant_type': 'client_credentials',
        'scope': 'topic.post'
    }
    res = do_request(module, 'https://typetalk.in/oauth2/access_token', params)
    return json.load(res)['access_token']
def send_message(module, client_id, client_secret, topic, msg):
"""
send message to typetalk
"""
try:
        access_token = get_access_token(module, client_id, client_secret)
url = 'https://typetalk.in/api/v1/topics/%d' % topic
headers = {
'Authorization': 'Bearer %s' % access_token,
}
do_request(module, url, {'message': msg}, headers)
return True, {'access_token': access_token}
except ConnectionError, e:
return False, e
def main():
module = AnsibleModule(
argument_spec=dict(
client_id=dict(required=True),
client_secret=dict(required=True),
topic=dict(required=True, type='int'),
msg=dict(required=True),
),
supports_check_mode=False
)
if not json:
module.fail_json(msg="json module is required")
client_id = module.params["client_id"]
client_secret = module.params["client_secret"]
topic = module.params["topic"]
msg = module.params["msg"]
res, error = send_message(module, client_id, client_secret, topic, msg)
if not res:
        module.fail_json(msg='failed to send message, response code %s' % error.code)
module.exit_json(changed=True, topic=topic, msg=msg)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 |
Yannig/ansible | lib/ansible/module_utils/api.py | 103 | 4446 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2015 Brian Coca, <[email protected]>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
This module adds shared support for generic api modules
In order to use this module, include it as part of a custom
module as shown below.
The 'api' module provides the following common argument specs:
* rate limit spec
- rate: number of requests per time unit (int)
- rate_limit: time window in which the limit is applied in seconds
* retry spec
- retries: number of attempts
- retry_pause: delay between attempts in seconds
"""
import time
def rate_limit_argument_spec(spec=None):
"""Creates an argument spec for working with rate limiting"""
arg_spec = (dict(
rate=dict(type='int'),
rate_limit=dict(type='int'),
))
if spec:
arg_spec.update(spec)
return arg_spec
def retry_argument_spec(spec=None):
"""Creates an argument spec for working with retrying"""
arg_spec = (dict(
retries=dict(type='int'),
retry_pause=dict(type='float', default=1),
))
if spec:
arg_spec.update(spec)
return arg_spec
def basic_auth_argument_spec(spec=None):
arg_spec = (dict(
api_username=dict(type='str'),
api_password=dict(type='str', no_log=True),
api_url=dict(type='str'),
validate_certs=dict(type='bool', default=True)
))
if spec:
arg_spec.update(spec)
return arg_spec
def rate_limit(rate=None, rate_limit=None):
"""rate limiting decorator"""
minrate = None
if rate is not None and rate_limit is not None:
minrate = float(rate_limit) / float(rate)
def wrapper(f):
last = [0.0]
def ratelimited(*args, **kwargs):
if minrate is not None:
elapsed = time.clock() - last[0]
left = minrate - elapsed
if left > 0:
time.sleep(left)
last[0] = time.clock()
ret = f(*args, **kwargs)
return ret
return ratelimited
return wrapper
def retry(retries=None, retry_pause=1):
"""Retry decorator"""
def wrapper(f):
        def retried(*args, **kwargs):
            if retries is not None:
                ret = None
                # count attempts per call; rebinding a variable of the enclosing
                # function from inside this closure would raise UnboundLocalError
                retry_count = 0
                while True:
                    retry_count += 1
if retry_count >= retries:
raise Exception("Retry limit exceeded: %d" % retries)
try:
ret = f(*args, **kwargs)
except:
pass
if ret:
break
time.sleep(retry_pause)
return ret
return retried
return wrapper
| gpl-3.0 |
Titulacion-Sistemas/PythonTitulacion-EV | Lib/site-packages/django/contrib/formtools/tests/wizard/wizardtests/tests.py | 116 | 16304 |
from __future__ import unicode_literals
import os
from django import forms
from django.test import TestCase
from django.test.client import RequestFactory
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.formtools.wizard.views import CookieWizardView
from django.utils._os import upath
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = '__all__'
UserFormSet = forms.models.modelformset_factory(User, form=UserForm, extra=2)
class WizardTests(object):
urls = 'django.contrib.formtools.tests.wizard.wizardtests.urls'
def setUp(self):
self.testuser, created = User.objects.get_or_create(username='testuser1')
self.wizard_step_data[0]['form1-user'] = self.testuser.pk
def test_initial_call(self):
response = self.client.get(self.wizard_url)
wizard = response.context['wizard']
self.assertEqual(response.status_code, 200)
self.assertEqual(wizard['steps'].current, 'form1')
self.assertEqual(wizard['steps'].step0, 0)
self.assertEqual(wizard['steps'].step1, 1)
self.assertEqual(wizard['steps'].last, 'form4')
self.assertEqual(wizard['steps'].prev, None)
self.assertEqual(wizard['steps'].next, 'form2')
self.assertEqual(wizard['steps'].count, 4)
def test_form_post_error(self):
response = self.client.post(self.wizard_url, self.wizard_step_1_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
self.assertEqual(response.context['wizard']['form'].errors,
{'name': ['This field is required.'],
'user': ['This field is required.']})
def test_form_post_success(self):
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
wizard = response.context['wizard']
self.assertEqual(response.status_code, 200)
self.assertEqual(wizard['steps'].current, 'form2')
self.assertEqual(wizard['steps'].step0, 1)
self.assertEqual(wizard['steps'].prev, 'form1')
self.assertEqual(wizard['steps'].next, 'form3')
def test_form_stepback(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
response = self.client.post(self.wizard_url, {
'wizard_goto_step': response.context['wizard']['steps'].prev})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
def test_template_context(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
self.assertEqual(response.context.get('another_var', None), None)
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
self.assertEqual(response.context.get('another_var', None), True)
# ticket #19025: `form` should be included in context
form = response.context_data['wizard']['form']
self.assertEqual(response.context_data['form'], form)
def test_form_finish(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
post_data = self.wizard_step_data[1]
post_data['form2-file1'] = open(upath(__file__), 'rb')
response = self.client.post(self.wizard_url, post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form3')
response = self.client.post(self.wizard_url, self.wizard_step_data[2])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form4')
response = self.client.post(self.wizard_url, self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
all_data = response.context['form_list']
with open(upath(__file__), 'rb') as f:
self.assertEqual(all_data[1]['file1'].read(), f.read())
all_data[1]['file1'].close()
del all_data[1]['file1']
self.assertEqual(all_data, [
{'name': 'Pony', 'thirsty': True, 'user': self.testuser},
{'address1': '123 Main St', 'address2': 'Djangoland'},
{'random_crap': 'blah blah'},
[{'random_crap': 'blah blah'},
{'random_crap': 'blah blah'}]])
def test_cleaned_data(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
post_data = self.wizard_step_data[1]
with open(upath(__file__), 'rb') as post_file:
post_data['form2-file1'] = post_file
response = self.client.post(self.wizard_url, post_data)
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[2])
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
all_data = response.context['all_cleaned_data']
with open(upath(__file__), 'rb') as f:
self.assertEqual(all_data['file1'].read(), f.read())
all_data['file1'].close()
del all_data['file1']
self.assertEqual(all_data, {
'name': 'Pony', 'thirsty': True, 'user': self.testuser,
'address1': '123 Main St', 'address2': 'Djangoland',
'random_crap': 'blah blah', 'formset-form4': [
{'random_crap': 'blah blah'},
{'random_crap': 'blah blah'}]})
def test_manipulated_data(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
post_data = self.wizard_step_data[1]
post_data['form2-file1'].close()
post_data['form2-file1'] = open(upath(__file__), 'rb')
response = self.client.post(self.wizard_url, post_data)
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[2])
self.assertEqual(response.status_code, 200)
self.client.cookies.pop('sessionid', None)
self.client.cookies.pop('wizard_cookie_contact_wizard', None)
response = self.client.post(self.wizard_url, self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
def test_form_refresh(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
post_data = self.wizard_step_data[1]
post_data['form2-file1'].close()
post_data['form2-file1'] = open(upath(__file__), 'rb')
response = self.client.post(self.wizard_url, post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form3')
response = self.client.post(self.wizard_url, self.wizard_step_data[2])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form4')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
response = self.client.post(self.wizard_url, self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
@skipIfCustomUser
class SessionWizardTests(WizardTests, TestCase):
wizard_url = '/wiz_session/'
wizard_step_1_data = {
'session_contact_wizard-current_step': 'form1',
}
wizard_step_data = (
{
'form1-name': 'Pony',
'form1-thirsty': '2',
'session_contact_wizard-current_step': 'form1',
},
{
'form2-address1': '123 Main St',
'form2-address2': 'Djangoland',
'session_contact_wizard-current_step': 'form2',
},
{
'form3-random_crap': 'blah blah',
'session_contact_wizard-current_step': 'form3',
},
{
'form4-INITIAL_FORMS': '0',
'form4-TOTAL_FORMS': '2',
'form4-MAX_NUM_FORMS': '0',
'form4-0-random_crap': 'blah blah',
'form4-1-random_crap': 'blah blah',
'session_contact_wizard-current_step': 'form4',
}
)
@skipIfCustomUser
class CookieWizardTests(WizardTests, TestCase):
wizard_url = '/wiz_cookie/'
wizard_step_1_data = {
'cookie_contact_wizard-current_step': 'form1',
}
wizard_step_data = (
{
'form1-name': 'Pony',
'form1-thirsty': '2',
'cookie_contact_wizard-current_step': 'form1',
},
{
'form2-address1': '123 Main St',
'form2-address2': 'Djangoland',
'cookie_contact_wizard-current_step': 'form2',
},
{
'form3-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form3',
},
{
'form4-INITIAL_FORMS': '0',
'form4-TOTAL_FORMS': '2',
'form4-MAX_NUM_FORMS': '0',
'form4-0-random_crap': 'blah blah',
'form4-1-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form4',
}
)
@skipIfCustomUser
class WizardTestKwargs(TestCase):
wizard_url = '/wiz_other_template/'
wizard_step_1_data = {
'cookie_contact_wizard-current_step': 'form1',
}
wizard_step_data = (
{
'form1-name': 'Pony',
'form1-thirsty': '2',
'cookie_contact_wizard-current_step': 'form1',
},
{
'form2-address1': '123 Main St',
'form2-address2': 'Djangoland',
'cookie_contact_wizard-current_step': 'form2',
},
{
'form3-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form3',
},
{
'form4-INITIAL_FORMS': '0',
'form4-TOTAL_FORMS': '2',
'form4-MAX_NUM_FORMS': '0',
'form4-0-random_crap': 'blah blah',
'form4-1-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form4',
}
)
urls = 'django.contrib.formtools.tests.wizard.wizardtests.urls'
def setUp(self):
self.testuser, created = User.objects.get_or_create(username='testuser1')
self.wizard_step_data[0]['form1-user'] = self.testuser.pk
def test_template(self):
templates = os.path.join(os.path.dirname(upath(__file__)), 'templates')
with self.settings(
TEMPLATE_DIRS=list(settings.TEMPLATE_DIRS) + [templates]):
response = self.client.get(self.wizard_url)
self.assertTemplateUsed(response, 'other_wizard_form.html')
class WizardTestGenericViewInterface(TestCase):
def test_get_context_data_inheritance(self):
class TestWizard(CookieWizardView):
"""
A subclass that implements ``get_context_data`` using the standard
protocol for generic views (accept only **kwargs).
See ticket #17148.
"""
def get_context_data(self, **kwargs):
context = super(TestWizard, self).get_context_data(**kwargs)
context['test_key'] = 'test_value'
return context
factory = RequestFactory()
view = TestWizard.as_view([forms.Form])
response = view(factory.get('/'))
self.assertEqual(response.context_data['test_key'], 'test_value')
def test_get_context_data_with_mixin(self):
class AnotherMixin(object):
def get_context_data(self, **kwargs):
context = super(AnotherMixin, self).get_context_data(**kwargs)
context['another_key'] = 'another_value'
return context
class TestWizard(AnotherMixin, CookieWizardView):
"""
A subclass that implements ``get_context_data`` using the standard
protocol for generic views (accept only **kwargs).
See ticket #17148.
"""
def get_context_data(self, **kwargs):
context = super(TestWizard, self).get_context_data(**kwargs)
context['test_key'] = 'test_value'
return context
factory = RequestFactory()
view = TestWizard.as_view([forms.Form])
response = view(factory.get('/'))
self.assertEqual(response.context_data['test_key'], 'test_value')
self.assertEqual(response.context_data['another_key'], 'another_value')
@skipIfCustomUser
class WizardFormKwargsOverrideTests(TestCase):
def setUp(self):
super(WizardFormKwargsOverrideTests, self).setUp()
self.rf = RequestFactory()
# Create two users so we can filter by is_staff when handing our
# wizard a queryset keyword argument.
self.normal_user = User.objects.create(username='test1', email='[email protected]')
self.staff_user = User.objects.create(username='test2', email='[email protected]', is_staff=True)
def test_instance_is_maintained(self):
self.assertEqual(2, User.objects.count())
queryset = User.objects.get(pk=self.staff_user.pk)
class InstanceOverrideWizard(CookieWizardView):
def get_form_kwargs(self, step):
return {'instance': queryset}
view = InstanceOverrideWizard.as_view([UserForm])
response = view(self.rf.get('/'))
form = response.context_data['wizard']['form']
self.assertNotEqual(form.instance.pk, None)
self.assertEqual(form.instance.pk, self.staff_user.pk)
self.assertEqual('[email protected]', form.initial.get('email', None))
def test_queryset_is_maintained(self):
queryset = User.objects.filter(pk=self.staff_user.pk)
class QuerySetOverrideWizard(CookieWizardView):
def get_form_kwargs(self, step):
return {'queryset': queryset}
view = QuerySetOverrideWizard.as_view([UserFormSet])
response = view(self.rf.get('/'))
formset = response.context_data['wizard']['form']
self.assertNotEqual(formset.queryset, None)
self.assertEqual(formset.initial_form_count(), 1)
self.assertEqual(['[email protected]'],
list(formset.queryset.values_list('email', flat=True)))
| mit |
olivierdalang/QGIS | scripts/process_function_template.py | 26 | 4716 |
# -*- coding: utf-8 -*-
import sys
import os
import json
import glob
sys.path.append(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../python/ext-libs'))
cpp = open(sys.argv[1], "w", encoding="utf-8")
cpp.write(
"#include \"qgsexpression.h\"\n"
"#include \"qgsexpression_p.h\"\n"
"#include <mutex>\n"
"\n"
"void QgsExpression::initFunctionHelp()\n"
"{\n"
" static std::once_flag initialized;\n"
" std::call_once( initialized, []\n"
" {"
)
def quote(v):
if isinstance(v, dict):
for k in v:
v[k] = quote(v[k])
return v
elif isinstance(v, list):
        return list(map(quote, v))
elif isinstance(v, str):
return v.replace('"', '\\"').replace('\n', '\\n')
elif isinstance(v, bool):
return v
else:
raise BaseException("unexpected type " + repr(v))
for f in sorted(glob.glob('resources/function_help/json/*')):
with open(f, encoding="utf-8") as function_file:
try:
json_params = json.load(function_file)
except:
print(f)
raise
json_params = quote(json_params)
for field in ['name', 'type']:
if field not in json_params:
raise BaseException("%s: %s missing" % (f, field))
if not json_params['type'] in ['function', 'operator', 'value', 'expression', 'group']:
raise BaseException("%s: invalid type %s " % (f, json_params['type']))
if 'variants' not in json_params:
        # convert single variant shortcut to an expanded variant
v = {}
for i in json_params:
v[i] = json_params[i]
v['variant'] = json_params['name']
v['variant_description'] = json_params['description']
json_params['variants'] = [v]
name = "\"{0}\"".format(json_params['name'])
if json_params['type'] == 'operator':
for v in json_params['variants']:
if 'arguments' not in v:
raise BaseException("%s: arguments expected for operator")
if len(list(v['arguments'])) < 1 or len(list(v['arguments'])) > 2:
raise BaseException("%s: 1 or 2 arguments expected for operator")
cpp.write("\n\n functionHelpTexts().insert( QStringLiteral( {0} ),\n Help( QStringLiteral( {0} ), tr( \"{1}\" ), tr( \"{2}\" ),\n QList<HelpVariant>()".format(
name, json_params['type'], json_params['description'])
)
for v in json_params['variants']:
cpp.write(
"\n << HelpVariant( tr( \"{0}\" ), tr( \"{1}\" ),\n QList<HelpArg>()".format(v['variant'], v['variant_description']))
if 'arguments' in v:
for a in v['arguments']:
cpp.write("\n << HelpArg( QStringLiteral( \"{0}\" ), tr( \"{1}\" ), {2}, {3}, {4}, {5} )".format(
a['arg'],
a.get('description', ''),
"true" if a.get('descOnly', False) else "false",
"true" if a.get('syntaxOnly', False) else "false",
"true" if a.get('optional', False) else "false",
'QStringLiteral( "{}" )'.format(a.get('default', '')) if a.get('default', '') else "QString()"
)
)
cpp.write(",\n /* variableLenArguments */ {0}".format(
"true" if v.get('variableLenArguments', False) else "false"))
cpp.write(",\n QList<HelpExample>()")
if 'examples' in v:
for e in v['examples']:
cpp.write("\n << HelpExample( tr( \"{0}\" ), tr( \"{1}\" ), tr( \"{2}\" ) )".format(
e['expression'],
e['returns'],
e.get('note', ''))
)
if 'notes' in v:
cpp.write(",\n tr( \"{0}\" )".format(v['notes']))
else:
cpp.write(",\n QString()")
if 'tags' in v:
cpp.write(",\n QStringList()")
for t in v['tags']:
cpp.write("\n << QStringLiteral( \"{0}\" ) << tr( \"{0}\" )".format(t))
cpp.write("\n )")
cpp.write("\n )")
cpp.write("\n );")
for f in sorted(glob.glob('resources/function_help/text/*')):
n = os.path.basename(f)
with open(f) as content:
cpp.write("\n\n functionHelpTexts().insert( \"{0}\",\n Help( tr( \"{0}\" ), tr( \"group\" ), tr( \"{1}\" ), QList<HelpVariant>() ) );\n".format(
            n, content.read().replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n')))
cpp.write("\n } );\n}\n")
cpp.close()
| gpl-2.0 |
muzena/deluge | deluge/ui/console/modes/torrent_actions.py | 5 | 14567 |
# torrent_actions.py
#
# Copyright (C) 2011 Nick Lanham <[email protected]>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
from deluge.ui.client import client
from popup import SelectablePopup, Popup
from input_popup import InputPopup
import deluge.component as component
from deluge.ui.console import colors, modes
from twisted.internet import defer
import logging
log = logging.getLogger(__name__)
torrent_options = [
("max_download_speed", float),
("max_upload_speed", float),
("max_connections", int),
("max_upload_slots", int),
("prioritize_first_last", bool),
("sequential_download", bool),
("is_auto_managed", bool),
("stop_at_ratio", bool),
("stop_ratio", float),
("remove_at_ratio", bool),
("move_on_completed", bool),
("move_on_completed_path", str)
]
torrent_options_to_names = {
"max_download_speed": "Max DL speed",
"max_upload_speed": "Max UL speed",
"max_connections": "Max connections",
"max_upload_slots": "Max upload slots",
"prioritize_first_last": "Prioritize first/last pieces",
"sequential_download": "Sequential download",
"is_auto_managed": "Is auto managed?",
"stop_at_ratio": "Stop at ratio",
"stop_ratio": "Seeding ratio limit",
"remove_at_ratio": "Remove after reaching ratio",
"move_on_completed": "Move torrent after completion",
"move_on_completed_path": "Path to move the torrent to"
}
class ACTION:
PAUSE=0
RESUME=1
REANNOUNCE=2
EDIT_TRACKERS=3
RECHECK=4
REMOVE=5
REMOVE_DATA=6
REMOVE_NODATA=7
DETAILS=8
MOVE_STORAGE=9
QUEUE=10
QUEUE_TOP=11
QUEUE_UP=12
QUEUE_DOWN=13
QUEUE_BOTTOM=14
TORRENT_OPTIONS=15
def action_error(error,mode):
rerr = error.value
mode.report_message("An Error Occurred","%s got error %s: %s"%(rerr.method,rerr.exception_type,rerr.exception_msg))
mode.refresh()
def torrent_action(idx, data, mode, ids):
if ids:
if data==ACTION.PAUSE:
log.debug("Pausing torrents: %s",ids)
client.core.pause_torrent(ids).addErrback(action_error,mode)
elif data==ACTION.RESUME:
log.debug("Resuming torrents: %s", ids)
client.core.resume_torrent(ids).addErrback(action_error,mode)
elif data==ACTION.QUEUE:
def do_queue(idx,qact,mode,ids):
def move_selection(r):
if mode.config["move_selection"]:
queue_length = 0
selected_num = 0
for tid in mode.curstate:
tq = mode.curstate.get(tid)["queue"]
if tq != -1:
queue_length += 1
if tq in mode.marked:
selected_num += 1
if qact == ACTION.QUEUE_TOP:
if mode.marked:
mode.cursel = 1 + sorted(mode.marked).index(mode.cursel)
else:
mode.cursel = 1
mode.marked = range(1, selected_num + 1)
elif qact == ACTION.QUEUE_UP:
mode.cursel = max(1, mode.cursel - 1)
mode.marked = map(lambda v: v-1, mode.marked)
mode.marked = filter(lambda v: v>0, mode.marked)
elif qact == ACTION.QUEUE_DOWN:
mode.cursel = min(queue_length, mode.cursel + 1)
mode.marked = map(lambda v: v+1, mode.marked)
mode.marked = filter(lambda v: v<=queue_length, mode.marked)
elif qact == ACTION.QUEUE_BOTTOM:
if mode.marked:
mode.cursel = queue_length - selected_num + 1 + sorted(mode.marked).index(mode.cursel)
else:
mode.cursel = queue_length
mode.marked = range(queue_length - selected_num + 1, queue_length+1)
if qact == ACTION.QUEUE_TOP:
log.debug("Queuing torrents top")
client.core.queue_top(ids).addCallback(move_selection)
elif qact == ACTION.QUEUE_UP:
log.debug("Queuing torrents up")
client.core.queue_up(ids).addCallback(move_selection)
elif qact == ACTION.QUEUE_DOWN:
log.debug("Queuing torrents down")
client.core.queue_down(ids).addCallback(move_selection)
elif qact == ACTION.QUEUE_BOTTOM:
log.debug("Queuing torrents bottom")
client.core.queue_bottom(ids).addCallback(move_selection)
if len(ids) == 1:
mode.clear_marks()
return True
popup = SelectablePopup(mode,"Queue Action", do_queue, (mode, ids))
popup.add_line("_Top",data=ACTION.QUEUE_TOP)
popup.add_line("_Up",data=ACTION.QUEUE_UP)
popup.add_line("_Down",data=ACTION.QUEUE_DOWN)
popup.add_line("_Bottom",data=ACTION.QUEUE_BOTTOM)
mode.set_popup(popup)
return False
elif data==ACTION.REMOVE:
def do_remove(data):
if not data: return
mode.clear_marks()
wd = data["remove_files"]
for tid in ids:
log.debug("Removing torrent: %s, %d", tid, wd)
client.core.remove_torrent(tid,wd).addErrback(action_error,mode)
rem_msg = ""
def got_status(status):
return (status["name"], status["state"])
callbacks = []
for tid in ids:
d = client.core.get_torrent_status(tid, ["name", "state"])
callbacks.append( d.addCallback(got_status) )
def finish_up(status):
status = map(lambda x: x[1], status)
if len(ids) == 1:
rem_msg = "{!info!}Removing the following torrent:{!input!}"
else:
rem_msg = "{!info!}Removing the following torrents:{!input!}"
for i, (name, state) in enumerate(status):
color = colors.state_color[state]
rem_msg += "\n %s* {!input!}%s" % (color, name)
if i == 5:
if i < len(status):
rem_msg += "\n {!red!}And %i more" % (len(status) - 5)
break
popup = InputPopup(mode, "(Esc to cancel, Enter to remove)", close_cb=do_remove)
popup.add_text(rem_msg)
popup.add_spaces(1)
popup.add_select_input("{!info!}Torrent files:", 'remove_files', ["Keep", "Remove"], [False, True], False)
mode.set_popup(popup)
defer.DeferredList(callbacks).addCallback(finish_up)
return False
elif data==ACTION.MOVE_STORAGE:
def do_move(res):
import os.path
if os.path.exists(res["path"]) and not os.path.isdir(res["path"]):
mode.report_message("Cannot Move Storage","{!error!}%s exists and is not a directory"%res["path"])
else:
log.debug("Moving %s to: %s",ids,res["path"])
client.core.move_storage(ids,res["path"]).addErrback(action_error,mode)
if len(ids) == 1:
mode.clear_marks()
return True
popup = InputPopup(mode,"Move Storage (Esc to cancel)",close_cb=do_move)
popup.add_text_input("Enter path to move to:","path")
mode.set_popup(popup)
return False
elif data==ACTION.RECHECK:
log.debug("Rechecking torrents: %s", ids)
client.core.force_recheck(ids).addErrback(action_error,mode)
elif data==ACTION.REANNOUNCE:
log.debug("Reannouncing torrents: %s",ids)
client.core.force_reannounce(ids).addErrback(action_error,mode)
elif data==ACTION.DETAILS:
log.debug("Torrent details")
tid = mode.current_torrent_id()
if tid:
mode.show_torrent_details(tid)
else:
log.error("No current torrent in _torrent_action, this is a bug")
elif data==ACTION.TORRENT_OPTIONS:
mode.popup = Popup(mode, "Torrent options")
mode.popup.add_line("Querying core, please wait...")
torrents = ids
options = {}
def _do_set_torrent_options(ids, result):
options = {}
for opt in result:
if result[opt] not in ["multiple", None]:
options[opt] = result[opt]
client.core.set_torrent_options( ids, options )
for tid in ids:
if "move_on_completed_path" in options:
client.core.set_torrent_move_completed_path(tid, options["move_on_completed_path"])
if "move_on_completed" in options:
client.core.set_torrent_move_completed(tid, options["move_on_completed"])
if "is_auto_managed" in options:
client.core.set_torrent_auto_managed(tid, options["is_auto_managed"])
if "remove_at_ratio" in options:
client.core.set_torrent_remove_at_ratio(tid, options["remove_at_ratio"])
if "prioritize_first_last" in options:
client.core.set_torrent_prioritize_first_last(tid, options["prioritize_first_last"])
def on_torrent_status(status):
for key in status:
if key not in options:
options[key] = status[key]
elif options[key] != status[key]:
options[key] = "multiple"
def create_popup(status):
cb = lambda result, ids=ids: _do_set_torrent_options(ids, result)
option_popup = InputPopup(mode,"Set torrent options (Esc to cancel)",close_cb=cb, height_req=22)
for (field, field_type) in torrent_options:
caption = "{!info!}" + torrent_options_to_names[field]
value = options[field]
if field_type == str:
if not isinstance(value, basestring):
value = str(value)
option_popup.add_text_input(caption, field, value)
elif field_type == bool:
if options[field] == "multiple":
choices = (
["Yes", "No", "Mixed"],
[True, False, None],
2
)
else:
choices = (
["Yes", "No"],
[True, False],
[True, False].index(options[field])
)
option_popup.add_select_input(caption, field, choices[0], choices[1], choices[2])
elif field_type == float:
option_popup.add_float_spin_input(caption, field, value, min_val = -1)
elif field_type == int:
option_popup.add_int_spin_input(caption, field, value, min_val = -1)
mode.set_popup(option_popup)
mode.refresh()
callbacks = []
field_list = map(lambda t: t[0], torrent_options)
for tid in torrents:
deferred = component.get("SessionProxy").get_torrent_status(tid, field_list)
callbacks.append( deferred.addCallback(on_torrent_status) )
callbacks = defer.DeferredList(callbacks)
callbacks.addCallback(create_popup)
if len(ids) == 1:
mode.clear_marks()
return True
# Creates the popup. mode is the calling mode, tids is a list of torrents to take action upon
def torrent_actions_popup(mode,tids,details=False, action = None):
if action != None:
torrent_action(-1, action, mode, tids)
return
popup = SelectablePopup(mode,"Torrent Actions",torrent_action, (mode, tids))
popup.add_line("_Pause",data=ACTION.PAUSE)
popup.add_line("_Resume",data=ACTION.RESUME)
if details:
popup.add_divider()
popup.add_line("Queue",data=ACTION.QUEUE)
popup.add_divider()
popup.add_line("_Update Tracker",data=ACTION.REANNOUNCE)
popup.add_divider()
popup.add_line("Remo_ve Torrent",data=ACTION.REMOVE)
popup.add_line("_Force Recheck",data=ACTION.RECHECK)
popup.add_line("_Move Storage",data=ACTION.MOVE_STORAGE)
popup.add_divider()
if details:
popup.add_line("Torrent _Details",data=ACTION.DETAILS)
popup.add_line("Torrent _Options",data=ACTION.TORRENT_OPTIONS)
mode.set_popup(popup)
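# Illustrative call sites (editorial sketch, not part of the original file);
# "_selected_torrent_ids" is a hypothetical helper on the calling mode:
#
#   # open the full action menu for the currently marked torrents
#   torrent_actions_popup(mode, mode._selected_torrent_ids(), details=True)
#
#   # or run a single action directly, skipping the menu
#   torrent_actions_popup(mode, tids, action=ACTION.PAUSE)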
| gpl-3.0 |
hazelnusse/robot.bicycle | design/derivative_filter.py | 1 | 1131 |
"""
Given a continuous time first order transfer function of the form:
n1 * s + n0
-----------
s + d0
Compute the Tustin approximation and return a state space realization of this
discrete time transfer function.
"""
from sympy import symbols, Poly, ccode, S, sqrt
def discrete_realization_tustin(n0, n1, d0, T):
z = symbols('z')
s = 2/T*(z-1)/(z+1)
num = ((n1*s + n0)*T*(z + 1)).simplify()
den = ((s + d0)*T*(z + 1)).simplify()
num_poly = Poly(num, z)
den_poly = Poly(den, z)
n1_z, n0_z = num_poly.coeffs()
d1_z, d0_z = den_poly.coeffs()
# Make denominator monic and divide numerator appropriately
n1_z /= d1_z
n0_z /= d1_z
d0_z /= d1_z
a = -d0_z
b_times_c = (n0_z - n1_z * d0_z).simplify()
d = n1_z
return a, b_times_c, d
n0, n1, d0, T = symbols('n0 n1 d0 T')
#T = 0.0013
#n0 = 1.23
#n1 = 4.56
#d0 = 7.89
a, b_times_c, d = discrete_realization_tustin(n0, n1, d0, T)
#a, b_times_c, d = discrete_realization_zoh(n0, n1, d0, T)
a_str = ccode(a)
b_times_c_str = ccode(b_times_c)
d_str = ccode(d)
print(a_str)
print(b_times_c_str)
print(d_str)
| bsd-2-clause |
undoware/neutron-drive | google_appengine/lib/django_1_3/tests/regressiontests/dispatch/tests/test_saferef.py | 51 | 2107 |
from django.dispatch.saferef import *
from django.utils import unittest
class Test1(object):
def x(self):
pass
def test2(obj):
pass
class Test2(object):
def __call__(self, obj):
pass
class Tester(unittest.TestCase):
def setUp(self):
ts = []
ss = []
for x in xrange(5000):
t = Test1()
ts.append(t)
s = safeRef(t.x, self._closure)
ss.append(s)
ts.append(test2)
ss.append(safeRef(test2, self._closure))
for x in xrange(30):
t = Test2()
ts.append(t)
s = safeRef(t, self._closure)
ss.append(s)
self.ts = ts
self.ss = ss
self.closureCount = 0
def tearDown(self):
del self.ts
del self.ss
def testIn(self):
"""Test the "in" operator for safe references (cmp)"""
for t in self.ts[:50]:
self.assertTrue(safeRef(t.x) in self.ss)
def testValid(self):
"""Test that the references are valid (return instance methods)"""
for s in self.ss:
self.assertTrue(s())
def testShortCircuit (self):
"""Test that creation short-circuits to reuse existing references"""
sd = {}
for s in self.ss:
sd[s] = 1
for t in self.ts:
if hasattr(t, 'x'):
self.assertTrue(sd.has_key(safeRef(t.x)))
self.assertTrue(safeRef(t.x) in sd)
else:
self.assertTrue(sd.has_key(safeRef(t)))
self.assertTrue(safeRef(t) in sd)
def testRepresentation (self):
"""Test that the reference object's representation works
XXX Doesn't currently check the results, just that no error
is raised
"""
repr(self.ss[-1])
def _closure(self, ref):
"""Dumb utility mechanism to increment deletion counter"""
self.closureCount +=1
def getSuite():
return unittest.makeSuite(Tester,'test')
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
openhatch/oh-mainline | vendor/packages/Django/django/contrib/auth/__init__.py | 70 | 5196 |
import re
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.middleware.csrf import rotate_token
from django.contrib.auth.signals import user_logged_in, user_logged_out, user_login_failed
SESSION_KEY = '_auth_user_id'
BACKEND_SESSION_KEY = '_auth_user_backend'
REDIRECT_FIELD_NAME = 'next'
def load_backend(path):
i = path.rfind('.')
module, attr = path[:i], path[i + 1:]
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured('Error importing authentication backend %s: "%s"' % (path, e))
except ValueError:
raise ImproperlyConfigured('Error importing authentication backends. Is AUTHENTICATION_BACKENDS a correctly defined list or tuple?')
try:
cls = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" authentication backend' % (module, attr))
return cls()
def get_backends():
from django.conf import settings
backends = []
for backend_path in settings.AUTHENTICATION_BACKENDS:
backends.append(load_backend(backend_path))
if not backends:
raise ImproperlyConfigured('No authentication backends have been defined. Does AUTHENTICATION_BACKENDS contain anything?')
return backends
def _clean_credentials(credentials):
"""
Cleans a dictionary of credentials of potentially sensitive info before
sending to less secure functions.
Not comprehensive - intended for user_login_failed signal
"""
SENSITIVE_CREDENTIALS = re.compile('api|token|key|secret|password|signature', re.I)
CLEANSED_SUBSTITUTE = '********************'
for key in credentials:
if SENSITIVE_CREDENTIALS.search(key):
credentials[key] = CLEANSED_SUBSTITUTE
return credentials
def authenticate(**credentials):
"""
If the given credentials are valid, return a User object.
"""
for backend in get_backends():
try:
user = backend.authenticate(**credentials)
except TypeError:
# This backend doesn't accept these credentials as arguments. Try the next one.
continue
if user is None:
continue
# Annotate the user object with the path of the backend.
user.backend = "%s.%s" % (backend.__module__, backend.__class__.__name__)
return user
# The credentials supplied are invalid to all backends, fire signal
user_login_failed.send(sender=__name__,
credentials=_clean_credentials(credentials))
def login(request, user):
"""
Persist a user id and a backend in the request. This way a user doesn't
have to reauthenticate on every request. Note that data set during
the anonymous session is retained when the user logs in.
"""
if user is None:
user = request.user
# TODO: It would be nice to support different login methods, like signed cookies.
if SESSION_KEY in request.session:
if request.session[SESSION_KEY] != user.pk:
# To avoid reusing another user's session, create a new, empty
# session if the existing session corresponds to a different
# authenticated user.
request.session.flush()
else:
request.session.cycle_key()
request.session[SESSION_KEY] = user.pk
request.session[BACKEND_SESSION_KEY] = user.backend
if hasattr(request, 'user'):
request.user = user
rotate_token(request)
user_logged_in.send(sender=user.__class__, request=request, user=user)
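# Editorial example (not part of this module): the standard pattern for using
# authenticate() and login() together in a view; error_response is a
# hypothetical helper.
#
#   from django.contrib.auth import authenticate, login
#
#   def my_view(request):
#       user = authenticate(username=request.POST['username'],
#                           password=request.POST['password'])
#       if user is not None and user.is_active:
#           login(request, user)
#       else:
#           return error_response()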
def logout(request):
"""
Removes the authenticated user's ID from the request and flushes their
session data.
"""
# Dispatch the signal before the user is logged out so the receivers have a
# chance to find out *who* logged out.
user = getattr(request, 'user', None)
if hasattr(user, 'is_authenticated') and not user.is_authenticated():
user = None
user_logged_out.send(sender=user.__class__, request=request, user=user)
request.session.flush()
if hasattr(request, 'user'):
from django.contrib.auth.models import AnonymousUser
request.user = AnonymousUser()
def get_user_model():
"Return the User model that is active in this project"
from django.conf import settings
from django.db.models import get_model
try:
app_label, model_name = settings.AUTH_USER_MODEL.split('.')
except ValueError:
raise ImproperlyConfigured("AUTH_USER_MODEL must be of the form 'app_label.model_name'")
user_model = get_model(app_label, model_name)
if user_model is None:
raise ImproperlyConfigured("AUTH_USER_MODEL refers to model '%s' that has not been installed" % settings.AUTH_USER_MODEL)
return user_model
def get_user(request):
from django.contrib.auth.models import AnonymousUser
try:
user_id = request.session[SESSION_KEY]
backend_path = request.session[BACKEND_SESSION_KEY]
backend = load_backend(backend_path)
user = backend.get_user(user_id) or AnonymousUser()
except KeyError:
user = AnonymousUser()
return user
| agpl-3.0 |
0asa/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 251 | 2022 |
"""
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
ToontownUprising/src | toontown/pets/PetBrain.py | 3 | 20591 |
from pandac.PandaModules import *
from direct.showbase.PythonUtil import weightedChoice, randFloat, Functor
from direct.showbase.PythonUtil import list2dict
from direct.showbase import DirectObject
from direct.distributed import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
from direct.fsm import FSM
from toontown.toon import DistributedToonAI
from toontown.pets import PetConstants, PetObserve, PetGoal, PetGoalMgr
from toontown.pets import PetTricks, PetLookerAI
import random, types
class PetBrain(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('PetBrain')
def __init__(self, pet):
self.pet = pet
self.focus = None
self.started = 0
self.inMovie = 0
self.chaseNode = self.pet.getRender().attachNewNode('PetChaseNode')
self.goalMgr = PetGoalMgr.PetGoalMgr(self.pet)
self.doId2goals = {}
self.nearbyAvs = {}
self.avAwareness = {}
self.lastInteractTime = {}
self.nextAwarenessIndex = 0
if __dev__:
self.pscPrior = PStatCollector('App:Show code:petThink:UpdatePriorities')
self.pscAware = PStatCollector('App:Show code:petThink:ShuffleAwareness')
self.pscResc = PStatCollector('App:Show code:petThink:Reschedule')
return
def destroy(self):
taskMgr.remove(self.getTeleportTaskName())
if __dev__:
del self.pscPrior
del self.pscAware
del self.pscResc
self.stop()
self.goalMgr.destroy()
self.chaseNode.removeNode()
del self.chaseNode
del self.focus
del self.pet
if self.doId2goals:
self.notify.warning('destroy(): self.doId2goals is not empty: %s' % self.doId2goals.keys())
for goalList in self.doId2goals.values():
for goal in goalList:
goal.destroy()
del self.doId2goals
del self.avAwareness
def getThinkTaskName(self):
return 'petThink-%s' % self.pet.doId
def getTeleportTaskName(self):
return 'petTeleport-%s' % self.pet.doId
def getObserveEventAttendedByAvStart(self, otherDoId):
return 'petObserveAttendedByAvStart-%s-%s' % (self.pet.doId, otherDoId)
def getObserveEventAttendedByAvStop(self, otherDoId):
return 'petObserveAttendedByAvStop-%s-%s' % (self.pet.doId, otherDoId)
def getObserveEventAttendingAvStart(self, otherDoId):
return 'petObserveAttendingAvStart-%s-%s' % (self.pet.doId, otherDoId)
def getObserveEventAttendingAvStop(self, otherDoId):
return 'petObserveAttendingAvStop-%s-%s' % (self.pet.doId, otherDoId)
def start(self):
PetBrain.notify.debug('start: %s' % self.pet.doId)
self.lookers = {}
self.lookees = {}
self.accept(PetLookerAI.getStartLookedAtByOtherEvent(self.pet.doId), self._handleLookedAtByOtherStart)
self.accept(PetLookerAI.getStopLookedAtByOtherEvent(self.pet.doId), self._handleLookedAtByOtherStop)
self.accept(PetLookerAI.getStartLookingAtOtherEvent(self.pet.doId), self._handleLookingAtOtherStart)
self.accept(PetLookerAI.getStopLookingAtOtherEvent(self.pet.doId), self._handleLookingAtOtherStop)
self.globalGoals = [PetGoal.Wander()]
for goal in self.globalGoals:
self.goalMgr.addGoal(goal)
for doId in self.pet._getNearbyAvatarDict():
self._handleAvatarArrive(doId)
self.tLastLonelinessUpdate = globalClock.getFrameTime()
taskMgr.doMethodLater(simbase.petThinkPeriod * random.random(), self._think, self.getThinkTaskName())
self.started = 1
def stop(self):
PetBrain.notify.debug('stop: %s' % self.pet.doId)
if not self.started:
return
self.started = 0
del self.lookers
del self.lookees
for doId in self.pet._getNearbyAvatarDict():
self._handleAvatarLeave(doId)
for goal in self.globalGoals:
self.goalMgr.removeGoal(goal)
goal.destroy()
del self.globalGoals
self.clearFocus()
taskMgr.remove(self.getThinkTaskName())
self.ignore(PetLookerAI.getStartLookedAtByOtherEvent(self.pet.doId))
self.ignore(PetLookerAI.getStopLookedAtByOtherEvent(self.pet.doId))
self.ignore(PetLookerAI.getStartLookingAtOtherEvent(self.pet.doId))
self.ignore(PetLookerAI.getStopLookingAtOtherEvent(self.pet.doId))
def observe(self, petObserve):
if petObserve.isForgettable():
if random.random() < 0.05 * self.pet.traits.forgetfulness:
return
petObserve._influence(self)
def updateLastInteractTime(self, avId):
if avId in self.lastInteractTime:
self.lastInteractTime[avId] = globalClock.getFrameTime()
def _think(self, task = None):
if not self.inMovie:
if __dev__:
self.pscPrior.start()
self._updatePriorities()
if __dev__:
self.pscPrior.stop()
if __dev__:
self.pscAware.start()
if len(self.nearbyAvs) > PetConstants.MaxAvatarAwareness:
self.nextAwarenessIndex %= len(self.nearbyAvs)
self._considerBecomeAwareOf(self.nearbyAvs.keys()[self.nextAwarenessIndex])
self.nextAwarenessIndex += 1
if __dev__:
self.pscAware.stop()
curT = globalClock.getFrameTime()
tSinceLastLonelinessUpdate = curT - self.tLastLonelinessUpdate
if tSinceLastLonelinessUpdate >= PetConstants.LonelinessUpdatePeriod:
self.tLastLonelinessUpdate = curT
numLookers = len(self.lookers)
if numLookers:
dt = tSinceLastLonelinessUpdate
self.pet.lerpMood('loneliness', max(-1.0, dt * -.003 * numLookers))
if numLookers > 5:
self.pet.lerpMood('excitement', min(1.0, dt * 0.001 * numLookers))
if __dev__:
self.pscResc.start()
taskMgr.doMethodLater(simbase.petThinkPeriod, self._think, self.getThinkTaskName())
if __dev__:
self.pscResc.stop()
return Task.done
def _updatePriorities(self):
self.goalMgr.updatePriorities()
def _handleLookingAtOtherStart(self, avId):
if avId in self.lookees:
PetBrain.notify.warning('%s: already looking at av %s' % (self.pet.doId, avId))
return
self.lookees[avId] = avId
self.observe(PetObserve.PetActionObserve(PetObserve.Actions.ATTENDING_START, avId))
def _handleLookingAtOtherStop(self, avId):
if avId not in self.lookees:
PetBrain.notify.warning('%s: not looking at av %s' % (self.pet.doId, avId))
return
del self.lookees[avId]
self.observe(PetObserve.PetActionObserve(PetObserve.Actions.ATTENDING_STOP, avId))
def _handleLookedAtByOtherStart(self, avId):
if avId in self.lookers:
PetBrain.notify.warning('%s: av %s already looking at me' % (self.pet.doId, avId))
return
self.lookers[avId] = avId
self.observe(PetObserve.PetActionObserve(PetObserve.Actions.ATTENDED_START, avId))
def _handleLookedAtByOtherStop(self, avId):
if avId not in self.lookers:
PetBrain.notify.warning('%s: av %s not looking at me' % (self.pet.doId, avId))
return
del self.lookers[avId]
self.observe(PetObserve.PetActionObserve(PetObserve.Actions.ATTENDED_STOP, avId))
def lookedAtBy(self, avId):
return avId in self.lookers
def lookingAt(self, avId):
return avId in self.lookees
def getAvIdsLookingAtUs(self):
return self.lookers
def getAvIdsWeAreLookingAt(self):
return self.lookees
def setFocus(self, object):
if isinstance(self.focus, DistributedObjectAI.DistributedObjectAI):
self.ignore(self.focus.getDeleteEvent())
self.lastInteractTime.setdefault(self.focus.doId, 0)
PetBrain.notify.debug('setFocus: %s' % object)
self.focus = object
if isinstance(self.focus, DistributedObjectAI.DistributedObjectAI):
self.accept(self.focus.getDeleteEvent(), self._handleFocusHasLeft)
def getFocus(self):
return self.focus
def clearFocus(self):
self.setFocus(None)
return
def _handleFocusHasLeft(self):
if self.focus.isEmpty():
self.chaseNode.setPos(self.pet, 0, 0, 0)
else:
self.chaseNode.setPos(self.focus, 0, 0, 0)
self._inspectSpot(self.chaseNode)
def _chase(self, target):
if callable(target):
target = target()
if target is None:
return 0
self.setFocus(target)
self.pet.actionFSM.request('Chase', target)
return 1
def _wander(self):
self.clearFocus()
self.pet.actionFSM.request('Wander')
return 1
def _unstick(self):
self.clearFocus()
self.pet.actionFSM.request('Unstick')
return 1
def _flee(self, chaser):
if callable(chaser):
chaser = chaser()
if chaser is None:
return 0
self.setFocus(chaser)
self.pet.actionFSM.request('Flee', chaser)
return 1
def _inspectSpot(self, spot = None):
if spot is None:
spot = NodePath('randomSpot')
spot.setPos(randFloat(-20, 20), randFloat(-20, 20), 0)
self.setFocus(spot)
self.pet.actionFSM.request('InspectSpot', spot)
return 1
def _stay(self, avatar):
self.setFocus(avatar)
self.pet.actionFSM.request('Stay', avatar)
return 1
def _doTrick(self, trickId, avatar):
self.setFocus(avatar)
self.pet.actionFSM.request('Trick', avatar, trickId)
return 1
def _heal(self, avatar):
if callable(avatar):
avatar = avatar()
if avatar is None:
return 0
self.setFocus(avatar)
self.pet.actionFSM.request('Heal', avatar)
return 1
def _startMovie(self):
self.setFocus(None)
self.pet.actionFSM.request('Movie')
self.inMovie = 1
return
def _endMovie(self):
self.inMovie = 0
def _handleGenericObserve(self, observe):
pass
def _handleActionObserve(self, observe):
action = observe.getAction()
avId = observe.getAvId()
OA = PetObserve.Actions
dbg = PetBrain.notify.debug
if action == OA.ATTENDED_START:
dbg('avatar %s is looking at me' % avId)
self.pet.lerpMoods({'boredom': -.1,
'excitement': 0.05,
'loneliness': -.05})
messenger.send(self.getObserveEventAttendedByAvStart(avId))
elif action == OA.ATTENDED_STOP:
dbg('avatar %s is no longer looking at me' % avId)
messenger.send(self.getObserveEventAttendedByAvStop(avId))
elif action == OA.ATTENDING_START:
dbg('I am looking at avatar %s' % avId)
messenger.send(self.getObserveEventAttendingAvStart(avId))
elif action == OA.ATTENDING_STOP:
dbg('I am no longer looking at avatar %s' % avId)
messenger.send(self.getObserveEventAttendingAvStop(avId))
elif action == OA.CHANGE_ZONE:
if avId != self.pet.doId:
oldZoneId, newZoneId = observe.getData()
PetBrain.notify.debug('%s.CHANGE_ZONE: %s, %s->%s' % (self.pet.doId,
avId,
oldZoneId,
newZoneId))
myZoneId = self.pet.zoneId
if newZoneId != oldZoneId:
if newZoneId == myZoneId:
self._handleAvatarArrive(avId)
elif oldZoneId == myZoneId:
self._handleAvatarLeave(avId)
if self.pet.inEstate:
if avId in (self.pet.ownerId, self.pet.estateOwnerId):
if oldZoneId in self.pet.estateZones and newZoneId not in self.pet.estateZones:
if avId == self.pet.ownerId:
self._handleOwnerLeave()
else:
self._handleEstateOwnerLeave()
elif action == OA.LOGOUT:
if avId == self.pet.ownerId:
self._handleOwnerLeave()
elif avId == self.pet.estateOwnerId:
self._handleEstateOwnerLeave()
elif action == OA.FEED:
dbg('avatar %s is feeding me' % avId)
self.pet.lerpMoods({'affection': 0.35,
'anger': -.07,
'boredom': -.5,
'excitement': 0.5,
'fatigue': -.2,
'hunger': -.5,
'loneliness': -.08,
'playfulness': 0.1,
'restlessness': -.05,
'sadness': -.2})
self.updateLastInteractTime(avId)
avatar = simbase.air.doId2do.get(avId)
if avatar is not None:
avatar.setHatePets(0)
elif action == OA.SCRATCH:
dbg('avatar %s is scratching me' % avId)
self.pet.lerpMoods({'affection': 0.45,
'anger': -.1,
'boredom': -.8,
'excitement': 0.5,
'fatigue': -.25,
'loneliness': -.2,
'playfulness': 0.1,
'restlessness': -.2,
'sadness': -.2})
self.updateLastInteractTime(avId)
avatar = simbase.air.doId2do.get(avId)
if avatar is not None:
avatar.setHatePets(0)
elif action == OA.GARDEN:
dbg('avatar %s is gardening' % avId)
avatar = simbase.air.doId2do.get(avId)
if avatar is not None:
if self.getFocus() == avatar:
self._wander()
return
def _handlePhraseObserve(self, observe):
def _handleGettingFriendlyAttention(avId, self = self):
self.pet.lerpMoods({'boredom': -.85,
'restlessness': -.1,
'playfulness': 0.2,
'loneliness': -.4,
'sadness': -.1,
'fatigue': -.05,
'excitement': 0.05,
'anger': -.05})
self.updateLastInteractTime(avId)
def _handleComeHere(avId, self = self):
avatar = simbase.air.doId2do.get(avId)
if avatar:
self._chase(avatar)
avatar.setHatePets(0)
def _handleFollowMe(avId, self = self):
avatar = simbase.air.doId2do.get(avId)
if avatar:
self._chase(avatar)
avatar.setHatePets(0)
def _handleStay(avId, self = self):
avatar = simbase.air.doId2do.get(avId)
if avatar:
self._stay(avatar)
def _handleCriticism(avId, self = self):
ownerFactor = 0.5
if avId == self.pet.ownerId:
ownerFactor = 1.0
self.pet.lerpMoods({'affection': -.4,
'anger': 0.4,
'boredom': -.3,
'confusion': 0.05,
'fatigue': 0.2,
'playfulness': -.1,
'sadness': 0.5 * ownerFactor})
def _handleGoAway(avId, self = self):
avatar = simbase.air.doId2do.get(avId)
if avatar is not None:
if self.getFocus() == avatar:
self._wander()
return
def _handleDoTrick(trickId, avId, self = self):
avatar = simbase.air.doId2do.get(avId)
if avatar:
if self.lookedAtBy(avatar.doId):
if not self.goalMgr.hasTrickGoal():
if not self.pet._willDoTrick(trickId):
self.pet.trickFailLogger.addEvent(trickId)
trickId = PetTricks.Tricks.BALK
trickGoal = PetGoal.DoTrick(avatar, trickId)
self.goalMgr.addGoal(trickGoal)
phrase = observe.getPetPhrase()
avId = observe.getAvId()
OP = PetObserve.Phrases
if phrase in list2dict([OP.COME,
OP.FOLLOW_ME,
OP.STAY,
OP.NEED_LAFF,
OP.NEED_GAGS,
OP.NEED_JB,
OP.HI,
OP.SOOTHE,
OP.PRAISE,
OP.HAPPY,
OP.QUESTION,
OP.FRIENDLY,
OP.LETS_PLAY,
OP.DO_TRICK]):
_handleGettingFriendlyAttention(avId)
if phrase == OP.COME:
_handleComeHere(avId)
if phrase == OP.FOLLOW_ME:
_handleFollowMe(avId)
if phrase == OP.STAY:
_handleStay(avId)
if phrase == OP.CRITICISM:
_handleCriticism(avId)
if phrase == OP.GO_AWAY:
_handleGoAway(avId)
if phrase == OP.DO_TRICK:
_handleDoTrick(observe.getTrickId(), avId)
def _addGoalsReAvatar(self, avId):
av = self.pet.air.doId2do.get(avId)
if av is None:
PetBrain.notify.warning('%s._addGoalsReAvatar: %s not in doId2do' % (self.pet.doId, avId))
return
if avId not in self.doId2goals:
goals = [PetGoal.ChaseAvatar(av), PetGoal.FleeFromAvatar(av)]
self.doId2goals[avId] = goals
self.lastInteractTime.setdefault(avId, 0)
for goal in self.doId2goals[avId]:
self.goalMgr.addGoal(goal)
return
def _removeGoalsReAvatar(self, avId):
if avId not in self.doId2goals:
PetBrain.notify.warning('no goals re av %s to remove' % avId)
return
for goal in self.doId2goals[avId]:
self.goalMgr.removeGoal(goal)
goal.destroy()
del self.doId2goals[avId]
def _considerBecomeAwareOf(self, avId):
av = simbase.air.doId2do.get(avId)
if av is None:
PetBrain.notify.warning('_considerBecomeAwareOf: av %s does not exist' % avId)
return
if avId in self.avAwareness:
return
def becomeAwareOf(avId, self = self):
self.avAwareness[avId] = None
self._addGoalsReAvatar(avId)
return
if len(self.avAwareness) < PetConstants.MaxAvatarAwareness:
becomeAwareOf(avId)
return
def calcInterest(avId, self = self):
if avId == self.pet.ownerId:
return 100.0
return random.random()
avInterest = calcInterest(avId)
minInterest = avInterest
minInterestAvId = avId
for awAvId in self.avAwareness:
i = calcInterest(awAvId)
if i < minInterest:
minInterest = i
minInterestAvId = awAvId
break
if minInterestAvId != avId:
self._removeAwarenessOf(minInterestAvId)
becomeAwareOf(avId)
return
def _removeAwarenessOf(self, avId):
if avId in self.avAwareness:
self._removeGoalsReAvatar(avId)
del self.avAwareness[avId]
def _handleAvatarArrive(self, avId):
PetBrain.notify.debug('%s._handleAvatarArrive: %s' % (self.pet.doId, avId))
if avId in self.nearbyAvs:
PetBrain.notify.warning('%s already in self.nearbyAvs' % avId)
return
self.nearbyAvs[avId] = None
excitement = 0.3
if avId == self.pet.ownerId:
excitement = 0.7
        self.pet.lerpMoods({'excitement': excitement,
'loneliness': -.4})
self._considerBecomeAwareOf(avId)
return
def _handleAvatarLeave(self, avId):
PetBrain.notify.debug('%s._handleAvatarLeave: %s' % (self.pet.doId, avId))
if avId not in self.nearbyAvs:
PetBrain.notify.warning('av %s not in self.nearbyAvs' % avId)
return
del self.nearbyAvs[avId]
self.pet.lerpMoods({'loneliness': 0.1})
self._removeAwarenessOf(avId)
def _handleOwnerLeave(self):
self.pet.teleportOut()
taskMgr.doMethodLater(PetConstants.TELEPORT_OUT_DURATION, self.pet.requestDelete, self.getTeleportTaskName())
def _handleEstateOwnerLeave(self):
self.pet.teleportOut()
taskMgr.doMethodLater(PetConstants.TELEPORT_OUT_DURATION, self.pet.requestDelete, self.getTeleportTaskName())
|
mit
|
bitcraft/matrix
|
matrix.py
|
1
|
6431
|
""" Matrix Screen Effect
(c) 2016, Leif Theden, [email protected]
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from itertools import chain
from itertools import product
from math import sqrt
from os.path import join
from random import random, randrange, choice
import pygame
# Configuration
charset = """abcdefghijklmnopqrstuvwxzy0123456789$+-*/=%"'#&_(),.;:?!\|{}<>[]^~"""
font_name = join('resources', 'matrix code nfi.otf')
background = join('resources', 'python.png')
font_size = 32
screen_size = 640, 480
glyph_width = 14
glyph_height = 16
grid_spacing_x = 2
grid_spacing_y = 2
streamers = 0
computed_values = list()
layout = list()
glyphs = list()
burn_set = set()
max_streamers = 0
cache = list()
save_to_disk = 0
logo = None
class Glyph(object):
pos = 0, 0
ttl = 0
index = 0
def calc_color(value):
value *= 255.
if value > 190:
value1 = int(round(value))
value2 = int((value * 255) / 300)
return value2, value1, value2
else:
value1 = int((value * 255.) / 300.)
value2 = int((value * 255) / 800)
return value2, value1, value2
def burn_glyph(glyph):
glyph.ttl = len(computed_values) - 1
burn_set.add(glyph)
def update_burners():
# go through the bright streamers
old_set = burn_set.copy()
to_remove = set()
for glyph in old_set:
value = computed_values[glyph.ttl]
if value < .85:
to_remove.add(glyph)
x = glyph.pos[0] // (glyph_width + grid_spacing_x)
y = glyph.pos[1] // (glyph_height + grid_spacing_y)
try:
new = layout[y + 1][x]
burn_glyph(new)
except IndexError:
pass
elif not glyph.ttl:
to_remove.add(glyph)
# remove the cells that are fading
burn_set.difference_update(to_remove)
# add new glyphs to make up for lost ones
# using random in this way prevents horizontal lines
current = len(burn_set)
ratio = min(1, (float(current) / max_streamers / 2))
if current < max_streamers and random() > ratio:
glyph = choice(layout[0])
burn_glyph(glyph)
def init_screen(width, height):
global screen_size
screen_size = width, height
return pygame.display.set_mode(screen_size, pygame.RESIZABLE)
def init_grid(width, height):
global max_streamers
global glyphs
global layout
global logo
logo = pygame.image.load(background).convert_alpha()
logo = pygame.transform.smoothscale(logo, screen_size)
cell_width = glyph_width + grid_spacing_x
cell_height = glyph_height + grid_spacing_y
grid_width = width // cell_width
grid_height = height // cell_height
max_streamers = int((grid_width * grid_height) / 10)
layout = [[None] * grid_width for i in range(grid_height)]
for y, x in product(range(grid_height), range(grid_width)):
glyph = Glyph()
glyph.ttl = randrange(len(computed_values))
glyph.index = randrange(len(charset))
glyph.pos = x * cell_width, y * cell_height
layout[y][x] = glyph
glyphs = [i for i in chain.from_iterable(layout)]
def generate_images(font):
# generate a scanline image to create scanline effect
scanline = pygame.Surface((glyph_width, glyph_height), pygame.SRCALPHA)
for y in range(0, glyph_height, 2):
pygame.draw.line(scanline, (0, 0, 0, 128), (0, y), (glyph_width, y))
    # render all characters ahead of time
for char in charset:
chars = list()
cache.append(chars)
for value in computed_values:
color = calc_color(value)
temp = font.render(char, 1, color)
temp = pygame.transform.smoothscale(temp, (glyph_width, glyph_height))
temp.blit(scanline, (0, 0))
image = pygame.Surface(temp.get_size())
image.blit(temp, (0, 0))
chars.append(image)
def compute_curve():
# compute the color curve for the streamers
time = 0.
duration = 5000.
prog = 0
while prog < 1:
prog = min(1., time / duration)
p = prog - 1.0
value = 1. - sqrt(1.0 - p * p)
computed_values.insert(0, value)
time += 16
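# Illustrative check of the curve computed above (an added note, not part of
# the original file): with duration=5000 and a 16 ms step,
#   prog = 0 -> p = -1 -> value = 1.0   (brightest, the streamer head)
#   prog = 1 -> p =  0 -> value = 0.0   (fully faded)
# Because each value is inserted at index 0, computed_values runs from dim
# (index 0) to bright (last index); burn_glyph() starts a glyph at the last
# index and the main loop counts ttl down toward 0.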
def main():
pygame.init()
pygame.font.init()
screen = init_screen(*screen_size)
font = pygame.font.Font(font_name, font_size)
clock = pygame.time.Clock()
compute_curve()
generate_images(font)
init_grid(*screen_size)
frame_number = 0
running = True
while running:
if not save_to_disk:
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.VIDEORESIZE:
if not screen_size == (event.w, event.h):
screen = init_screen(event.w, event.h)
init_grid(event.w, event.h)
burn_set.clear()
elif event.type == pygame.QUIT:
running = False
update_burners()
# update and draw grid to the screen
screen_blit = screen.blit
for glyph in (i for i in glyphs if i.ttl):
# have random chance to change the glyph
if random() > .9:
glyph.index = randrange(len(charset))
            # update the glyph's life and image
# if it becomes 0, then it won't be updated next frame
glyph.ttl -= 1
# get image and draw it
screen_blit(cache[glyph.index][glyph.ttl], glyph.pos)
screen_blit(logo, (0, 0), None, pygame.BLEND_RGBA_MULT)
if save_to_disk:
filename = "snapshot%05d.tga" % frame_number
pygame.image.save(screen, filename)
frame_number += 1
else:
pygame.display.flip()
if __name__ == "__main__":
main()
|
gpl-3.0
|
thomashaw/SecGen
|
modules/utilities/unix/audit_tools/ghidra/files/release/Ghidra/Features/Python/data/jython-2.7.1/Lib/_LWPCookieJar.py
|
109
|
6555
|
"""Load / save to libwww-perl (LWP) format files.
Actually, the format is slightly extended from that used by LWP's
(libwww-perl's) HTTP::Cookies, to avoid losing some RFC 2965 information
not recorded by LWP.
It uses the version string "2.0", though really there isn't an LWP Cookies
2.0 format. This indicates that there is extra information in here
(domain_dot and port_spec) while still being compatible with
libwww-perl, I hope.
"""
import time, re
from cookielib import (_warn_unhandled_exception, FileCookieJar, LoadError,
Cookie, MISSING_FILENAME_TEXT,
join_header_words, split_header_words,
iso2time, time2isoz)
def lwp_cookie_str(cookie):
"""Return string representation of Cookie in an the LWP cookie file format.
Actually, the format is extended a bit -- see module docstring.
"""
h = [(cookie.name, cookie.value),
("path", cookie.path),
("domain", cookie.domain)]
if cookie.port is not None: h.append(("port", cookie.port))
if cookie.path_specified: h.append(("path_spec", None))
if cookie.port_specified: h.append(("port_spec", None))
if cookie.domain_initial_dot: h.append(("domain_dot", None))
if cookie.secure: h.append(("secure", None))
if cookie.expires: h.append(("expires",
time2isoz(float(cookie.expires))))
if cookie.discard: h.append(("discard", None))
if cookie.comment: h.append(("comment", cookie.comment))
if cookie.comment_url: h.append(("commenturl", cookie.comment_url))
keys = cookie._rest.keys()
keys.sort()
for k in keys:
h.append((k, str(cookie._rest[k])))
h.append(("version", str(cookie.version)))
return join_header_words([h])
class LWPCookieJar(FileCookieJar):
"""
The LWPCookieJar saves a sequence of "Set-Cookie3" lines.
"Set-Cookie3" is the format used by the libwww-perl libary, not known
to be compatible with any browser, but which is easy to read and
doesn't lose information about RFC 2965 cookies.
Additional methods
    as_lwp_str(ignore_discard=True, ignore_expires=True)
"""
def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
"""Return cookies as a string of "\\n"-separated "Set-Cookie3" headers.
ignore_discard and ignore_expires: see docstring for FileCookieJar.save
"""
now = time.time()
r = []
for cookie in self:
if not ignore_discard and cookie.discard:
continue
if not ignore_expires and cookie.is_expired(now):
continue
r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
return "\n".join(r+[""])
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename, "w")
try:
# There really isn't an LWP Cookies 2.0 format, but this indicates
# that there is extra information in here (domain_dot and
# port_spec) while still being compatible with libwww-perl, I hope.
f.write("#LWP-Cookies-2.0\n")
f.write(self.as_lwp_str(ignore_discard, ignore_expires))
finally:
f.close()
def _really_load(self, f, filename, ignore_discard, ignore_expires):
magic = f.readline()
if not re.search(self.magic_re, magic):
msg = ("%r does not look like a Set-Cookie3 (LWP) format "
"file" % filename)
raise LoadError(msg)
now = time.time()
header = "Set-Cookie3:"
boolean_attrs = ("port_spec", "path_spec", "domain_dot",
"secure", "discard")
value_attrs = ("version",
"port", "path", "domain",
"expires",
"comment", "commenturl")
try:
while 1:
line = f.readline()
if line == "": break
if not line.startswith(header):
continue
line = line[len(header):].strip()
for data in split_header_words([line]):
name, value = data[0]
standard = {}
rest = {}
for k in boolean_attrs:
standard[k] = False
for k, v in data[1:]:
if k is not None:
lc = k.lower()
else:
lc = None
# don't lose case distinction for unknown fields
if (lc in value_attrs) or (lc in boolean_attrs):
k = lc
if k in boolean_attrs:
if v is None: v = True
standard[k] = v
elif k in value_attrs:
standard[k] = v
else:
rest[k] = v
h = standard.get
expires = h("expires")
discard = h("discard")
if expires is not None:
expires = iso2time(expires)
if expires is None:
discard = True
domain = h("domain")
domain_specified = domain.startswith(".")
c = Cookie(h("version"), name, value,
h("port"), h("port_spec"),
domain, domain_specified, h("domain_dot"),
h("path"), h("path_spec"),
h("secure"),
expires,
discard,
h("comment"),
h("commenturl"),
rest)
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
self.set_cookie(c)
except IOError:
raise
except Exception:
_warn_unhandled_exception()
raise LoadError("invalid Set-Cookie3 format file %r: %r" %
(filename, line))
|
gpl-3.0
|
bingerZhang/libyuv
|
setup_links.py
|
140
|
16181
|
#!/usr/bin/env python
# Copyright 2014 The LibYuv Project Authors. All rights reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Setup links to a Chromium checkout for Libyuv.
Libyuv shares a lot of dependencies and build tools with Chromium.
To do this, many of the paths of a Chromium checkout are emulated by creating
symlinks to files and directories. This script handles the setup of symlinks to
achieve this.
It's a modified copy of the similar script that lives in WebRTC.
It also handles cleanup of the legacy Subversion-based approach that was used
before Chrome switched over their master repo from Subversion to Git.
"""
import ctypes
import errno
import logging
import optparse
import os
import shelve
import shutil
import subprocess
import sys
import textwrap
DIRECTORIES = [
'build',
'buildtools',
'google_apis', # Needed by build/common.gypi.
'net',
'testing',
'third_party/android_testrunner',
'third_party/android_tools',
'third_party/binutils',
'third_party/libjpeg',
'third_party/libjpeg_turbo',
'third_party/libudev',
'third_party/llvm-build',
'third_party/nss',
'third_party/yasm',
'tools/android',
'tools/clang',
'tools/generate_library_loader',
'tools/gn',
'tools/gyp',
'tools/memory',
'tools/python',
'tools/valgrind',
'tools/win',
]
FILES = {
'.gn': None,
'tools/find_depot_tools.py': None,
'third_party/BUILD.gn': None,
}
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
CHROMIUM_CHECKOUT = os.path.join('chromium', 'src')
LINKS_DB = 'links'
# Version management to make future upgrades/downgrades easier to support.
SCHEMA_VERSION = 1
def query_yes_no(question, default=False):
"""Ask a yes/no question via raw_input() and return their answer.
Modified from http://stackoverflow.com/a/3041990.
"""
prompt = " [%s/%%s]: "
prompt = prompt % ('Y' if default is True else 'y')
prompt = prompt % ('N' if default is False else 'n')
if default is None:
default = 'INVALID'
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if choice == '' and default != 'INVALID':
return default
if 'yes'.startswith(choice):
return True
elif 'no'.startswith(choice):
return False
print "Please respond with 'yes' or 'no' (or 'y' or 'n')."
# Actions
class Action(object):
def __init__(self, dangerous):
self.dangerous = dangerous
def announce(self, planning):
"""Log a description of this action.
Args:
planning - True iff we're in the planning stage, False if we're in the
doit stage.
"""
pass
def doit(self, links_db):
"""Execute the action, recording what we did to links_db, if necessary."""
pass
class Remove(Action):
def __init__(self, path, dangerous):
super(Remove, self).__init__(dangerous)
self._priority = 0
self._path = path
def announce(self, planning):
log = logging.warn
filesystem_type = 'file'
if not self.dangerous:
log = logging.info
filesystem_type = 'link'
if planning:
log('Planning to remove %s: %s', filesystem_type, self._path)
else:
log('Removing %s: %s', filesystem_type, self._path)
def doit(self, _links_db):
os.remove(self._path)
class Rmtree(Action):
def __init__(self, path):
super(Rmtree, self).__init__(dangerous=True)
self._priority = 0
self._path = path
def announce(self, planning):
if planning:
logging.warn('Planning to remove directory: %s', self._path)
else:
logging.warn('Removing directory: %s', self._path)
def doit(self, _links_db):
if sys.platform.startswith('win'):
# shutil.rmtree() doesn't work on Windows if any of the directories are
# read-only, which svn repositories are.
subprocess.check_call(['rd', '/q', '/s', self._path], shell=True)
else:
shutil.rmtree(self._path)
class Makedirs(Action):
def __init__(self, path):
super(Makedirs, self).__init__(dangerous=False)
self._priority = 1
self._path = path
def doit(self, _links_db):
try:
os.makedirs(self._path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
class Symlink(Action):
def __init__(self, source_path, link_path):
super(Symlink, self).__init__(dangerous=False)
self._priority = 2
self._source_path = source_path
self._link_path = link_path
def announce(self, planning):
if planning:
logging.info(
'Planning to create link from %s to %s', self._link_path,
self._source_path)
else:
logging.debug(
'Linking from %s to %s', self._link_path, self._source_path)
def doit(self, links_db):
# Files not in the root directory need relative path calculation.
# On Windows, use absolute paths instead since NTFS doesn't seem to support
# relative paths for symlinks.
if sys.platform.startswith('win'):
source_path = os.path.abspath(self._source_path)
else:
if os.path.dirname(self._link_path) != self._link_path:
source_path = os.path.relpath(self._source_path,
os.path.dirname(self._link_path))
os.symlink(source_path, os.path.abspath(self._link_path))
links_db[self._source_path] = self._link_path
class LinkError(IOError):
"""Failed to create a link."""
pass
# Handles symlink creation on the different platforms.
if sys.platform.startswith('win'):
def symlink(source_path, link_path):
flag = 1 if os.path.isdir(source_path) else 0
if not ctypes.windll.kernel32.CreateSymbolicLinkW(
unicode(link_path), unicode(source_path), flag):
raise OSError('Failed to create symlink to %s. Notice that only NTFS '
'version 5.0 and up has all the needed APIs for '
'creating symlinks.' % source_path)
os.symlink = symlink
class LibyuvLinkSetup():
def __init__(self, links_db, force=False, dry_run=False, prompt=False):
self._force = force
self._dry_run = dry_run
self._prompt = prompt
self._links_db = links_db
def CreateLinks(self, on_bot):
logging.debug('CreateLinks')
# First, make a plan of action
actions = []
for source_path, link_path in FILES.iteritems():
actions += self._ActionForPath(
source_path, link_path, check_fn=os.path.isfile, check_msg='files')
for source_dir in DIRECTORIES:
actions += self._ActionForPath(
source_dir, None, check_fn=os.path.isdir,
check_msg='directories')
if not on_bot and self._force:
# When making the manual switch from legacy SVN checkouts to the new
# Git-based Chromium DEPS, the .gclient_entries file that contains cached
# URLs for all DEPS entries must be removed to avoid future sync problems.
entries_file = os.path.join(os.path.dirname(ROOT_DIR), '.gclient_entries')
if os.path.exists(entries_file):
actions.append(Remove(entries_file, dangerous=True))
actions.sort()
if self._dry_run:
for action in actions:
action.announce(planning=True)
logging.info('Not doing anything because dry-run was specified.')
sys.exit(0)
if any(a.dangerous for a in actions):
logging.warn('Dangerous actions:')
for action in (a for a in actions if a.dangerous):
action.announce(planning=True)
print
if not self._force:
logging.error(textwrap.dedent("""\
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
           A C T I O N   R E Q U I R E D
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
Because chromium/src is transitioning to Git (from SVN), we needed to
change the way that the Libyuv standalone checkout works. Instead of
individually syncing subdirectories of Chromium in SVN, we're now
syncing Chromium (and all of its DEPS, as defined by its own DEPS file),
into the `chromium/src` directory.
As such, all Chromium directories which are currently pulled by DEPS are
now replaced with a symlink into the full Chromium checkout.
To avoid disrupting developers, we've chosen to not delete your
directories forcibly, in case you have some work in progress in one of
them :).
ACTION REQUIRED:
Before running `gclient sync|runhooks` again, you must run:
%s%s --force
Which will replace all directories which now must be symlinks, after
prompting with a summary of the work-to-be-done.
"""), 'python ' if sys.platform.startswith('win') else '', sys.argv[0])
sys.exit(1)
elif self._prompt:
if not query_yes_no('Would you like to perform the above plan?'):
sys.exit(1)
for action in actions:
action.announce(planning=False)
action.doit(self._links_db)
if not on_bot and self._force:
logging.info('Completed!\n\nNow run `gclient sync|runhooks` again to '
'let the remaining hooks (that probably were interrupted) '
'execute.')
def CleanupLinks(self):
logging.debug('CleanupLinks')
for source, link_path in self._links_db.iteritems():
if source == 'SCHEMA_VERSION':
continue
if os.path.islink(link_path) or sys.platform.startswith('win'):
# os.path.islink() always returns false on Windows
# See http://bugs.python.org/issue13143.
logging.debug('Removing link to %s at %s', source, link_path)
if not self._dry_run:
if os.path.exists(link_path):
if sys.platform.startswith('win') and os.path.isdir(link_path):
subprocess.check_call(['rmdir', '/q', link_path], shell=True)
else:
os.remove(link_path)
del self._links_db[source]
@staticmethod
def _ActionForPath(source_path, link_path=None, check_fn=None,
check_msg=None):
"""Create zero or more Actions to link to a file or directory.
This will be a symlink on POSIX platforms. On Windows this requires
that NTFS is version 5.0 or higher (Vista or newer).
Args:
source_path: Path relative to the Chromium checkout root.
For readability, the path may contain slashes, which will
automatically be converted to the right path delimiter on Windows.
link_path: The location for the link to create. If omitted it will be the
same path as source_path.
check_fn: A function returning true if the type of filesystem object is
correct for the attempted call. Otherwise an error message with
check_msg will be printed.
check_msg: String used to inform the user of an invalid attempt to create
a file.
Returns:
A list of Action objects.
"""
def fix_separators(path):
if sys.platform.startswith('win'):
return path.replace(os.altsep, os.sep)
else:
return path
assert check_fn
assert check_msg
link_path = link_path or source_path
link_path = fix_separators(link_path)
source_path = fix_separators(source_path)
source_path = os.path.join(CHROMIUM_CHECKOUT, source_path)
if os.path.exists(source_path) and not check_fn:
raise LinkError('_LinkChromiumPath can only be used to link to %s: '
'Tried to link to: %s' % (check_msg, source_path))
if not os.path.exists(source_path):
logging.debug('Silently ignoring missing source: %s. This is to avoid '
'errors on platform-specific dependencies.', source_path)
return []
actions = []
if os.path.exists(link_path) or os.path.islink(link_path):
if os.path.islink(link_path):
actions.append(Remove(link_path, dangerous=False))
elif os.path.isfile(link_path):
actions.append(Remove(link_path, dangerous=True))
elif os.path.isdir(link_path):
actions.append(Rmtree(link_path))
else:
raise LinkError('Don\'t know how to plan: %s' % link_path)
# Create parent directories to the target link if needed.
target_parent_dirs = os.path.dirname(link_path)
if (target_parent_dirs and
target_parent_dirs != link_path and
not os.path.exists(target_parent_dirs)):
actions.append(Makedirs(target_parent_dirs))
actions.append(Symlink(source_path, link_path))
return actions
def _initialize_database(filename):
links_database = shelve.open(filename)
# Wipe the database if this version of the script ends up looking at a
# newer (future) version of the links db, just to be sure.
version = links_database.get('SCHEMA_VERSION')
if version and version != SCHEMA_VERSION:
logging.info('Found database with schema version %s while this script only '
'supports %s. Wiping previous database contents.', version,
SCHEMA_VERSION)
links_database.clear()
links_database['SCHEMA_VERSION'] = SCHEMA_VERSION
return links_database
def main():
on_bot = os.environ.get('CHROME_HEADLESS') == '1'
parser = optparse.OptionParser()
parser.add_option('-d', '--dry-run', action='store_true', default=False,
help='Print what would be done, but don\'t perform any '
'operations. This will automatically set logging to '
'verbose.')
parser.add_option('-c', '--clean-only', action='store_true', default=False,
help='Only clean previously created links, don\'t create '
'new ones. This will automatically set logging to '
'verbose.')
parser.add_option('-f', '--force', action='store_true', default=on_bot,
help='Force link creation. CAUTION: This deletes existing '
'folders and files in the locations where links are '
'about to be created.')
parser.add_option('-n', '--no-prompt', action='store_false', dest='prompt',
default=(not on_bot),
help='Prompt if we\'re planning to do a dangerous action')
parser.add_option('-v', '--verbose', action='store_const',
const=logging.DEBUG, default=logging.INFO,
help='Print verbose output for debugging.')
options, _ = parser.parse_args()
if options.dry_run or options.force or options.clean_only:
options.verbose = logging.DEBUG
logging.basicConfig(format='%(message)s', level=options.verbose)
# Work from the root directory of the checkout.
script_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_dir)
if sys.platform.startswith('win'):
def is_admin():
try:
return os.getuid() == 0
except AttributeError:
return ctypes.windll.shell32.IsUserAnAdmin() != 0
if not is_admin():
logging.error('On Windows, you now need to have administrator '
'privileges for the shell running %s (or '
'`gclient sync|runhooks`).\nPlease start another command '
'prompt as Administrator and try again.' % sys.argv[0])
return 1
if not os.path.exists(CHROMIUM_CHECKOUT):
logging.error('Cannot find a Chromium checkout at %s. Did you run "gclient '
'sync" before running this script?', CHROMIUM_CHECKOUT)
return 2
links_database = _initialize_database(LINKS_DB)
try:
symlink_creator = LibyuvLinkSetup(links_database, options.force,
options.dry_run, options.prompt)
symlink_creator.CleanupLinks()
if not options.clean_only:
symlink_creator.CreateLinks(on_bot)
except LinkError as e:
print >> sys.stderr, e.message
return 3
finally:
links_database.close()
return 0
if __name__ == '__main__':
sys.exit(main())
|
bsd-3-clause
|
dstufft/warehouse
|
warehouse/accounts/models.py
|
1
|
4012
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from citext import CIText
from sqlalchemy import (
CheckConstraint,
Column,
Enum,
ForeignKey,
Index,
UniqueConstraint,
Boolean,
DateTime,
Integer,
String,
)
from sqlalchemy import orm, select, sql
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.ext.hybrid import hybrid_property
from warehouse import db
from warehouse.sitemap.models import SitemapMixin
from warehouse.utils.attrs import make_repr
class UserFactory:
def __init__(self, request):
self.request = request
def __getitem__(self, username):
try:
return self.request.db.query(User).filter(User.username == username).one()
except NoResultFound:
raise KeyError from None
class DisableReason(enum.Enum):
CompromisedPassword = "password compromised"
class User(SitemapMixin, db.Model):
__tablename__ = "users"
__table_args__ = (
CheckConstraint("length(username) <= 50", name="users_valid_username_length"),
CheckConstraint(
"username ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'",
name="users_valid_username",
),
)
__repr__ = make_repr("username")
username = Column(CIText, nullable=False, unique=True)
name = Column(String(length=100), nullable=False)
password = Column(String(length=128), nullable=False)
password_date = Column(DateTime, nullable=True, server_default=sql.func.now())
is_active = Column(Boolean, nullable=False)
is_superuser = Column(Boolean, nullable=False)
date_joined = Column(DateTime, server_default=sql.func.now())
last_login = Column(DateTime, nullable=False, server_default=sql.func.now())
disabled_for = Column(
Enum(DisableReason, values_callable=lambda x: [e.value for e in x]),
nullable=True,
)
emails = orm.relationship(
"Email", backref="user", cascade="all, delete-orphan", lazy=False
)
@property
def primary_email(self):
primaries = [x for x in self.emails if x.primary]
if primaries:
return primaries[0]
@hybrid_property
def email(self):
primary_email = self.primary_email
return primary_email.email if primary_email else None
@email.expression
def email(self):
return (
select([Email.email])
.where((Email.user_id == self.id) & (Email.primary.is_(True)))
.as_scalar()
)
class UnverifyReasons(enum.Enum):
SpamComplaint = "spam complaint"
HardBounce = "hard bounce"
SoftBounce = "soft bounce"
class Email(db.ModelBase):
__tablename__ = "user_emails"
__table_args__ = (
UniqueConstraint("email", name="user_emails_email_key"),
Index("user_emails_user_id", "user_id"),
)
id = Column(Integer, primary_key=True, nullable=False)
user_id = Column(
UUID(as_uuid=True),
ForeignKey("users.id", deferrable=True, initially="DEFERRED"),
nullable=False,
)
email = Column(String(length=254), nullable=False)
primary = Column(Boolean, nullable=False)
verified = Column(Boolean, nullable=False)
# Deliverability information
unverify_reason = Column(
Enum(UnverifyReasons, values_callable=lambda x: [e.value for e in x]),
nullable=True,
)
transient_bounces = Column(Integer, nullable=False, server_default=sql.text("0"))
|
apache-2.0
|
madmachinations/discordbot
|
plugins/msglog/__init__.py
|
2
|
11208
|
#!/usr/bin/python
import __main__
import json
#===================================================================================================================
#PLUGIN CALLS
async def help_menu():
help_info = {}
help_info['title'] = 'Message log'
help_info['description'] = 'Search logged messages.'
return help_info
async def help_section():
help_info = {}
cmd_name = 'log'
help_info[cmd_name] = []
help_entry = {}
help_entry['command'] = 'deleted_by'
help_entry['args'] = '@someuser'
help_entry['description'] = 'Show all messages deleted or edited recently by @someuser.'
help_entry['perm_name'] = 'read_deleted'
help_info[cmd_name].append(help_entry)
help_entry = {}
help_entry['command'] = 'deleted_in'
help_entry['args'] = 'channel_name'
help_entry['description'] = 'Show all messages deleted or edited recently in channel_name.'
help_entry['perm_name'] = 'read_deleted'
help_info[cmd_name].append(help_entry)
return help_info
async def plugin_permissions():
perm_info = {}
this_perm = 'read_deleted'
perm_info[this_perm] = {}
perm_info[this_perm]['groups'] = [] #members/admins/owner
perm_info[this_perm]['groups'].append('admins')
return perm_info
async def server_setup_wizard():
return True
#===================================================================================================================
#SERVER EVENTS
async def server_join(server): pass
async def server_remove(server): pass
async def server_update(before,after): pass
async def server_connected(server): pass
#===================================================================================================================
#MESSAGE EVENTS
async def message_process(message):
bot_cmd_char = await __main__.get_cmd_char(message.server)
if(message.content.startswith(bot_cmd_char+'log')):
chk_user_perm = await __main__.has_perm_to_run(message.server,message,message.author.id,'msglog','read_deleted',True)
if(chk_user_perm == True):
proc_msg = await __main__.get_cmd_args(message.content)
proc_msg_length = len(proc_msg)
use_server_id = await __main__.hash_server_id(message.server.id)
found_messages = False
if(proc_msg[1] == "deleted_in"):
                # list messages recently deleted or edited in this channel, newest first
check_channel = await __main__.find_channel_arg(message.server,proc_msg[2],True)
if(check_channel != False):
use_channel_id = await __main__.hash_member_id(message.server.id,check_channel.id)
found_messages = []
get_msg = __main__.db.cursor()
get_msg.execute("SELECT * FROM deleted_msgs WHERE server_id=? AND channel_id=? ORDER BY del_date DESC LIMIT 23",(use_server_id,use_channel_id,))
for row in get_msg:
new_record = {}
new_record['channel_id'] = await __main__.decrypt_data(row['stored_channel_id'])
new_record['user_id'] = await __main__.decrypt_data(row['stored_user_id'])
new_record['del_date'] = row['del_date']
new_record['msg_date'] = row['msg_date']
new_record['msg_content'] = await __main__.decrypt_data(row['msg_content'])
new_record['msg_embeds'] = await __main__.decrypt_data(row['msg_embeds'])
new_record['new_content'] = await __main__.decrypt_data(row['new_content'])
new_record['new_embeds'] = await __main__.decrypt_data(row['new_embeds'])
found_messages.append(new_record)
else: await __main__.client.send_message(message.channel,'Sorry <@'+message.author.id+'>, I couldn\'t find a channel with that name. Please try again.')
if(proc_msg[1] == "deleted_by"):
                # list messages recently deleted or edited by this user, newest first
check_user = await __main__.find_user(message.server,proc_msg[2],True)
if(check_user != False):
use_member_id = await __main__.hash_member_id(message.server.id,check_user.id)
found_messages = []
get_msg = __main__.db.cursor()
get_msg.execute("SELECT * FROM deleted_msgs WHERE server_id=? AND user_id=? ORDER BY del_date DESC LIMIT 23",(use_server_id,use_member_id,))
for row in get_msg:
new_record = {}
new_record['channel_id'] = await __main__.decrypt_data(row['stored_channel_id'])
new_record['user_id'] = await __main__.decrypt_data(row['stored_user_id'])
new_record['del_date'] = row['del_date']
new_record['msg_date'] = row['msg_date']
new_record['msg_content'] = await __main__.decrypt_data(row['msg_content'])
new_record['msg_embeds'] = await __main__.decrypt_data(row['msg_embeds'])
new_record['new_content'] = await __main__.decrypt_data(row['new_content'])
new_record['new_embeds'] = await __main__.decrypt_data(row['new_embeds'])
found_messages.append(new_record)
else: await __main__.client.send_message(message.channel,'Sorry <@'+message.author.id+'>, I couldn\'t find a member with that name. Please try again.')
if(found_messages != False and len(found_messages) > 0):
icon_url = __main__.client.user.avatar_url
if(icon_url == None or icon_url == ""): icon_url = __main__.client.user.default_avatar_url
repost_title = 'Deleted/edited message log results'
em = __main__.discord.Embed(title=repost_title, description='Here are the logs of deleted or edited messages which were found:', colour=3447003)
em.set_author(name=__main__.client.user.id, icon_url=icon_url)
field_count = 0
for del_msg in found_messages:
use_author = await __main__.find_user(message.server,'<@'+del_msg['user_id']+'>',True)
use_channel = await __main__.find_channel_arg(message.server,'<#'+del_msg['channel_id']+'>',True)
if(use_author != False and use_channel != False):
use_date = await __main__.timestamp_to_date_short(del_msg['del_date'])
use_name = use_author.display_name+' ('+use_author.name+'#'+use_author.discriminator+') deleted in #'+use_channel.name+' at '+str(use_date)
use_content = del_msg['msg_content']
em.add_field(name=use_name,value=use_content,inline=False)
field_count = field_count + 1
await __main__.client.send_message(message.channel, embed=em)
elif(found_messages != False):
await __main__.client.send_message(message.channel,'Sorry <@'+message.author.id+'>, I couldn\'t find any deleted or edited message records to display.')
async def message_new(message): pass
async def message_edit(before,after):
use_server_id = await __main__.hash_server_id(before.server.id)
use_channel_id = await __main__.hash_member_id(before.server.id,before.channel.id)
save_channel_id = await __main__.encrypt_data(before.channel.id)
use_member_id = await __main__.hash_member_id(before.server.id,before.author.id)
save_member_id = await __main__.encrypt_data(before.author.id)
del_date = await __main__.current_timestamp()
msg_time = before.timestamp.timestamp()
save_msg_content = await __main__.encrypt_data(before.content)
save_msg_embeds = await __main__.encrypt_data(json.dumps(before.attachments))
save_new_content = await __main__.encrypt_data(after.content)
save_new_embeds = await __main__.encrypt_data(json.dumps(after.attachments))
save_dm = __main__.db.cursor()
save_dm.execute("INSERT INTO deleted_msgs (server_id,channel_id,stored_channel_id,user_id,stored_user_id,del_date,msg_date,msg_content,msg_embeds,new_content,new_embeds) VALUES (?,?,?,?,?,?,?,?,?,?,?)",(use_server_id,use_channel_id,save_channel_id,use_member_id,save_member_id,del_date,msg_time,save_msg_content,save_msg_embeds,save_new_content,save_new_embeds,))
__main__.db.commit()
async def message_delete(message):
use_server_id = await __main__.hash_server_id(message.server.id)
use_channel_id = await __main__.hash_member_id(message.server.id,message.channel.id)
save_channel_id = await __main__.encrypt_data(message.channel.id)
use_member_id = await __main__.hash_member_id(message.server.id,message.author.id)
save_member_id = await __main__.encrypt_data(message.author.id)
del_date = await __main__.current_timestamp()
msg_time = message.timestamp.timestamp()
save_msg_content = await __main__.encrypt_data(message.content)
save_msg_embeds = await __main__.encrypt_data(json.dumps(message.attachments))
save_dm = __main__.db.cursor()
save_dm.execute("INSERT INTO deleted_msgs (server_id,channel_id,stored_channel_id,user_id,stored_user_id,del_date,msg_date,msg_content,msg_embeds) VALUES (?,?,?,?,?,?,?,?,?)",(use_server_id,use_channel_id,save_channel_id,use_member_id,save_member_id,del_date,msg_time,save_msg_content,save_msg_embeds,))
__main__.db.commit()
async def message_typing(channel,user,datestamp): pass
#===================================================================================================================
#MESSAGE REACTION EVENTS
async def reaction_add(reaction,user): pass
async def reaction_remove(reaction,user): pass
#===================================================================================================================
#CHANNEL EVENTS
async def channel_create(channel): pass
async def channel_delete(channel): pass
async def channel_update(before,after): pass
#===================================================================================================================
#MEMBER EVENTS
async def member_join(member): pass
async def member_remove(member): pass
async def member_update(before,after): pass
async def member_voice_update(before,after): pass
async def member_ban(member): pass
async def member_unban(server,user): pass
#===================================================================================================================
#ROLE EVENTS
async def role_create(role): pass
async def role_delete(role): pass
async def role_update(before,after): pass
#===================================================================================================================
#EMOJI LIST EVENTS
async def emoji_list_update(before,after): pass
#===================================================================================================================
#GROUP CHAT EVENTS
async def group_join(channel,user): pass
async def group_remove(channel,user): pass
|
gpl-3.0
|
zhangg/docker
|
vendor/src/github.com/hashicorp/go-msgpack/codec/msgpack_test.py
|
1232
|
3478
|
#!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to check in the files).
import msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464646464.0,
False,
True,
None,
"someday",
"",
"bytestring",
1328176922000002000,
-2206187877999998000,
0,
-6795364578871345152
]
l1 = [
{ "true": True,
"false": False },
{ "true": "True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": "1234567890" },
{ True: "true", 8: False, "false": 0 }
]
l = []
l.extend(l0)
l.append(l0)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
packer = msgpack.Packer()
serialized = packer.pack(l[i])
f = open(os.path.join(destdir, str(i) + '.golden'), 'wb')
f.write(serialized)
f.close()
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('localhost', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: msgpack_test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
|
apache-2.0
|
chaosmaster/christian
|
commands/postboxfunctions.py
|
2
|
1419
|
from utils import Filehandler
from ConfigParser import SafeConfigParser
import re
class PostboxFunctions(object):
def __init__(self):
self.fhandler = Filehandler()
def help(self, channel, callback, msg=None, nck=None, hq=None, keys=None, **kwargs):
helpmsg = "!tell <user> - Store message in <user>s postbox.\n"
callback.msg(nck, helpmsg)
def tell(self, channel, callback, msg=None, nck=None, pb=None, **kwargs):
accessfile = pb.accessfile
if len(msg) < 2:
            callback.say(channel,'Syntax: !tell [recipient] [message]')
else:
try:
receipient = msg[0]
mbstatus=self.fhandler.onaccesslist(receipient, accessfile)
if mbstatus == 1:
msgstring=" ".join(msg[1:])
if re.search('[a-zA-Z0-9]+',msgstring) is not None:
#strip away / and .. from message
msgstring = msgstring.translate(None, './')
pb.savemessage(nck,receipient,msgstring)
callback.say(channel, 'Message saved')
else:
callback.say(channel, 'Message can\'t be empty')
else:
callback.say(channel,'Unknown user {0}'.format(receipient))
except:
raise Exception('Could not save message.')
|
gpl-3.0
|
HalcyonChimera/osf.io
|
api_tests/logs/serializers/test_serializers.py
|
17
|
1820
|
import pytest
from framework.auth import Auth
from osf.models import NodeLog
from api.logs.serializers import NodeLogSerializer
from osf_tests.factories import ProjectFactory, UserFactory
from tests.utils import make_drf_request_with_version
pytestmark = pytest.mark.django_db
class TestNodeLogSerializer:
# Regression test for https://openscience.atlassian.net/browse/PLAT-758
def test_serializing_log_with_legacy_non_registered_contributor_data(self, fake):
# Old logs store unregistered contributors in params as dictionaries of the form:
# {
# 'nr_email': <email>,
# 'nr_name': <name>,
# }
# This test ensures that the NodeLogSerializer can handle this legacy data.
project = ProjectFactory()
user = UserFactory()
request = make_drf_request_with_version()
nr_data = {'nr_email': fake.email(), 'nr_name': fake.name()}
log = project.add_log(
action=NodeLog.CONTRIB_ADDED,
auth=Auth(project.creator),
params={
'project': project._id,
'node': project._id,
'contributors': [user._id, nr_data],
}
)
serialized = NodeLogSerializer(log, context={'request': request}).data
contributor_data = serialized['data']['attributes']['params']['contributors']
# contributor_data will have two dicts:
# the first will be the registered contrib, 2nd will be non-reg contrib
reg_contributor_data, unreg_contributor_data = contributor_data
assert reg_contributor_data['id'] == user._id
assert reg_contributor_data['full_name'] == user.fullname
assert unreg_contributor_data['id'] is None
assert unreg_contributor_data['full_name'] == nr_data['nr_name']
|
apache-2.0
|
dvdmgl/django-pg-fts
|
testapp/models.py
|
2
|
1209
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from pg_fts.fields import TSVectorField
from django.db import models
@python_2_unicode_compatible
class TSQueryModel(models.Model):
title = models.CharField(max_length=50)
body = models.TextField()
sometext = models.CharField(max_length=50, null=True, blank=True)
tsvector = TSVectorField(('title', 'body'))
def __str__(self):
return self.title
@python_2_unicode_compatible
class TSMultidicModel(models.Model):
title = models.CharField(max_length=50)
body = models.TextField()
sometext = models.CharField(max_length=50, null=True, blank=True)
dictionary = models.CharField(
max_length=15,
choices=(('english', 'english'), ('portuguese', 'portuguese')),
default='english'
)
tsvector = TSVectorField((('title', 'A'), 'body'),
dictionary='dictionary')
def __str__(self):
return self.title
class Related(models.Model):
single = models.ForeignKey(TSQueryModel, blank=True, null=True)
multiple = models.ForeignKey(TSMultidicModel, blank=True, null=True)
|
bsd-2-clause
|
wkennington/rethinkdb
|
external/v8_3.30.33.16/build/gyp/test/win/gyptest-link-large-pdb.py
|
218
|
2332
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure msvs_large_pdb works correctly.
"""
import TestGyp
import struct
import sys
CHDIR = 'large-pdb'
def CheckImageAndPdb(test, image_basename, expected_page_size,
pdb_basename=None):
if not pdb_basename:
pdb_basename = image_basename + '.pdb'
test.built_file_must_exist(image_basename, chdir=CHDIR)
test.built_file_must_exist(pdb_basename, chdir=CHDIR)
# We expect the PDB to have the given page size. For full details of the
# header look here: https://code.google.com/p/pdbparser/wiki/MSF_Format
# We read the little-endian 4-byte unsigned integer at position 32 of the
# file.
pdb_path = test.built_file_path(pdb_basename, chdir=CHDIR)
pdb_file = open(pdb_path, 'rb')
pdb_file.seek(32, 0)
page_size = struct.unpack('<I', pdb_file.read(4))[0]
if page_size != expected_page_size:
print "Expected page size of %d, got %d for PDB file `%s'." % (
expected_page_size, page_size, pdb_path)
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
test.run_gyp('large-pdb.gyp', chdir=CHDIR)
test.build('large-pdb.gyp', 'large_pdb_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_exe.exe', 4096)
test.build('large-pdb.gyp', 'small_pdb_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'small_pdb_exe.exe', 1024)
test.build('large-pdb.gyp', 'large_pdb_dll', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_dll.dll', 4096)
test.build('large-pdb.gyp', 'small_pdb_dll', chdir=CHDIR)
CheckImageAndPdb(test, 'small_pdb_dll.dll', 1024)
test.build('large-pdb.gyp', 'large_pdb_implicit_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_implicit_exe.exe', 4096)
# This target has a different PDB name because it uses an
# 'msvs_large_pdb_path' variable.
test.build('large-pdb.gyp', 'large_pdb_variable_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_variable_exe.exe', 4096,
pdb_basename='foo.pdb')
# This target has a different output name because it uses 'product_name'.
test.build('large-pdb.gyp', 'large_pdb_product_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'bar.exe', 4096)
test.pass_test()
|
agpl-3.0
|
vsergeev/python-periphery
|
tests/test_spi.py
|
1
|
5681
|
import os
import sys
import periphery
from .test import ptest, pokay, passert, AssertRaises
if sys.version_info[0] == 3:
raw_input = input
spi_device = None
def test_arguments():
ptest()
# Invalid mode
with AssertRaises("invalid mode", ValueError):
periphery.SPI("/dev/spidev0.0", 4, int(1e6))
# Invalid bit order
with AssertRaises("invalid bit order", ValueError):
periphery.SPI("/dev/spidev0.0", 4, int(1e6), bit_order="blah")
def test_open_close():
ptest()
# Normal open (mode=1, max_speed = 100000)
spi = periphery.SPI(spi_device, 1, 100000)
# Confirm fd and defaults
passert("fd > 0", spi.fd > 0)
passert("mode is 1", spi.mode == 1)
passert("max speed is 100000", spi.max_speed == 100000)
passert("default bit_order is msb", spi.bit_order == "msb")
passert("default bits_per_word is 8", spi.bits_per_word == 8)
# Not going to try different bit order or bits per word, because not
# all SPI controllers support them
# Try modes 0, 1, 2, 3
spi.mode = 0
passert("spi mode is 0", spi.mode == 0)
spi.mode = 1
passert("spi mode is 1", spi.mode == 1)
spi.mode = 2
passert("spi mode is 2", spi.mode == 2)
spi.mode = 3
passert("spi mode is 3", spi.mode == 3)
# Try max speeds 100Khz, 500KHz, 1MHz, 2MHz
spi.max_speed = 100000
passert("max speed is 100KHz", spi.max_speed == 100000)
spi.max_speed = 500000
passert("max speed is 500KHz", spi.max_speed == 500000)
spi.max_speed = 1000000
passert("max speed is 1MHz", spi.max_speed == 1000000)
spi.max_speed = 2e6
passert("max speed is 2MHz", spi.max_speed == 2000000)
spi.close()
def test_loopback():
ptest()
spi = periphery.SPI(spi_device, 0, 100000)
# Try list transfer
buf_in = list(range(256)) * 4
buf_out = spi.transfer(buf_in)
passert("compare readback", buf_out == buf_in)
# Try bytearray transfer
buf_in = bytearray(buf_in)
buf_out = spi.transfer(buf_in)
passert("compare readback", buf_out == buf_in)
# Try bytes transfer
buf_in = bytes(bytearray(buf_in))
buf_out = spi.transfer(buf_in)
passert("compare readback", buf_out == buf_in)
spi.close()
def test_interactive():
ptest()
spi = periphery.SPI(spi_device, 0, 100000)
print("Starting interactive test. Get out your logic analyzer, buddy!")
raw_input("Press enter to continue...")
# Check tostring
print("SPI description: {}".format(str(spi)))
passert("interactive success", raw_input("SPI description looks ok? y/n ") == "y")
# Mode 0 transfer
raw_input("Press enter to start transfer...")
spi.transfer([0x55, 0xaa, 0x0f, 0xf0])
print("SPI data 0x55, 0xaa, 0x0f, 0xf0")
passert("interactive success", raw_input("SPI transfer speed <= 100KHz, mode 0 occurred? y/n ") == "y")
# Mode 1 transfer
spi.mode = 1
raw_input("Press enter to start transfer...")
spi.transfer([0x55, 0xaa, 0x0f, 0xf0])
print("SPI data 0x55, 0xaa, 0x0f, 0xf0")
passert("interactive success", raw_input("SPI transfer speed <= 100KHz, mode 1 occurred? y/n ") == "y")
# Mode 2 transfer
spi.mode = 2
raw_input("Press enter to start transfer...")
spi.transfer([0x55, 0xaa, 0x0f, 0xf0])
print("SPI data 0x55, 0xaa, 0x0f, 0xf0")
passert("interactive success", raw_input("SPI transfer speed <= 100KHz, mode 2 occurred? y/n ") == "y")
# Mode 3 transfer
spi.mode = 3
raw_input("Press enter to start transfer...")
spi.transfer([0x55, 0xaa, 0x0f, 0xf0])
print("SPI data 0x55, 0xaa, 0x0f, 0xf0")
passert("interactive success", raw_input("SPI transfer speed <= 100KHz, mode 3 occurred? y/n ") == "y")
spi.mode = 0
# 500KHz transfer
spi.max_speed = 500000
raw_input("Press enter to start transfer...")
spi.transfer([0x55, 0xaa, 0x0f, 0xf0])
print("SPI data 0x55, 0xaa, 0x0f, 0xf0")
passert("interactive success", raw_input("SPI transfer speed <= 500KHz, mode 0 occurred? y/n ") == "y")
# 1MHz transfer
spi.max_speed = 1000000
raw_input("Press enter to start transfer...")
spi.transfer([0x55, 0xaa, 0x0f, 0xf0])
print("SPI data 0x55, 0xaa, 0x0f, 0xf0")
passert("interactive success", raw_input("SPI transfer speed <= 1MHz, mode 0 occurred? y/n ") == "y")
spi.close()
if __name__ == "__main__":
if os.environ.get("CI") == "true":
test_arguments()
sys.exit(0)
if len(sys.argv) < 2:
print("Usage: python -m tests.test_spi <SPI device>")
print("")
print("[1/4] Arguments test: No requirements.")
print("[2/4] Open/close test: SPI device should be real.")
print("[3/4] Loopback test: SPI MISO and MOSI should be connected with a wire.")
print("[4/4] Interactive test: SPI MOSI, CLK, CS should be observed with an oscilloscope or logic analyzer.")
print("")
print("Hint: for Raspberry Pi 3, enable SPI0 with:")
print(" $ echo \"dtparam=spi=on\" | sudo tee -a /boot/config.txt")
print(" $ sudo reboot")
print("Use pins SPI0 MOSI (header pin 19), SPI0 MISO (header pin 21), SPI0 SCLK (header pin 23),")
print("connect a loopback between MOSI and MISO, and run this test with:")
print(" python -m tests.test_spi /dev/spidev0.0")
print("")
sys.exit(1)
spi_device = sys.argv[1]
test_arguments()
pokay("Arguments test passed.")
test_open_close()
pokay("Open/close test passed.")
test_loopback()
pokay("Loopback test passed.")
test_interactive()
pokay("Interactive test passed.")
pokay("All tests passed!")
|
mit
|
tomkrus007/Arduino
|
arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/packages/ordered_dict.py
|
1093
|
8936
|
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
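    # Illustrative sketch of that layout (an added note, not part of the
    # original backport):
    #   empty dict:          root = [root, root, None]          # sentinel only
    #   after od['a'] = 1:   link_a = [root, root, 'a']
    #                        root   = [link_a, link_a, None]    # PREV and NEXT both point at link_a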
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
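# Minimal usage sketch (editor's addition, not part of the upstream recipe):
# exercises insertion order and FIFO popitem() when the module is run directly.
if __name__ == '__main__':
    d = OrderedDict()
    d['first'] = 1
    d['second'] = 2
    d['third'] = 3
    assert d.keys() == ['first', 'second', 'third']  # insertion order preserved
    assert d.popitem(last=False) == ('first', 1)     # FIFO pop from the front
    assert d.keys() == ['second', 'third']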
|
lgpl-2.1
|
amith01994/intellij-community
|
python/lib/Lib/site-packages/django/db/models/fields/__init__.py
|
119
|
43384
|
import datetime
import decimal
import re
import time
import math
from itertools import tee
import django.utils.copycompat as copy
from django.db import connection
from django.db.models.fields.subclassing import LegacyConnection
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators
from django.utils.datastructures import DictWrapper
from django.utils.functional import curry
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, force_unicode, smart_str
from django.utils import datetime_safe
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be prepended to most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
BLANK_CHOICE_NONE = [("", "None")]
class FieldDoesNotExist(Exception):
pass
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
class Field(object):
"""Base class for all field types"""
__metaclass__ = LegacyConnection
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _(u'Value %r is not a valid choice.'),
'null': _(u'This field cannot be null.'),
'blank': _(u'This field cannot be blank.'),
}
    # Generic field type description, usually overridden by subclasses
def _description(self):
return _(u'Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if self.empty_strings_allowed and connection.features.interprets_empty_strings_as_nulls:
self.null = True
self.rel = rel
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date, self.unique_for_month = unique_for_date, unique_for_month
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self.validators = self.default_validators + validators
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
def __cmp__(self, other):
# This is needed because bisect does not take a comparison function.
return cmp(self.creation_counter, other.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
memodict[id(self)] = obj
return obj
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
def run_validators(self, value):
if value in validators.EMPTY_VALUES:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError, e:
if hasattr(e, 'code') and e.code in self.error_messages:
message = self.error_messages[e.code]
if e.params:
message = message % e.params
errors.append(message)
else:
errors.extend(e.messages)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self._choices and value:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(self.error_messages['invalid_choice'] % value)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'])
if not self.blank and value in validators.EMPTY_VALUES:
raise exceptions.ValidationError(self.error_messages['blank'])
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors from to_python
and validate are propagated. The correct value is returned if no error is
raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_type(self, connection):
"""
Returns the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific DATA_TYPES dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# an XMLField is represented by a TEXT column type, which is the same
# as the TextField Django field type, which means XMLField's
# get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return connection.creation.data_types[self.get_internal_type()] % data
except KeyError:
return None
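    # Illustrative sketch (editor's addition): a hypothetical custom field that
    # targets a backend-specific column type by overriding db_type() directly,
    # as the comments above describe, instead of relying on get_internal_type():
    #
    #     class MACAddressField(Field):
    #         def db_type(self, connection):
    #             return 'macaddr'  # PostgreSQL-specific column type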
def unique(self):
return self._unique or self.primary_key
unique = property(unique)
def set_attributes_from_name(self, name):
self.name = name
self.attname, self.column = self.get_attname_column()
if self.verbose_name is None and name:
self.verbose_name = name.replace('_', ' ')
def contribute_to_class(self, cls, name):
self.set_attributes_from_name(name)
self.model = cls
cls._meta.add_field(self)
if self.choices:
setattr(cls, 'get_%s_display' % self.name, curry(cls._get_FIELD_display, field=self))
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"Returns field's value just before saving."
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"Perform preliminary non-db specific value checks and conversions."
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
        Used by the default implementations of ``get_db_prep_save`` and
        ``get_db_prep_lookup``.
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"Returns field's value prepared for saving into a database."
return self.get_db_prep_value(value, connection=connection, prepared=False)
def get_prep_lookup(self, lookup_type, value):
"Perform preliminary non-db specific lookup checks and conversions"
if hasattr(value, 'prepare'):
return value.prepare()
if hasattr(value, '_prepare'):
return value._prepare()
if lookup_type in (
'regex', 'iregex', 'month', 'day', 'week_day', 'search',
'contains', 'icontains', 'iexact', 'startswith', 'istartswith',
'endswith', 'iendswith', 'isnull'
):
return value
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return self.get_prep_value(value)
elif lookup_type in ('range', 'in'):
return [self.get_prep_value(v) for v in value]
elif lookup_type == 'year':
try:
return int(value)
except ValueError:
raise ValueError("The __year lookup type requires an integer argument")
raise TypeError("Field has invalid lookup: %s" % lookup_type)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
"Returns field's value prepared for database lookup."
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabel_aliases method, it will need to
# be invoked before the final SQL is evaluated
if hasattr(value, 'relabel_aliases'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
if lookup_type in ('regex', 'iregex', 'month', 'day', 'week_day', 'search'):
return [value]
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return [self.get_db_prep_value(value, connection=connection, prepared=prepared)]
elif lookup_type in ('range', 'in'):
return [self.get_db_prep_value(v, connection=connection, prepared=prepared) for v in value]
elif lookup_type in ('contains', 'icontains'):
return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'iexact':
return [connection.ops.prep_for_iexact_query(value)]
elif lookup_type in ('startswith', 'istartswith'):
return ["%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type in ('endswith', 'iendswith'):
return ["%%%s" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'isnull':
return []
elif lookup_type == 'year':
if self.get_internal_type() == 'DateField':
return connection.ops.year_lookup_bounds_for_date_field(value)
else:
return connection.ops.year_lookup_bounds(value)
def has_default(self):
"Returns a boolean of whether this field has a default value."
return self.default is not NOT_PROVIDED
def get_default(self):
"Returns the default value for this field."
if self.has_default():
if callable(self.default):
return self.default()
return force_unicode(self.default, strings_only=True)
if not self.empty_strings_allowed or (self.null and not connection.features.interprets_empty_strings_as_nulls):
return None
return ""
def get_validator_unique_lookup_type(self):
return '%s__exact' % self.name
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
first_choice = include_blank and blank_choice or []
if self.choices:
return first_choice + list(self.choices)
rel_model = self.rel.to
if hasattr(self.rel, 'get_related_field'):
lst = [(getattr(x, self.rel.get_related_field().attname), smart_unicode(x)) for x in rel_model._default_manager.complex_filter(self.rel.limit_choices_to)]
else:
lst = [(x._get_pk_val(), smart_unicode(x)) for x in rel_model._default_manager.complex_filter(self.rel.limit_choices_to)]
return first_choice + lst
def get_choices_default(self):
return self.get_choices()
def get_flatchoices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
"Returns flattened choices with a default blank choice included."
first_choice = include_blank and blank_choice or []
return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_unicode(self._get_val_from_obj(obj))
def bind(self, fieldmapping, original, bound_field_class):
return bound_field_class(self, fieldmapping, original)
def _get_choices(self):
if hasattr(self._choices, 'next'):
choices, self._choices = tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice,value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=forms.CharField, **kwargs):
"Returns a django.forms.Field instance for this database Field."
defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = self.blank or not (self.has_default() or 'initial' in kwargs)
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in kwargs.keys():
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
return form_class(**defaults)
def value_from_object(self, obj):
"Returns the value of this field in the given model instance."
return getattr(obj, self.attname)
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u'This value must be an integer.'),
}
def __init__(self, *args, **kwargs):
assert kwargs.get('primary_key', False) is True, "%ss must have primary_key=True." % self.__class__.__name__
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "AutoField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(self.error_messages['invalid'])
def validate(self, value, model_instance):
pass
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def contribute_to_class(self, cls, name):
assert not cls._meta.has_auto_field, "A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u'This value must be either True or False.'),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
if 'default' not in kwargs and not kwargs.get('null'):
kwargs['default'] = False
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
            # if value is 1 or 0, then it's equal to True or False, but we want
# to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(self.error_messages['invalid'])
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(BooleanField, self).get_prep_lookup(lookup_type, value)
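    # Illustrative note (editor's addition): thanks to the special case above,
    # a raw request value such as '1' is coerced to True, so a hypothetical
    #     SomeModel.objects.filter(flag='1')
    # behaves like filter(flag=True) for scalar lookups on this field.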
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = self.null or not (self.has_default() or 'initial' in kwargs)
defaults = {'choices': self.get_choices(include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, basestring) or value is None:
return value
return smart_unicode(value)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _(u'Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
ansi_date_re = re.compile(r'^\d{4}-\d{1,2}-\d{1,2}$')
class DateField(Field):
description = _("Date (without time)")
empty_strings_allowed = False
default_error_messages = {
'invalid': _('Enter a valid date in YYYY-MM-DD format.'),
'invalid_date': _('Invalid date: %s'),
}
def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
        # HACK: auto_now_add/auto_now should be done as a default or in pre_save.
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
if not ansi_date_re.search(value):
raise exceptions.ValidationError(self.error_messages['invalid'])
# Now that we have the date string in YYYY-MM-DD format, check to make
# sure it's a valid date.
# We could use time.strptime here and catch errors, but datetime.date
# produces much friendlier error messages.
year, month, day = map(int, value.split('-'))
try:
return datetime.date(year, month, day)
except ValueError, e:
msg = self.error_messages['invalid_date'] % _(str(e))
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name):
super(DateField,self).contribute_to_class(cls, name)
if not self.null:
setattr(cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=True))
setattr(cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=False))
def get_prep_lookup(self, lookup_type, value):
# For "__month", "__day", and "__week_day" lookups, convert the value
# to an int so the database backend always sees a consistent type.
if lookup_type in ('month', 'day', 'week_day'):
return int(value)
return super(DateField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_date(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
if val is None:
data = ''
else:
data = datetime_safe.new_date(val).strftime("%Y-%m-%d")
return data
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
default_error_messages = {
'invalid': _(u'Enter a valid date/time in YYYY-MM-DD HH:MM[:ss[.uuuuuu]] format.'),
}
description = _("Date (with time)")
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
return datetime.datetime(value.year, value.month, value.day)
# Attempt to parse a datetime:
value = smart_str(value)
# split usecs, because they are not recognized by strptime.
if '.' in value:
try:
value, usecs = value.split('.')
usecs = int(usecs)
except ValueError:
raise exceptions.ValidationError(self.error_messages['invalid'])
else:
usecs = 0
kwargs = {'microsecond': usecs}
try: # Seconds are optional, so try converting seconds first.
return datetime.datetime(*time.strptime(value, '%Y-%m-%d %H:%M:%S')[:6],
**kwargs)
except ValueError:
try: # Try without seconds.
return datetime.datetime(*time.strptime(value, '%Y-%m-%d %H:%M')[:5],
**kwargs)
except ValueError: # Try without hour/minutes/seconds.
try:
return datetime.datetime(*time.strptime(value, '%Y-%m-%d')[:3],
**kwargs)
except ValueError:
raise exceptions.ValidationError(self.error_messages['invalid'])
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateTimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_datetime(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
if val is None:
data = ''
else:
d = datetime_safe.new_datetime(val)
data = d.strftime('%Y-%m-%d %H:%M:%S')
return data
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u'This value must be a decimal number.'),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None, decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
raise exceptions.ValidationError(self.error_messages['invalid'])
def _format(self, value):
if isinstance(value, basestring) or value is None:
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.util.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import util
return util.format_number(value, self.max_digits, self.decimal_places)
def get_db_prep_save(self, value, connection):
return connection.ops.value_to_db_decimal(self.to_python(value),
self.max_digits, self.decimal_places)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("E-mail address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 75)
CharField.__init__(self, *args, **kwargs)
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed twice
defaults = {
'form_class': forms.EmailField,
}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None, recursive=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
kwargs['max_length'] = kwargs.get('max_length', 100)
Field.__init__(self, verbose_name, name, **kwargs)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("This value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(self.error_messages['invalid'])
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("This value must be an integer."),
}
description = _("Integer")
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def get_prep_lookup(self, lookup_type, value):
if (lookup_type == 'gte' or lookup_type == 'lt') \
and isinstance(value, float):
value = math.ceil(value)
return super(IntegerField, self).get_prep_lookup(lookup_type, value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(self.error_messages['invalid'])
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT}
defaults.update(kwargs)
return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IP address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "IPAddressField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.IPAddressField}
defaults.update(kwargs)
return super(IPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("This value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value is None:
return None
if value in (True, False):
return bool(value)
if value in ('None',):
return None
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(self.error_messages['invalid'])
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(NullBooleanField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.NullBooleanField,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
description = _("Integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
description = _("Integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
super(SlugField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = _("Integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def get_prep_value(self, value):
if isinstance(value, basestring) or value is None:
return value
return smart_unicode(value)
def formfield(self, **kwargs):
defaults = {'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(Field):
description = _("Time")
empty_strings_allowed = False
default_error_messages = {
'invalid': _('Enter a valid time in HH:MM[:ss[.uuuuuu]] format.'),
}
def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
# Attempt to parse a datetime:
value = smart_str(value)
# split usecs, because they are not recognized by strptime.
if '.' in value:
try:
value, usecs = value.split('.')
usecs = int(usecs)
except ValueError:
raise exceptions.ValidationError(self.error_messages['invalid'])
else:
usecs = 0
kwargs = {'microsecond': usecs}
try: # Seconds are optional, so try converting seconds first.
return datetime.time(*time.strptime(value, '%H:%M:%S')[3:6],
**kwargs)
except ValueError:
try: # Try without seconds.
return datetime.time(*time.strptime(value, '%H:%M')[3:5],
**kwargs)
except ValueError:
raise exceptions.ValidationError(self.error_messages['invalid'])
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_time(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
if val is None:
data = ''
else:
data = val.strftime("%H:%M:%S")
return data
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
description = _("URL")
def __init__(self, verbose_name=None, name=None, verify_exists=True, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
CharField.__init__(self, verbose_name, name, **kwargs)
self.validators.append(validators.URLValidator(verify_exists=verify_exists))
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed twice
defaults = {
'form_class': forms.URLField,
}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
class XMLField(TextField):
description = _("XML text")
def __init__(self, verbose_name=None, name=None, schema_path=None, **kwargs):
self.schema_path = schema_path
Field.__init__(self, verbose_name, name, **kwargs)
|
apache-2.0
|
MaheshIBM/keystone
|
keystone/trust/core.py
|
2
|
9426
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Identity service."""
import abc
import six
from keystone.common import dependency
from keystone.common import manager
from keystone import config
from keystone import exception
from keystone.i18n import _
from keystone import notifications
from keystone.openstack.common import log
CONF = config.CONF
LOG = log.getLogger(__name__)
@dependency.requires('identity_api')
@dependency.provider('trust_api')
class Manager(manager.Manager):
"""Default pivot point for the Trust backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
_TRUST = "OS-TRUST:trust"
def __init__(self):
super(Manager, self).__init__(CONF.trust.driver)
@staticmethod
def _validate_redelegation(redelegated_trust, trust):
# Validate against:
# 0 < redelegation_count <= max_redelegation_count
max_redelegation_count = CONF.trust.max_redelegation_count
redelegation_depth = redelegated_trust.get('redelegation_count', 0)
if not (0 < redelegation_depth <= max_redelegation_count):
raise exception.Forbidden(
_('Remaining redelegation depth of %(redelegation_depth)d'
' out of allowed range of [0..%(max_count)d]'),
redelegation_depth=redelegation_depth,
max_count=max_redelegation_count)
        # remaining_uses must be None (i.e. not set) when redelegating
remaining_uses = trust.get('remaining_uses')
if remaining_uses is not None:
raise exception.Forbidden(
_('Field "remaining_uses" is set to %(value)s'
' while it must not be set in order to redelegate a trust'),
value=remaining_uses)
# expiry times
trust_expiry = trust.get('expires_at')
redelegated_expiry = redelegated_trust['expires_at']
if trust_expiry:
# redelegated trust is from backend and has no tzinfo
if redelegated_expiry < trust_expiry.replace(tzinfo=None):
raise exception.Forbidden(
_('Requested expiration time is more '
'than redelegated trust can provide'))
else:
trust['expires_at'] = redelegated_expiry
# trust roles is a subset of roles of the redelegated trust
parent_roles = set(role['id']
for role in redelegated_trust['roles'])
if not all(role['id'] in parent_roles for role in trust['roles']):
raise exception.Forbidden(
_('Some of requested roles are not in redelegated trust'))
def get_trust_pedigree(self, trust_id):
trust = self.driver.get_trust(trust_id)
trust_chain = [trust]
if trust and trust.get('redelegated_trust_id'):
trusts = self.driver.list_trusts_for_trustor(
trust['trustor_user_id'])
while trust_chain[-1].get('redelegated_trust_id'):
for t in trusts:
if t['id'] == trust_chain[-1]['redelegated_trust_id']:
trust_chain.append(t)
break
return trust_chain
def get_trust(self, trust_id, deleted=False):
trust = self.driver.get_trust(trust_id, deleted)
if trust and trust.get('redelegated_trust_id') and not deleted:
trust_chain = self.get_trust_pedigree(trust_id)
for parent, child in zip(trust_chain[1:], trust_chain):
self._validate_redelegation(parent, child)
try:
self.identity_api.assert_user_enabled(
parent['trustee_user_id'])
except (AssertionError, exception.NotFound):
raise exception.Forbidden(
_('One of the trust agents is disabled or deleted'))
return trust
@notifications.created(_TRUST)
def create_trust(self, trust_id, trust, roles, redelegated_trust=None):
"""Create a new trust.
:returns: a new trust
"""
# Default for initial trust in chain is max_redelegation_count
max_redelegation_count = CONF.trust.max_redelegation_count
requested_count = trust.get('redelegation_count')
redelegatable = (trust.pop('allow_redelegation', False)
and requested_count != 0)
if not redelegatable:
trust['redelegation_count'] = requested_count = 0
remaining_uses = trust.get('remaining_uses')
if remaining_uses is not None and remaining_uses <= 0:
msg = _('remaining_uses must be a positive integer or null.')
raise exception.ValidationError(msg)
else:
# Validate requested redelegation depth
if requested_count and requested_count > max_redelegation_count:
raise exception.Forbidden(
_('Requested redelegation depth of %(requested_count)d '
'is greater than allowed %(max_count)d'),
requested_count=requested_count,
max_count=max_redelegation_count)
# Decline remaining_uses
if 'remaining_uses' in trust:
                raise exception.ValidationError(
                    _('remaining_uses must not be set '
                      'if redelegation is allowed'))
if redelegated_trust:
trust['redelegated_trust_id'] = redelegated_trust['id']
remaining_count = redelegated_trust['redelegation_count'] - 1
# Validate depth consistency
if (redelegatable and requested_count and
requested_count != remaining_count):
msg = _('Modifying "redelegation_count" upon redelegation is '
'forbidden. Omitting this parameter is advised.')
raise exception.Forbidden(msg)
trust.setdefault('redelegation_count', remaining_count)
# Check entire trust pedigree validity
pedigree = self.get_trust_pedigree(redelegated_trust['id'])
for t in pedigree:
self._validate_redelegation(t, trust)
trust.setdefault('redelegation_count', max_redelegation_count)
return self.driver.create_trust(trust_id, trust, roles)
@notifications.deleted(_TRUST)
def delete_trust(self, trust_id):
"""Remove a trust.
:raises: keystone.exception.TrustNotFound
Recursively remove given and redelegated trusts
"""
trust = self.driver.get_trust(trust_id)
if not trust:
raise exception.TrustNotFound(trust_id)
trusts = self.driver.list_trusts_for_trustor(
trust['trustor_user_id'])
for t in trusts:
if t.get('redelegated_trust_id') == trust_id:
# recursive call to make sure all notifications are sent
try:
self.delete_trust(t['id'])
except exception.TrustNotFound:
                    # if the trust was deleted by a concurrent process,
                    # consistency must not suffer
pass
# end recursion
self.driver.delete_trust(trust_id)
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
@abc.abstractmethod
def create_trust(self, trust_id, trust, roles):
"""Create a new trust.
:returns: a new trust
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_trust(self, trust_id, deleted=False):
"""Get a trust by the trust id.
:param trust_id: the trust identifier
:type trust_id: string
:param deleted: return the trust even if it is deleted, expired, or
has no consumptions left
:type deleted: bool
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_trusts(self):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_trusts_for_trustee(self, trustee):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_trusts_for_trustor(self, trustor):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_trust(self, trust_id):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def consume_use(self, trust_id):
"""Consume one use when a trust was created with a limitation on its
uses, provided there are still uses available.
:raises: keystone.exception.TrustUseLimitReached,
keystone.exception.TrustNotFound
"""
raise exception.NotImplemented() # pragma: no cover
|
apache-2.0
|
nlholdem/icodoom
|
.venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/__init__.py
|
360
|
2852
|
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
from __future__ import absolute_import
import warnings
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
__author__ = 'Andrey Petrov ([email protected])'
__license__ = 'MIT'
__version__ = '1.16'
__all__ = (
'HTTPConnectionPool',
'HTTPSConnectionPool',
'PoolManager',
'ProxyManager',
'HTTPResponse',
'Retry',
'Timeout',
'add_stderr_logger',
'connection_from_url',
'disable_warnings',
'encode_multipart_formdata',
'get_host',
'make_headers',
'proxy_from_url',
)
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug('Added a stderr logging handler to logger: %s', __name__)
return handler
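# Usage sketch (editor's addition, not part of the upstream module): enable
# debug output from this vendored copy while troubleshooting. The import path
# below assumes the vendored location recorded for this file.
#
#     import logging
#     from pip._vendor.requests.packages import urllib3
#     urllib3.add_stderr_logger(logging.DEBUG)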
# ... Clean up.
del NullHandler
# All warning filters *must* be appended unless you're really certain that they
# shouldn't be: otherwise, it's very hard for users to use most Python
# mechanisms to silence them.
# SecurityWarning's always go off by default.
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
# SubjectAltNameWarning's should go off once per host
warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
append=True)
# SNIMissingWarnings should go off only once.
warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
def disable_warnings(category=exceptions.HTTPWarning):
"""
Helper for quickly disabling all urllib3 warnings.
"""
warnings.simplefilter('ignore', category)
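# Usage sketch (editor's addition): silence one specific warning class rather
# than all of them, e.g. when certificate verification is intentionally
# disabled against a test host with a self-signed certificate (for the
# vendored copy, import it under its own package path instead):
#
#     import urllib3
#     urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)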
|
gpl-3.0
|
deKupini/erp
|
addons/account/account.py
|
3
|
190066
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import itemgetter
import time
import openerp
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.osv import fields, osv, expression
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.exceptions import UserError
import openerp.addons.decimal_precision as dp
_logger = logging.getLogger(__name__)
def check_cycle(self, cr, uid, ids, context=None):
""" climbs the ``self._table.parent_id`` chains for 100 levels or
until it can't find any more parent(s)
Returns true if it runs out of parents (no cycle), false if
it can recurse 100 times without ending all chains
"""
level = 100
while len(ids):
cr.execute('SELECT DISTINCT parent_id '\
'FROM '+self._table+' '\
'WHERE id IN %s '\
'AND parent_id IS NOT NULL',(tuple(ids),))
ids = map(itemgetter(0), cr.fetchall())
if not level:
return False
level -= 1
return True
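# Illustrative note (editor's addition): this helper is typically referenced
# from a model's _constraints (e.g. on parent_id) so that writes which would
# close a parent/child loop are rejected before they are committed.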
class res_company(osv.osv):
_inherit = "res.company"
_columns = {
'income_currency_exchange_account_id': fields.many2one(
'account.account',
string="Gain Exchange Rate Account",
domain="[('type', '=', 'other')]",),
'expense_currency_exchange_account_id': fields.many2one(
'account.account',
string="Loss Exchange Rate Account",
domain="[('type', '=', 'other')]",),
}
class account_payment_term(osv.osv):
_name = "account.payment.term"
_description = "Payment Term"
_columns = {
'name': fields.char('Payment Term', translate=True, required=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the payment term without removing it."),
'note': fields.text('Description', translate=True),
'line_ids': fields.one2many('account.payment.term.line', 'payment_id', 'Terms', copy=True),
}
_defaults = {
'active': 1,
}
_order = "name"
def compute(self, cr, uid, id, value, date_ref=False, context=None):
if not date_ref:
date_ref = datetime.now().strftime('%Y-%m-%d')
pt = self.browse(cr, uid, id, context=context)
amount = value
result = []
obj_precision = self.pool.get('decimal.precision')
prec = obj_precision.precision_get(cr, uid, 'Account')
for line in pt.line_ids:
if line.value == 'fixed':
amt = round(line.value_amount, prec)
elif line.value == 'procent':
amt = round(value * (line.value_amount/100.0), prec)
elif line.value == 'balance':
amt = round(amount, prec)
if amt:
next_date = (datetime.strptime(date_ref, '%Y-%m-%d') + relativedelta(days=line.days))
if line.days2 < 0:
next_first_date = next_date + relativedelta(day=1,months=1) #Getting 1st of next month
next_date = next_first_date + relativedelta(days=line.days2)
if line.days2 > 0:
next_date += relativedelta(day=line.days2, months=1)
result.append( (next_date.strftime('%Y-%m-%d'), amt) )
amount -= amt
amount = reduce(lambda x,y: x+y[1], result, 0.0)
dist = round(value-amount, prec)
if dist:
result.append( (time.strftime('%Y-%m-%d'), dist) )
return result
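    # Worked sketch (editor's addition, not part of the original source): for a
    # term with a 'procent' line of 30% at 0 days and a 'balance' line at
    # 30 days, compute(cr, uid, term_id, 1000.0, '2015-01-15') yields roughly
    #     [('2015-01-15', 300.0), ('2015-02-14', 700.0)]
    # and any rounding remainder is appended as a final line dated today.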
class account_payment_term_line(osv.osv):
_name = "account.payment.term.line"
_description = "Payment Term Line"
_columns = {
'value': fields.selection([('procent', 'Percent'),
('balance', 'Balance'),
('fixed', 'Fixed Amount')], 'Computation',
required=True, help="""Select here the kind of valuation related to this payment term line. Note that you should have your last line with the type 'Balance' to ensure that the whole amount will be treated."""),
'value_amount': fields.float('Amount To Pay', digits_compute=dp.get_precision('Payment Term'), help="For percent enter a ratio between 0-100%."),
'days': fields.integer('Number of Days', required=True, help="Number of days to add before computation of the day of month." \
"If Date=15/01, Number of Days=22, Day of Month=-1, then the due date is 28/02."),
'days2': fields.integer('Day of the Month', required=True, help="Day of the month, set -1 for the last day of the current month. If it's positive, it gives the day of the next month. Set 0 for net days (otherwise it's based on the beginning of the month)."),
'payment_id': fields.many2one('account.payment.term', 'Payment Term', required=True, select=True, ondelete='cascade'),
}
_defaults = {
'value': 'balance',
'days': 30,
'days2': 0,
}
_order = "value desc,days"
def _check_percent(self, cr, uid, ids, context=None):
obj = self.browse(cr, uid, ids[0], context=context)
if obj.value == 'procent' and ( obj.value_amount < 0.0 or obj.value_amount > 100.0):
return False
return True
_constraints = [
(_check_percent, 'Percentages for Payment Term Line must be between 0 and 100.', ['value_amount']),
]
class account_account_type(osv.osv):
_name = "account.account.type"
_description = "Account Type"
def _get_financial_report_ref(self, cr, uid, context=None):
obj_data = self.pool.get('ir.model.data')
obj_financial_report = self.pool.get('account.financial.report')
financial_report_ref = {}
for key, financial_report in [
('asset','account_financial_report_assets0'),
('liability','account_financial_report_liability0'),
('income','account_financial_report_income0'),
('expense','account_financial_report_expense0'),
]:
try:
financial_report_ref[key] = obj_financial_report.browse(cr, uid,
obj_data.get_object_reference(cr, uid, 'account', financial_report)[1],
context=context)
except ValueError:
pass
return financial_report_ref
def _get_current_report_type(self, cr, uid, ids, name, arg, context=None):
res = {}
financial_report_ref = self._get_financial_report_ref(cr, uid, context=context)
for record in self.browse(cr, uid, ids, context=context):
res[record.id] = 'none'
for key, financial_report in financial_report_ref.items():
list_ids = [x.id for x in financial_report.account_type_ids]
if record.id in list_ids:
res[record.id] = key
return res
def _save_report_type(self, cr, uid, account_type_id, field_name, field_value, arg, context=None):
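# Inverse of _get_current_report_type: 'field_value' is one of 'asset',
# 'liability', 'income', 'expense' or 'none'. The ORM many2many commands
# (3, id) and (4, id) respectively unlink and link the account type
# from/to the matching financial report.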
field_value = field_value or 'none'
obj_financial_report = self.pool.get('account.financial.report')
#unlink it if it exists somewhere in the financial reports related to BS or PL
financial_report_ref = self._get_financial_report_ref(cr, uid, context=context)
for key, financial_report in financial_report_ref.items():
list_ids = [x.id for x in financial_report.account_type_ids]
if account_type_id in list_ids:
obj_financial_report.write(cr, uid, [financial_report.id], {'account_type_ids': [(3, account_type_id)]})
#write it in the right place
if field_value != 'none':
return obj_financial_report.write(cr, uid, [financial_report_ref[field_value].id], {'account_type_ids': [(4, account_type_id)]})
_columns = {
'name': fields.char('Account Type', required=True, translate=True),
'code': fields.char('Code', size=32, required=True, select=True),
'close_method': fields.selection([('none', 'None'), ('balance', 'Balance'), ('detail', 'Detail'), ('unreconciled', 'Unreconciled')], 'Carry Forward Method', required=True, help="""Set here the method that will be used to generate the end of year journal entries for all the accounts of this type.
'None' means that nothing will be done.
'Balance' will generally be used for cash accounts.
'Detail' will copy each existing journal item of the previous year, even the reconciled ones.
'Unreconciled' will copy only the journal items that were unreconciled on the first day of the new fiscal year."""),
'report_type': fields.function(_get_current_report_type, fnct_inv=_save_report_type, type='selection', string='P&L / BS Category', store=True,
selection= [('none','/'),
('income', _('Profit & Loss (Income account)')),
('expense', _('Profit & Loss (Expense account)')),
('asset', _('Balance Sheet (Asset account)')),
('liability', _('Balance Sheet (Liability account)'))], help="This field is used to generate legal reports: profit and loss, balance sheet.", required=True),
'note': fields.text('Description'),
}
_defaults = {
'close_method': 'none',
'report_type': 'none',
}
_order = "code"
def _code_get(self, cr, uid, context=None):
acc_type_obj = self.pool.get('account.account.type')
ids = acc_type_obj.search(cr, uid, [])
res = acc_type_obj.read(cr, uid, ids, ['code', 'name'], context=context)
return [(r['code'], r['name']) for r in res]
#----------------------------------------------------------
# Accounts
#----------------------------------------------------------
class account_account(osv.osv):
_order = "parent_left"
_parent_order = "code"
_name = "account.account"
_description = "Account"
_parent_store = True
def search(self, cr, uid, args, offset=0, limit=None, order=None,
context=None, count=False):
if context is None:
context = {}
pos = 0
while pos < len(args):
if args[pos][0] == 'code' and args[pos][1] in ('like', 'ilike') and args[pos][2]:
args[pos] = ('code', '=like', tools.ustr(args[pos][2].replace('%', ''))+'%')
if args[pos][0] == 'journal_id':
if not args[pos][2]:
del args[pos]
continue
jour = self.pool.get('account.journal').browse(cr, uid, args[pos][2], context=context)
if (not (jour.account_control_ids or jour.type_control_ids)) or not args[pos][2]:
args[pos] = ('type','not in',('consolidation','view'))
continue
ids3 = map(lambda x: x.id, jour.type_control_ids)
ids1 = super(account_account, self).search(cr, uid, [('user_type', 'in', ids3)])
ids1 += map(lambda x: x.id, jour.account_control_ids)
args[pos] = ('id', 'in', ids1)
pos += 1
if context and 'consolidate_children' in context: #add consolidated children of accounts
ids = super(account_account, self).search(cr, uid, args, offset, limit,
order, context=context, count=count)
for consolidate_child in self.browse(cr, uid, context['account_id'], context=context).child_consol_ids:
ids.append(consolidate_child.id)
return ids
return super(account_account, self).search(cr, uid, args, offset, limit,
order, context=context, count=count)
def _get_children_and_consol(self, cr, uid, ids, context=None):
#this function searches for all the children and all consolidated children (recursively) of the given account ids
ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)], context=context)
ids3 = []
for rec in self.browse(cr, uid, ids2, context=context):
for child in rec.child_consol_ids:
ids3.append(child.id)
if ids3:
ids3 = self._get_children_and_consol(cr, uid, ids3, context)
return ids2 + ids3
def __compute(self, cr, uid, ids, field_names, arg=None, context=None,
query='', query_params=()):
""" compute the balance, debit and/or credit for the provided
account ids
Arguments:
`ids`: account ids
`field_names`: the fields to compute (a list of any of
'balance', 'debit' and 'credit')
`arg`: unused fields.function stuff
`query`: additional query filter (as a string)
`query_params`: parameters for the provided query string
(__compute will handle their escaping) as a
tuple
"""
mapping = {
'balance': "COALESCE(SUM(l.debit),0) - COALESCE(SUM(l.credit), 0) as balance",
'debit': "COALESCE(SUM(l.debit), 0) as debit",
'credit': "COALESCE(SUM(l.credit), 0) as credit",
# by convention, foreign_balance is 0 when the account has no secondary currency, because the amounts may be in different currencies
'foreign_balance': "(SELECT CASE WHEN currency_id IS NULL THEN 0 ELSE COALESCE(SUM(l.amount_currency), 0) END FROM account_account WHERE id IN (l.account_id)) as foreign_balance",
}
#get all the necessary accounts
children_and_consolidated = self._get_children_and_consol(cr, uid, ids, context=context)
#compute for each account the balance/debit/credit from the move lines
accounts = {}
res = {}
null_result = dict((fn, 0.0) for fn in field_names)
if children_and_consolidated:
aml_query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)
wheres = [""]
if query.strip():
wheres.append(query.strip())
if aml_query.strip():
wheres.append(aml_query.strip())
filters = " AND ".join(wheres)
# IN might not work ideally in case there are too many
# children_and_consolidated, in that case join on a
# values() e.g.:
# SELECT l.account_id as id FROM account_move_line l
# INNER JOIN (VALUES (id1), (id2), (id3), ...) AS tmp (id)
# ON l.account_id = tmp.id
# or make _get_children_and_consol return a query and join on that
request = ("SELECT l.account_id as id, " +\
', '.join(mapping.values()) +
" FROM account_move_line l" \
" WHERE l.account_id IN %s " \
+ filters +
" GROUP BY l.account_id")
params = (tuple(children_and_consolidated),) + query_params
cr.execute(request, params)
for row in cr.dictfetchall():
accounts[row['id']] = row
# consolidate accounts with direct children
children_and_consolidated.reverse()
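# the account ids come back ordered by parent_left (the model's default
# order, parents before children), so reversing the list lets each
# child's sums be computed before its parent aggregates them below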
brs = list(self.browse(cr, uid, children_and_consolidated, context=context))
sums = {}
currency_obj = self.pool.get('res.currency')
while brs:
current = brs.pop(0)
# can_compute = True
# for child in current.child_id:
# if child.id not in sums:
# can_compute = False
# try:
# brs.insert(0, brs.pop(brs.index(child)))
# except ValueError:
# brs.insert(0, child)
# if can_compute:
for fn in field_names:
sums.setdefault(current.id, {})[fn] = accounts.get(current.id, {}).get(fn, 0.0)
for child in current.child_id:
if child.company_id.currency_id.id == current.company_id.currency_id.id:
sums[current.id][fn] += sums[child.id][fn]
else:
sums[current.id][fn] += currency_obj.compute(cr, uid, child.company_id.currency_id.id, current.company_id.currency_id.id, sums[child.id][fn], context=context)
# as we have to rely on values computed earlier, this is calculated separately from the previous fields
if current.currency_id and current.exchange_rate and \
('adjusted_balance' in field_names or 'unrealized_gain_loss' in field_names):
# Computing Adjusted Balance and Unrealized Gains and losses
# Adjusted Balance = Foreign Balance / Exchange Rate
# Unrealized Gains and losses = Adjusted Balance - Balance
adj_bal = sums[current.id].get('foreign_balance', 0.0) / current.exchange_rate
sums[current.id].update({'adjusted_balance': adj_bal, 'unrealized_gain_loss': adj_bal - sums[current.id].get('balance', 0.0)})
for id in ids:
res[id] = sums.get(id, null_result)
else:
for id in ids:
res[id] = null_result
return res
def _get_company_currency(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for rec in self.browse(cr, uid, ids, context=context):
result[rec.id] = (rec.company_id.currency_id.id,rec.company_id.currency_id.symbol)
return result
def _get_child_ids(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for record in self.browse(cr, uid, ids, context=context):
if record.child_parent_ids:
result[record.id] = [x.id for x in record.child_parent_ids]
else:
result[record.id] = []
if record.child_consol_ids:
for acc in record.child_consol_ids:
if acc.id not in result[record.id]:
result[record.id].append(acc.id)
return result
def _get_level(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
#we may not know the level of the parent at the time of computation, so we
# can't simply do res[account.id] = account.parent_id.level + 1
level = 0
parent = account.parent_id
while parent:
level += 1
parent = parent.parent_id
res[account.id] = level
return res
def _set_credit_debit(self, cr, uid, account_id, name, value, arg, context=None):
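# Inverse for the 'credit'/'debit' function fields: writing a value here
# creates or adjusts an 'Opening Balance' journal item on the account,
# using the centralized opening (situation) journal and the special
# opening/closing period of the company.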
if context.get('config_invisible', True):
return True
account = self.browse(cr, uid, account_id, context=context)
diff = value - getattr(account,name)
if not diff:
return True
journal_obj = self.pool.get('account.journal')
jids = journal_obj.search(cr, uid, [('type','=','situation'),('centralisation','=',1),('company_id','=',account.company_id.id)], context=context)
if not jids:
raise UserError(_("You need an Opening journal with centralisation checked to set the initial balance."))
period_obj = self.pool.get('account.period')
pids = period_obj.search(cr, uid, [('special','=',True),('company_id','=',account.company_id.id)], context=context)
if not pids:
raise UserError(_("There is no opening/closing period defined, please create one to set the initial balance."))
move_obj = self.pool.get('account.move.line')
move_id = move_obj.search(cr, uid, [
('journal_id','=',jids[0]),
('period_id','=',pids[0]),
('account_id','=', account_id),
(name,'>', 0.0),
('name','=', _('Opening Balance'))
], context=context)
if move_id:
move = move_obj.browse(cr, uid, move_id[0], context=context)
move_obj.write(cr, uid, move_id[0], {
name: diff+getattr(move,name)
}, context=context)
else:
if diff<0.0:
raise UserError(_("Unable to adapt the initial balance (negative value)."))
nameinv = (name=='credit' and 'debit') or 'credit'
move_id = move_obj.create(cr, uid, {
'name': _('Opening Balance'),
'account_id': account_id,
'journal_id': jids[0],
'period_id': pids[0],
name: diff,
nameinv: 0.0
}, context=context)
return True
_columns = {
'name': fields.char('Name', required=True, select=True),
'currency_id': fields.many2one('res.currency', 'Secondary Currency', help="Forces all moves for this account to have this secondary currency."),
'code': fields.char('Code', size=64, required=True, select=1),
'type': fields.selection([
('view', 'View'),
('other', 'Regular'),
('receivable', 'Receivable'),
('payable', 'Payable'),
('liquidity','Liquidity'),
('consolidation', 'Consolidation'),
('closed', 'Closed'),
], 'Internal Type', required=True, help="The 'Internal Type' is used for features available on "\
"different types of accounts: view can not have journal items, consolidation are accounts that "\
"can have children accounts for multi-company consolidations, payable/receivable are for "\
"partners accounts (for debit/credit computations), closed for depreciated accounts."),
'user_type': fields.many2one('account.account.type', 'Account Type', required=True,
help="Account Type is used for information purpose, to generate "
"country-specific legal reports, and set the rules to close a fiscal year and generate opening entries."),
'financial_report_ids': fields.many2many('account.financial.report', 'account_account_financial_report', 'account_id', 'report_line_id', 'Financial Reports'),
'parent_id': fields.many2one('account.account', 'Parent', ondelete='cascade', domain=[('type','=','view')]),
'child_parent_ids': fields.one2many('account.account','parent_id','Children'),
'child_consol_ids': fields.many2many('account.account', 'account_account_consol_rel', 'child_id', 'parent_id', 'Consolidated Children'),
'child_id': fields.function(_get_child_ids, type='many2many', relation="account.account", string="Child Accounts"),
'balance': fields.function(__compute, digits_compute=dp.get_precision('Account'), string='Balance', multi='balance'),
'credit': fields.function(__compute, fnct_inv=_set_credit_debit, digits_compute=dp.get_precision('Account'), string='Credit', multi='balance'),
'debit': fields.function(__compute, fnct_inv=_set_credit_debit, digits_compute=dp.get_precision('Account'), string='Debit', multi='balance'),
'foreign_balance': fields.function(__compute, digits_compute=dp.get_precision('Account'), string='Foreign Balance', multi='balance',
help="Total amount (in Secondary currency) for transactions held in secondary currency for this account."),
'adjusted_balance': fields.function(__compute, digits_compute=dp.get_precision('Account'), string='Adjusted Balance', multi='balance',
help="Total amount (in Company currency) for transactions held in secondary currency for this account."),
'unrealized_gain_loss': fields.function(__compute, digits_compute=dp.get_precision('Account'), string='Unrealized Gain or Loss', multi='balance',
help="Value of Loss or Gain due to changes in exchange rate when doing multi-currency transactions."),
'reconcile': fields.boolean('Allow Reconciliation', help="Check this box if this account allows reconciliation of journal items."),
'exchange_rate': fields.related('currency_id', 'rate', type='float', string='Exchange Rate', digits=(12,6)),
'shortcut': fields.char('Shortcut', size=12),
'tax_ids': fields.many2many('account.tax', 'account_account_tax_default_rel',
'account_id', 'tax_id', 'Default Taxes'),
'note': fields.text('Internal Notes'),
'company_currency_id': fields.function(_get_company_currency, type='many2one', relation='res.currency', string='Company Currency'),
'company_id': fields.many2one('res.company', 'Company', required=True),
'active': fields.boolean('Active', select=2, help="If the active field is set to False, it will allow you to hide the account without removing it."),
'parent_left': fields.integer('Parent Left', select=1),
'parent_right': fields.integer('Parent Right', select=1),
'currency_mode': fields.selection([('current', 'At Date'), ('average', 'Average Rate')], 'Outgoing Currencies Rate',
help=
'This will select how the current currency rate for outgoing transactions is computed. '\
'In most countries the legal method is "average" but only a few software systems are able to '\
'manage this. So if you import from another software system you may have to use the rate at date. ' \
'Incoming transactions always use the rate at date.', \
required=True),
'level': fields.function(_get_level, string='Level', method=True, type='integer',
store={
'account.account': (_get_children_and_consol, ['level', 'parent_id'], 10),
}),
}
_defaults = {
'type': 'other',
'reconcile': False,
'active': True,
'currency_mode': 'current',
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'account.account', context=c),
}
def _check_recursion(self, cr, uid, ids, context=None):
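# Detect cycles introduced through the consolidation relation: starting
# from the account, walk both the consolidated children and the regular
# children and fail if the account or its parent ever reappears.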
obj_self = self.browse(cr, uid, ids[0], context=context)
p_id = obj_self.parent_id and obj_self.parent_id.id
if (obj_self in obj_self.child_consol_ids) or (p_id and (p_id == obj_self.id)):
return False
while(ids):
cr.execute('SELECT DISTINCT child_id '\
'FROM account_account_consol_rel '\
'WHERE parent_id IN %s', (tuple(ids),))
child_ids = map(itemgetter(0), cr.fetchall())
c_ids = child_ids
if (p_id and (p_id in c_ids)) or (obj_self.id in c_ids):
return False
while len(c_ids):
s_ids = self.search(cr, uid, [('parent_id', 'in', c_ids)])
if p_id and (p_id in s_ids):
return False
c_ids = s_ids
ids = child_ids
return True
def _check_type(self, cr, uid, ids, context=None):
if context is None:
context = {}
accounts = self.browse(cr, uid, ids, context=context)
for account in accounts:
if account.child_id and account.type not in ('view', 'consolidation'):
return False
return True
def _check_account_type(self, cr, uid, ids, context=None):
for account in self.browse(cr, uid, ids, context=context):
if account.type in ('receivable', 'payable') and account.user_type.close_method != 'unreconciled':
return False
return True
def _check_company_account(self, cr, uid, ids, context=None):
for account in self.browse(cr, uid, ids, context=context):
if account.parent_id:
if account.company_id != account.parent_id.company_id:
return False
return True
_constraints = [
(_check_recursion, 'Error!\nYou cannot create recursive accounts.', ['parent_id']),
(_check_type, 'Configuration Error!\nYou cannot define children to an account with internal type different of "View".', ['type']),
(_check_account_type, 'Configuration Error!\nYou cannot select an account type with a `carry forward` method different of "Unreconciled" for accounts with internal type "Payable/Receivable".', ['user_type','type']),
(_check_company_account, 'Error!\nYou cannot create an account which has parent account of different company.', ['parent_id']),
]
_sql_constraints = [
('code_company_uniq', 'unique (code,company_id)', 'The code of the account must be unique per company !')
]
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
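# Name search supports two prefixes: 'partner:<id>' restricts the
# candidates to the partner's payable/receivable accounts, and
# 'type:<type>' restricts on the internal type. Otherwise the search
# matches on the code prefix, the shortcut or the name, falling back to
# a "code name" split when nothing is found.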
if not args:
args = []
args = args[:]
ids = []
try:
if name and str(name).startswith('partner:'):
part_id = int(name.split(':')[1])
part = self.pool.get('res.partner').browse(cr, user, part_id, context=context)
args += [('id', 'in', (part.property_account_payable.id, part.property_account_receivable.id))]
name = False
if name and str(name).startswith('type:'):
type = name.split(':')[1]
args += [('type', '=', type)]
name = False
except:
pass
if name:
if operator not in expression.NEGATIVE_TERM_OPERATORS:
plus_percent = lambda n: n+'%'
code_op, code_conv = {
'ilike': ('=ilike', plus_percent),
'like': ('=like', plus_percent),
}.get(operator, (operator, lambda n: n))
ids = self.search(cr, user, ['|', ('code', code_op, code_conv(name)), '|', ('shortcut', '=', name), ('name', operator, name)]+args, limit=limit)
if not ids and len(name.split()) >= 2:
#Separating code and name of account for searching
operand1,operand2 = name.split(' ',1) #name can contain spaces e.g. OpenERP S.A.
ids = self.search(cr, user, [('code', operator, operand1), ('name', operator, operand2)]+ args, limit=limit)
else:
ids = self.search(cr, user, ['&','!', ('code', '=like', name+"%"), ('name', operator, name)]+args, limit=limit)
# as a negation is meant to restrict, only refine when we already have results
if ids and len(name.split()) >= 2:
operand1,operand2 = name.split(' ',1) #name can contain spaces e.g. OpenERP S.A.
ids = self.search(cr, user, [('code', operator, operand1), ('name', operator, operand2), ('id', 'in', ids)]+ args, limit=limit)
else:
ids = self.search(cr, user, args, context=context, limit=limit)
return self.name_get(cr, user, ids, context=context)
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name', 'code'], context=context)
res = []
for record in reads:
name = record['name']
if record['code']:
name = record['code'] + ' ' + name
res.append((record['id'], name))
return res
def copy(self, cr, uid, id, default=None, context=None, done_list=None, local=False):
default = {} if default is None else default.copy()
if done_list is None:
done_list = []
account = self.browse(cr, uid, id, context=context)
new_child_ids = []
default.update(code=_("%s (copy)") % (account['code'] or ''))
if not local:
done_list = []
if account.id in done_list:
return False
done_list.append(account.id)
if account:
for child in account.child_id:
child_ids = self.copy(cr, uid, child.id, default, context=context, done_list=done_list, local=True)
if child_ids:
new_child_ids.append(child_ids)
default['child_parent_ids'] = [(6, 0, new_child_ids)]
else:
default['child_parent_ids'] = False
return super(account_account, self).copy(cr, uid, id, default, context=context)
def _check_moves(self, cr, uid, ids, method, context=None):
line_obj = self.pool.get('account.move.line')
account_ids = self.search(cr, uid, [('id', 'child_of', ids)], context=context)
if line_obj.search(cr, uid, [('account_id', 'in', account_ids)], context=context):
if method == 'write':
raise UserError(_('You cannot deactivate an account that contains journal items.'))
elif method == 'unlink':
raise UserError(_('You cannot remove an account that contains journal items.'))
#Checking whether the account is set as a property to any Partner or not
values = ['account.account,%s' % (account_id,) for account_id in ids]
partner_prop_acc = self.pool.get('ir.property').search(cr, uid, [('value_reference','in', values)], context=context)
if partner_prop_acc:
raise UserError(_('You cannot remove/deactivate an account which is set on a customer or supplier.'))
return True
def _check_allow_type_change(self, cr, uid, ids, new_type, context=None):
restricted_groups = ['consolidation','view']
line_obj = self.pool.get('account.move.line')
for account in self.browse(cr, uid, ids, context=context):
old_type = account.type
account_ids = self.search(cr, uid, [('id', 'child_of', [account.id])])
if line_obj.search(cr, uid, [('account_id', 'in', account_ids)]):
#Check for 'Closed' type
if old_type == 'closed' and new_type !='closed':
raise UserError(_("You cannot change the type of account from 'Closed' to any other type as it contains journal items!"))
# Forbid changing the account type to one of the restricted groups as the account (or one of its children) contains journal items
if (new_type in restricted_groups):
raise UserError(_("You cannot change the type of account to '%s' type as it contains journal items!") % (new_type,))
return True
# For legal reasons (it is forbidden to modify journal entries which belong to a closed fiscal year or period),
# forbid modifying the code of an account if journal entries have already been posted on it. This cannot simply
# be made 'configurable' since it could lead to a lack of confidence in Odoo, and this is what we want to avoid.
def _check_allow_code_change(self, cr, uid, ids, context=None):
line_obj = self.pool.get('account.move.line')
for account in self.browse(cr, uid, ids, context=context):
account_ids = self.search(cr, uid, [('id', 'child_of', [account.id])], context=context)
if line_obj.search(cr, uid, [('account_id', 'in', account_ids)], context=context):
raise UserError(_("You cannot change the code of account which contains journal items!"))
return True
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if not ids:
return True
if isinstance(ids, (int, long)):
ids = [ids]
# Don't allow changing the company_id when account move lines already exist
if 'company_id' in vals:
move_lines = self.pool.get('account.move.line').search(cr, uid, [('account_id', 'in', ids)], context=context)
if move_lines:
# Allow the write if the value is the same
for i in [i['company_id'][0] for i in self.read(cr,uid,ids,['company_id'], context=context)]:
if vals['company_id']!=i:
raise UserError(_('You cannot change the owner company of an account that already contains journal items.'))
if 'active' in vals and not vals['active']:
self._check_moves(cr, uid, ids, "write", context=context)
if 'type' in vals.keys():
self._check_allow_type_change(cr, uid, ids, vals['type'], context=context)
if 'code' in vals.keys():
self._check_allow_code_change(cr, uid, ids, context=context)
return super(account_account, self).write(cr, uid, ids, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
self._check_moves(cr, uid, ids, "unlink", context=context)
return super(account_account, self).unlink(cr, uid, ids, context=context)
class account_journal(osv.osv):
_name = "account.journal"
_description = "Journal"
_columns = {
'with_last_closing_balance': fields.boolean('Opening With Last Closing Balance', help="For cash or bank journals, this option should be unchecked when the starting balance should always be set to 0 for new documents."),
'name': fields.char('Journal Name', required=True),
'code': fields.char('Code', size=5, required=True, help="The code will be displayed on reports."),
'type': fields.selection([('sale', 'Sale'),('sale_refund','Sale Refund'), ('purchase', 'Purchase'), ('purchase_refund','Purchase Refund'), ('cash', 'Cash'), ('bank', 'Bank and Checks'), ('general', 'General'), ('situation', 'Opening/Closing Situation')], 'Type', size=32, required=True,
help="Select 'Sale' for customer invoices journals."\
" Select 'Purchase' for supplier invoices journals."\
" Select 'Cash' or 'Bank' for journals that are used in customer or supplier payments."\
" Select 'General' for miscellaneous operations journals."\
" Select 'Opening/Closing Situation' for entries generated for new fiscal years."),
'type_control_ids': fields.many2many('account.account.type', 'account_journal_type_rel', 'journal_id','type_id', 'Type Controls', domain=[('code','<>','view'), ('code', '<>', 'closed')]),
'account_control_ids': fields.many2many('account.account', 'account_account_type_rel', 'journal_id','account_id', 'Account', domain=[('type','<>','view'), ('type', '<>', 'closed')]),
'default_credit_account_id': fields.many2one('account.account', 'Default Credit Account', domain="[('type','!=','view')]", help="It acts as a default account for credit amount"),
'default_debit_account_id': fields.many2one('account.account', 'Default Debit Account', domain="[('type','!=','view')]", help="It acts as a default account for debit amount"),
'centralisation': fields.boolean('Centralized Counterpart', help="Check this box to determine that each entry of this journal won't create a new counterpart but will share the same counterpart. This is used in fiscal year closing."),
'update_posted': fields.boolean('Allow Cancelling Entries', help="Check this box if you want to allow the cancellation of the entries related to this journal or of the invoices related to this journal"),
'group_invoice_lines': fields.boolean('Group Invoice Lines', help="If this box is checked, the system will try to group the accounting lines when generating them from invoices."),
'sequence_id': fields.many2one('ir.sequence', 'Entry Sequence', help="This field contains the information related to the numbering of the journal entries of this journal.", required=True, copy=False),
'user_id': fields.many2one('res.users', 'User', help="The user responsible for this journal"),
'groups_id': fields.many2many('res.groups', 'account_journal_group_rel', 'journal_id', 'group_id', 'Groups'),
'currency': fields.many2one('res.currency', 'Currency', help='The currency used to enter statements'),
'entry_posted': fields.boolean('Autopost Created Moves', help='Check this box to automatically post entries of this journal. Note that legally, some entries may be automatically posted when the source document is validated (Invoices), whatever the status of this field.'),
'company_id': fields.many2one('res.company', 'Company', required=True, select=1, help="Company related to this journal"),
'allow_date':fields.boolean('Check Date in Period', help= 'If checked, the entry won\'t be created if the entry date is not included in the selected period'),
'profit_account_id' : fields.many2one('account.account', 'Profit Account'),
'loss_account_id' : fields.many2one('account.account', 'Loss Account'),
'internal_account_id' : fields.many2one('account.account', 'Internal Transfers Account', select=1),
'cash_control' : fields.boolean('Cash Control', help='Check this option if the journal should be controlled at opening/closing'),
'analytic_journal_id':fields.many2one('account.analytic.journal','Analytic Journal', help="Journal for analytic entries"),
'sequence': fields.integer('Sequence',help='Used to order Journals'),
}
_defaults = {
'cash_control' : False,
'with_last_closing_balance' : True,
'user_id': lambda self, cr, uid, context: uid,
'company_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
'sequence': 1,
}
_sql_constraints = [
('code_company_uniq', 'unique (code, company_id)', 'The code of the journal must be unique per company !'),
('name_company_uniq', 'unique (name, company_id)', 'The name of the journal must be unique per company !'),
]
_order = 'sequence,code'
def _check_currency(self, cr, uid, ids, context=None):
for journal in self.browse(cr, uid, ids, context=context):
if journal.currency:
if journal.default_credit_account_id and not journal.default_credit_account_id.currency_id.id == journal.currency.id:
return False
if journal.default_debit_account_id and not journal.default_debit_account_id.currency_id.id == journal.currency.id:
return False
return True
_constraints = [
(_check_currency, 'Configuration error!\nThe currency chosen should be shared by the default accounts too.', ['currency','default_debit_account_id','default_credit_account_id']),
]
def copy(self, cr, uid, id, default=None, context=None):
default = dict(default or {})
journal = self.browse(cr, uid, id, context=context)
default.update(
code=_("%s (copy)") % (journal['code'] or ''),
name=_("%s (copy)") % (journal['name'] or ''))
return super(account_journal, self).copy(cr, uid, id, default, context=context)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
for journal in self.browse(cr, uid, ids, context=context):
if 'company_id' in vals and journal.company_id.id != vals['company_id']:
move_lines = self.pool.get('account.move.line').search(cr, uid, [('journal_id', 'in', ids)])
if move_lines:
raise UserError(_('This journal already contains items, therefore you cannot modify its company field.'))
return super(account_journal, self).write(cr, uid, ids, vals, context=context)
def create_sequence(self, cr, uid, vals, context=None):
""" Create new no_gap entry sequence for every new Joural
"""
# in account.journal code is actually the prefix of the sequence
# whereas ir.sequence code is a key to lookup global sequences.
prefix = vals['code'].upper()
seq = {
'name': vals['name'],
'implementation':'no_gap',
'prefix': prefix + "/%(year)s/",
'padding': 4,
'number_increment': 1,
'use_date_range': True,
}
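# e.g. a journal code 'SAJ' would presumably yield entry numbers such as
# 'SAJ/2016/0001' once ir.sequence interpolates %(year)s and applies the
# 4-digit padding.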
if 'company_id' in vals:
seq['company_id'] = vals['company_id']
return self.pool.get('ir.sequence').create(cr, uid, seq)
def create(self, cr, uid, vals, context=None):
if not 'sequence_id' in vals or not vals['sequence_id']:
# if we have the right to create a journal, we should be able to
# create its sequence.
vals.update({'sequence_id': self.create_sequence(cr, SUPERUSER_ID, vals, context)})
return super(account_journal, self).create(cr, uid, vals, context)
def name_get(self, cr, user, ids, context=None):
"""
Returns a list of tuples containing id, name.
result format: [(id, name), (id, name), ...]
@param cr: A database cursor
@param user: ID of the user currently logged in
@param ids: list of ids for which name should be read
@param context: context arguments, like lang, time zone
@return: Returns a list of tuples containing id, name
"""
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
result = self.browse(cr, user, ids, context=context)
res = []
for rs in result:
if rs.currency:
currency = rs.currency
else:
currency = rs.company_id.currency_id
name = "%s (%s)" % (rs.name, currency.name)
res += [(rs.id, name)]
return res
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = [('code', operator, name), ('name', operator, name)]
else:
domain = ['|', ('code', operator, name), ('name', operator, name)]
ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
class account_fiscalyear(osv.osv):
_name = "account.fiscalyear"
_description = "Fiscal Year"
_columns = {
'name': fields.char('Fiscal Year', required=True),
'code': fields.char('Code', size=6, required=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
'date_start': fields.date('Start Date', required=True),
'date_stop': fields.date('End Date', required=True),
'period_ids': fields.one2many('account.period', 'fiscalyear_id', 'Periods'),
'state': fields.selection([('draft','Open'), ('done','Closed')], 'Status', readonly=True, copy=False),
'end_journal_period_id': fields.many2one(
'account.journal.period', 'End of Year Entries Journal',
readonly=True, copy=False),
}
_defaults = {
'state': 'draft',
'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
}
_order = "date_start, id"
def _check_duration(self, cr, uid, ids, context=None):
obj_fy = self.browse(cr, uid, ids[0], context=context)
if obj_fy.date_stop < obj_fy.date_start:
return False
return True
_constraints = [
(_check_duration, 'Error!\nThe start date of a fiscal year must precede its end date.', ['date_start','date_stop'])
]
def create_period3(self, cr, uid, ids, context=None):
return self.create_period(cr, uid, ids, context, 3)
def create_period(self, cr, uid, ids, context=None, interval=1):
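# Creates one zero-length special 'Opening Period' dated at the fiscal
# year start, then regular periods of 'interval' months (3 for
# create_period3) until the fiscal year's date_stop is reached.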
period_obj = self.pool.get('account.period')
for fy in self.browse(cr, uid, ids, context=context):
ds = datetime.strptime(fy.date_start, '%Y-%m-%d')
period_obj.create(cr, uid, {
'name': "%s %s" % (_('Opening Period'), ds.strftime('%Y')),
'code': ds.strftime('00/%Y'),
'date_start': ds,
'date_stop': ds,
'special': True,
'fiscalyear_id': fy.id,
})
while ds.strftime('%Y-%m-%d') < fy.date_stop:
de = ds + relativedelta(months=interval, days=-1)
if de.strftime('%Y-%m-%d') > fy.date_stop:
de = datetime.strptime(fy.date_stop, '%Y-%m-%d')
period_obj.create(cr, uid, {
'name': ds.strftime('%m/%Y'),
'code': ds.strftime('%m/%Y'),
'date_start': ds.strftime('%Y-%m-%d'),
'date_stop': de.strftime('%Y-%m-%d'),
'fiscalyear_id': fy.id,
})
ds = ds + relativedelta(months=interval)
return True
def find(self, cr, uid, dt=None, exception=True, context=None):
res = self.finds(cr, uid, dt, exception, context=context)
return res and res[0] or False
def finds(self, cr, uid, dt=None, exception=True, context=None):
if context is None: context = {}
if not dt:
dt = fields.date.context_today(self,cr,uid,context=context)
args = [('date_start', '<=' ,dt), ('date_stop', '>=', dt)]
if context.get('company_id', False):
company_id = context['company_id']
else:
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
args.append(('company_id', '=', company_id))
ids = self.search(cr, uid, args, context=context)
if not ids:
if exception:
model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_fiscalyear')
msg = _('No accounting period is covering this date: %s.') % dt
raise openerp.exceptions.RedirectWarning(msg, action_id, _(' Configure Fiscal Year Now'))
else:
return []
return ids
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=80):
if args is None:
args = []
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = [('code', operator, name), ('name', operator, name)]
else:
domain = ['|', ('code', operator, name), ('name', operator, name)]
ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
class account_period(osv.osv):
_name = "account.period"
_description = "Account period"
_columns = {
'name': fields.char('Period Name', required=True),
'code': fields.char('Code', size=12),
'special': fields.boolean('Opening/Closing Period',help="These periods can overlap."),
'date_start': fields.date('Start of Period', required=True, states={'done':[('readonly',True)]}),
'date_stop': fields.date('End of Period', required=True, states={'done':[('readonly',True)]}),
'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', required=True, states={'done':[('readonly',True)]}, select=True),
'state': fields.selection([('draft','Open'), ('done','Closed')], 'Status', readonly=True, copy=False,
help='When monthly periods are created, the status is \'Draft\'. At the end of the monthly period it is in \'Done\' status.'),
'company_id': fields.related('fiscalyear_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
}
_defaults = {
'state': 'draft',
}
_order = "date_start, special desc"
_sql_constraints = [
('name_company_uniq', 'unique(name, company_id)', 'The name of the period must be unique per company!'),
]
def _check_duration(self,cr,uid,ids,context=None):
obj_period = self.browse(cr, uid, ids[0], context=context)
if obj_period.date_stop < obj_period.date_start:
return False
return True
def _check_year_limit(self,cr,uid,ids,context=None):
for obj_period in self.browse(cr, uid, ids, context=context):
if obj_period.special:
continue
if obj_period.fiscalyear_id.date_stop < obj_period.date_stop or \
obj_period.fiscalyear_id.date_stop < obj_period.date_start or \
obj_period.fiscalyear_id.date_start > obj_period.date_start or \
obj_period.fiscalyear_id.date_start > obj_period.date_stop:
return False
pids = self.search(cr, uid, [('date_stop','>=',obj_period.date_start),('date_start','<=',obj_period.date_stop),('special','=',False),('id','<>',obj_period.id)])
for period in self.browse(cr, uid, pids):
if period.fiscalyear_id.company_id.id==obj_period.fiscalyear_id.company_id.id:
return False
return True
_constraints = [
(_check_duration, 'Error!\nThe duration of the Period(s) is/are invalid.', ['date_stop']),
(_check_year_limit, 'Error!\nThe period is invalid. Either some periods are overlapping or the period\'s dates are not matching the scope of the fiscal year.', ['date_stop'])
]
@api.returns('self')
def next(self, cr, uid, period, step, context=None):
ids = self.search(cr, uid, [('date_start','>',period.date_start)])
if len(ids)>=step:
return ids[step-1]
return False
@api.returns('self')
def find(self, cr, uid, dt=None, context=None):
if context is None: context = {}
if not dt:
dt = fields.date.context_today(self, cr, uid, context=context)
args = [('date_start', '<=' ,dt), ('date_stop', '>=', dt)]
if context.get('company_id', False):
args.append(('company_id', '=', context['company_id']))
else:
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
args.append(('company_id', '=', company_id))
result = []
if context.get('account_period_prefer_normal', True):
# look for non-special periods first, and fallback to all if no result is found
result = self.search(cr, uid, args + [('special', '=', False)], context=context)
if not result:
result = self.search(cr, uid, args, context=context)
if not result:
model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_period')
msg = _('No accounting period is covering this date: %s.') % dt
raise openerp.exceptions.RedirectWarning(msg, action_id, _('Configure Periods Now'))
return result
def action_draft(self, cr, uid, ids, context=None):
mode = 'draft'
for period in self.browse(cr, uid, ids):
if period.fiscalyear_id.state == 'done':
raise UserError(_('You cannot re-open a period which belongs to a closed fiscal year.'))
cr.execute('update account_journal_period set state=%s where period_id in %s', (mode, tuple(ids),))
cr.execute('update account_period set state=%s where id in %s', (mode, tuple(ids),))
self.invalidate_cache(cr, uid, context=context)
return True
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if args is None:
args = []
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = [('code', operator, name), ('name', operator, name)]
else:
domain = ['|', ('code', operator, name), ('name', operator, name)]
ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
def write(self, cr, uid, ids, vals, context=None):
if 'company_id' in vals:
move_lines = self.pool.get('account.move.line').search(cr, uid, [('period_id', 'in', ids)])
if move_lines:
raise UserError(_('This period already contains journal items, therefore you cannot modify its company field.'))
return super(account_period, self).write(cr, uid, ids, vals, context=context)
def build_ctx_periods(self, cr, uid, period_from_id, period_to_id):
if period_from_id == period_to_id:
return [period_from_id]
period_from = self.browse(cr, uid, period_from_id)
period_date_start = period_from.date_start
company1_id = period_from.company_id.id
period_to = self.browse(cr, uid, period_to_id)
period_date_stop = period_to.date_stop
company2_id = period_to.company_id.id
if company1_id != company2_id:
raise UserError(_('You should choose the periods that belong to the same company.'))
if period_date_start > period_date_stop:
raise UserError(_('The start period should precede the end period.'))
# /!\ We do not include a criterion on the company_id field below, to allow producing consolidated reports
# on multiple companies. It will only work when start/end periods are selected and no fiscal year is chosen.
#when period_from is January, we want to exclude the opening period (but it has the same start date, so we have to check whether period_from is special to decide whether to include that clause in the search).
if period_from.special:
return self.search(cr, uid, [('date_start', '>=', period_date_start), ('date_stop', '<=', period_date_stop)])
return self.search(cr, uid, [('date_start', '>=', period_date_start), ('date_stop', '<=', period_date_stop), ('special', '=', False)])
class account_journal_period(osv.osv):
_name = "account.journal.period"
_description = "Journal Period"
def _icon_get(self, cr, uid, ids, field_name, arg=None, context=None):
result = {}.fromkeys(ids, 'STOCK_NEW')
for r in self.read(cr, uid, ids, ['state']):
result[r['id']] = {
'draft': 'STOCK_NEW',
'printed': 'STOCK_PRINT_PREVIEW',
'done': 'STOCK_DIALOG_AUTHENTICATION',
}.get(r['state'], 'STOCK_NEW')
return result
_columns = {
'name': fields.char('Journal-Period Name', required=True),
'journal_id': fields.many2one('account.journal', 'Journal', required=True, ondelete="cascade"),
'period_id': fields.many2one('account.period', 'Period', required=True, ondelete="cascade"),
'icon': fields.function(_icon_get, string='Icon', type='char'),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the journal period without removing it."),
'state': fields.selection([('draft','Draft'), ('printed','Printed'), ('done','Done')], 'Status', required=True, readonly=True,
help='When the journal period is created, the status is \'Draft\'. If a report is printed, it moves to \'Printed\' status. When all transactions are done, it moves to \'Done\' status.'),
'fiscalyear_id': fields.related('period_id', 'fiscalyear_id', string='Fiscal Year', type='many2one', relation='account.fiscalyear'),
'company_id': fields.related('journal_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
}
def _check(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
cr.execute('select * from account_move_line where journal_id=%s and period_id=%s limit 1', (obj.journal_id.id, obj.period_id.id))
res = cr.fetchall()
if res:
raise UserError(_('You cannot modify/delete a journal with entries for this period.'))
return True
def write(self, cr, uid, ids, vals, context=None):
self._check(cr, uid, ids, context=context)
return super(account_journal_period, self).write(cr, uid, ids, vals, context=context)
def create(self, cr, uid, vals, context=None):
period_id = vals.get('period_id',False)
if period_id:
period = self.pool.get('account.period').browse(cr, uid, period_id, context=context)
vals['state']=period.state
return super(account_journal_period, self).create(cr, uid, vals, context)
def unlink(self, cr, uid, ids, context=None):
self._check(cr, uid, ids, context=context)
return super(account_journal_period, self).unlink(cr, uid, ids, context=context)
_defaults = {
'state': 'draft',
'active': True,
}
_order = "period_id"
#----------------------------------------------------------
# Entries
#----------------------------------------------------------
class account_move(osv.osv):
_name = "account.move"
_description = "Account Entry"
_order = 'id desc'
def account_assert_balanced(self, cr, uid, context=None):
cr.execute("""\
SELECT move_id
FROM account_move_line
WHERE state = 'valid'
GROUP BY move_id
HAVING abs(sum(debit) - sum(credit)) > 0.00001
""")
assert len(cr.fetchall()) == 0, \
"For all Journal Items, the state is valid implies that the sum " \
"of credits equals the sum of debits"
return True
def account_move_prepare(self, cr, uid, journal_id, date=False, ref='', company_id=False, context=None):
'''
Prepares and returns a dictionary of values, ready to be passed to create() based on the parameters received.
'''
if not date:
date = fields.date.today()
period_obj = self.pool.get('account.period')
if not company_id:
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = user.company_id.id
if context is None:
context = {}
#put the company in context to find the right period
ctx = context.copy()
ctx.update({'company_id': company_id})
return {
'journal_id': journal_id,
'date': date,
'period_id': period_obj.find(cr, uid, date, context=ctx)[0],
'ref': ref,
'company_id': company_id,
}
def name_get(self, cursor, user, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if not ids:
return []
res = []
data_move = self.pool.get('account.move').browse(cursor, user, ids, context=context)
for move in data_move:
if move.state=='draft':
name = '*' + str(move.id)
else:
name = move.name
res.append((move.id, name))
return res
def _get_period(self, cr, uid, context=None):
ctx = dict(context or {})
period_ids = self.pool.get('account.period').find(cr, uid, context=ctx)
return period_ids[0]
def _amount_compute(self, cr, uid, ids, name, args, context, where =''):
if not ids: return {}
cr.execute( 'SELECT move_id, SUM(debit) '\
'FROM account_move_line '\
'WHERE move_id IN %s '\
'GROUP BY move_id', (tuple(ids),))
result = dict(cr.fetchall())
for id in ids:
result.setdefault(id, 0.0)
return result
def _search_amount(self, cr, uid, obj, name, args, context):
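# fnct_search for the 'amount' function field: each domain condition is
# translated into a SQL aggregate on the sum of debits per move, and the
# resulting move ids are intersected across conditions.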
ids = set()
for cond in args:
amount = cond[2]
if isinstance(cond[2],(list,tuple)):
if cond[1] in ['in','not in']:
amount = tuple(cond[2])
else:
continue
else:
if cond[1] in ['=like', 'like', 'not like', 'ilike', 'not ilike', 'in', 'not in', 'child_of']:
continue
cr.execute("select move_id from account_move_line group by move_id having sum(debit) %s %%s" % (cond[1]),(amount,))
res_ids = set(id[0] for id in cr.fetchall())
ids = ids and (ids & res_ids) or res_ids
if ids:
return [('id', 'in', tuple(ids))]
return [('id', '=', '0')]
def _get_move_from_lines(self, cr, uid, ids, context=None):
line_obj = self.pool.get('account.move.line')
return [line.move_id.id for line in line_obj.browse(cr, uid, ids, context=context)]
_columns = {
'name': fields.char('Number', required=True, copy=False),
'ref': fields.char('Reference', copy=False),
'period_id': fields.many2one('account.period', 'Period', required=True, states={'posted':[('readonly',True)]}),
'journal_id': fields.many2one('account.journal', 'Journal', required=True, states={'posted':[('readonly',True)]}),
'state': fields.selection(
[('draft','Unposted'), ('posted','Posted')], 'Status',
required=True, readonly=True, copy=False,
help='All manually created new journal entries are usually in the status \'Unposted\', '
'but you can set the option to skip that status on the related journal. '
'In that case, they will behave as journal entries automatically created by the '
'system on document validation (invoices, bank statements...) and will be created '
'in \'Posted\' status.'),
'line_id': fields.one2many('account.move.line', 'move_id', 'Entries',
states={'posted':[('readonly',True)]},
copy=True),
'to_check': fields.boolean('To Review', help='Check this box if you are unsure of that journal entry and if you want to note it as \'to be reviewed\' by an accounting expert.'),
'partner_id': fields.related('line_id', 'partner_id', type="many2one", relation="res.partner", string="Partner", store={
_name: (lambda self, cr,uid,ids,c: ids, ['line_id'], 10),
'account.move.line': (_get_move_from_lines, ['partner_id'],10)
}),
'amount': fields.function(_amount_compute, string='Amount', digits_compute=dp.get_precision('Account'), type='float', fnct_search=_search_amount),
'date': fields.date('Date', required=True, states={'posted':[('readonly',True)]}, select=True),
'narration':fields.text('Internal Note'),
'company_id': fields.related('journal_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'balance': fields.float('balance', digits_compute=dp.get_precision('Account'), help="This field is only used for internal purposes and shouldn't be displayed"),
'statement_line_id': fields.many2one('account.bank.statement.line', 'Bank statement line reconciled with this entry', copy=False, readonly=True)
}
_defaults = {
'name': '/',
'state': 'draft',
'period_id': _get_period,
'date': fields.date.context_today,
'company_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
}
def _check_centralisation(self, cursor, user, ids, context=None):
for move in self.browse(cursor, user, ids, context=context):
if move.journal_id.centralisation:
move_ids = self.search(cursor, user, [
('period_id', '=', move.period_id.id),
('journal_id', '=', move.journal_id.id),
])
if len(move_ids) > 1:
return False
return True
_constraints = [
(_check_centralisation,
'You cannot create more than one move per period on a centralized journal.',
['journal_id']),
]
def post(self, cr, uid, ids, context=None):
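# Posting validates the moves, then names each unnamed move ('/') from
# the invoice internal number or the journal's entry sequence, and
# finally sets the state to 'posted' in SQL.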
if context is None:
context = {}
invoice = context.get('invoice', False)
valid_moves = self.validate(cr, uid, ids, context)
if not valid_moves:
raise UserError(_('You cannot validate a non-balanced entry.\nMake sure you have configured payment terms properly.\nThe latest payment term line should be of the "Balance" type.'))
obj_sequence = self.pool.get('ir.sequence')
for move in self.browse(cr, uid, valid_moves, context=context):
if move.name =='/':
new_name = False
journal = move.journal_id
if invoice and invoice.internal_number:
new_name = invoice.internal_number
else:
if journal.sequence_id:
c = {'ir_sequence_date': move.period_id.date_start}
new_name = obj_sequence.next_by_id(cr, uid, journal.sequence_id.id, c)
else:
raise UserError(_('Please define a sequence on the journal.'))
if new_name:
self.write(cr, uid, [move.id], {'name':new_name})
cr.execute('UPDATE account_move '\
'SET state=%s '\
'WHERE id IN %s',
('posted', tuple(valid_moves),))
self.invalidate_cache(cr, uid, context=context)
return True
def button_validate(self, cursor, user, ids, context=None):
for move in self.browse(cursor, user, ids, context=context):
# check that all accounts have the same topmost ancestor
top_common = None
for line in move.line_id:
account = line.account_id
top_account = account
while top_account.parent_id:
top_account = top_account.parent_id
if not top_common:
top_common = top_account
elif top_account.id != top_common.id:
raise UserError(_('You cannot validate this journal entry because account "%s" does not belong to chart of accounts "%s".') % (account.name, top_common.name))
return self.post(cursor, user, ids, context=context)
def button_cancel(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if not line.journal_id.update_posted:
raise UserError(_('You cannot modify a posted entry of this journal.\nFirst you should set the journal to allow cancelling entries.'))
if ids:
cr.execute('UPDATE account_move '\
'SET state=%s '\
'WHERE id IN %s', ('draft', tuple(ids),))
self.invalidate_cache(cr, uid, context=context)
return True
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
c = context.copy()
c['novalidate'] = True
result = super(account_move, self).write(cr, uid, ids, vals, c)
self.validate(cr, uid, ids, context=context)
return result
def create(self, cr, uid, vals, context=None):
context = dict(context or {})
if vals.get('line_id'):
if vals.get('journal_id'):
for l in vals['line_id']:
if not l[0]:
l[2]['journal_id'] = vals['journal_id']
context['journal_id'] = vals['journal_id']
if 'period_id' in vals:
for l in vals['line_id']:
if not l[0]:
l[2]['period_id'] = vals['period_id']
context['period_id'] = vals['period_id']
else:
default_period = self._get_period(cr, uid, context)
for l in vals['line_id']:
if not l[0]:
l[2]['period_id'] = default_period
context['period_id'] = default_period
c = context.copy()
c['novalidate'] = True
c['period_id'] = vals['period_id'] if 'period_id' in vals else self._get_period(cr, uid, context)
c['journal_id'] = vals['journal_id']
if 'date' in vals: c['date'] = vals['date']
result = super(account_move, self).create(cr, uid, vals, c)
tmp = self.validate(cr, uid, [result], context)
journal = self.pool.get('account.journal').browse(cr, uid, vals['journal_id'], context)
if journal.entry_posted and tmp:
self.button_validate(cr,uid, [result], context)
else:
result = super(account_move, self).create(cr, uid, vals, context)
return result
def unlink(self, cr, uid, ids, context=None, check=True):
context = dict(context or {})
if isinstance(ids, (int, long)):
ids = [ids]
toremove = []
obj_move_line = self.pool.get('account.move.line')
for move in self.browse(cr, uid, ids, context=context):
if move['state'] != 'draft':
raise UserError(_('You cannot delete a posted journal entry "%s".') % move['name'])
for line in move.line_id:
if line.invoice:
raise UserError(_("Move cannot be deleted if linked to an invoice. (Invoice: %s - Move ID:%s)") % (line.invoice.number,move.name))
line_ids = map(lambda x: x.id, move.line_id)
context['journal_id'] = move.journal_id.id
context['period_id'] = move.period_id.id
obj_move_line._update_check(cr, uid, line_ids, context)
obj_move_line.unlink(cr, uid, line_ids, context=context)
toremove.append(move.id)
result = super(account_move, self).unlink(cr, uid, toremove, context)
return result
def _compute_balance(self, cr, uid, id, context=None):
move = self.browse(cr, uid, id, context=context)
amount = 0
for line in move.line_id:
amount+= (line.debit - line.credit)
return amount
def _centralise(self, cr, uid, move, mode, context=None):
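# On a centralized journal every entry shares a single counterpart line
# per side: this finds (or creates) that centralisation line, recomputes
# its debit/credit so the move balances, and maintains one extra
# 'Currency Adjustment' line per secondary currency used in the move.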
assert mode in ('debit', 'credit'), 'Invalid Mode' #to prevent sql injection
currency_obj = self.pool.get('res.currency')
account_move_line_obj = self.pool.get('account.move.line')
context = dict(context or {})
if mode=='credit':
account_id = move.journal_id.default_debit_account_id.id
mode2 = 'debit'
if not account_id:
raise UserError(_('There is no default debit account defined \non journal "%s".') % move.journal_id.name)
else:
account_id = move.journal_id.default_credit_account_id.id
mode2 = 'credit'
if not account_id:
raise UserError(_('There is no default credit account defined \non journal "%s".') % move.journal_id.name)
# find the first line of this move with the current mode
# or create it if it doesn't exist
cr.execute('select id from account_move_line where move_id=%s and centralisation=%s limit 1', (move.id, mode))
res = cr.fetchone()
if res:
line_id = res[0]
else:
context.update({'journal_id': move.journal_id.id, 'period_id': move.period_id.id})
line_id = account_move_line_obj.create(cr, uid, {
'name': _(mode.capitalize()+' Centralisation'),
'centralisation': mode,
'partner_id': False,
'account_id': account_id,
'move_id': move.id,
'journal_id': move.journal_id.id,
'period_id': move.period_id.id,
'date': move.period_id.date_stop,
'debit': 0.0,
'credit': 0.0,
}, context)
# find the first line of this move with the other mode
# so that we can exclude it from our calculation
cr.execute('select id from account_move_line where move_id=%s and centralisation=%s limit 1', (move.id, mode2))
res = cr.fetchone()
if res:
line_id2 = res[0]
else:
line_id2 = 0
cr.execute('SELECT SUM(%s) FROM account_move_line WHERE move_id=%%s AND id!=%%s' % (mode,), (move.id, line_id2))
result = cr.fetchone()[0] or 0.0
cr.execute('update account_move_line set '+mode2+'=%s where id=%s', (result, line_id))
account_move_line_obj.invalidate_cache(cr, uid, [mode2], [line_id], context=context)
#adjust also the amount in currency if needed
cr.execute("select currency_id, sum(amount_currency) as amount_currency from account_move_line where move_id = %s and currency_id is not null group by currency_id", (move.id,))
for row in cr.dictfetchall():
currency_id = currency_obj.browse(cr, uid, row['currency_id'], context=context)
if not currency_obj.is_zero(cr, uid, currency_id, row['amount_currency']):
amount_currency = row['amount_currency'] * -1
account_id = amount_currency > 0 and move.journal_id.default_debit_account_id.id or move.journal_id.default_credit_account_id.id
cr.execute('select id from account_move_line where move_id=%s and centralisation=\'currency\' and currency_id = %s limit 1', (move.id, row['currency_id']))
res = cr.fetchone()
if res:
cr.execute('update account_move_line set amount_currency=%s , account_id=%s where id=%s', (amount_currency, account_id, res[0]))
account_move_line_obj.invalidate_cache(cr, uid, ['amount_currency', 'account_id'], [res[0]], context=context)
else:
context.update({'journal_id': move.journal_id.id, 'period_id': move.period_id.id})
line_id = account_move_line_obj.create(cr, uid, {
'name': _('Currency Adjustment'),
'centralisation': 'currency',
'partner_id': False,
'account_id': account_id,
'move_id': move.id,
'journal_id': move.journal_id.id,
'period_id': move.period_id.id,
'date': move.period_id.date_stop,
'debit': 0.0,
'credit': 0.0,
'currency_id': row['currency_id'],
'amount_currency': amount_currency,
}, context)
return True
#
# Validate a balanced move. If it is a centralised journal, create a move.
#
def validate(self, cr, uid, ids, context=None):
if context and ('__last_update' in context):
del context['__last_update']
valid_moves = [] # Maintains a list of moves for which analytic entries can be created
obj_analytic_line = self.pool.get('account.analytic.line')
obj_move_line = self.pool.get('account.move.line')
for move in self.browse(cr, uid, ids, context):
journal = move.journal_id
amount = 0
line_ids = []
line_draft_ids = []
company_id = None
# makes sure we don't use outdated period
obj_move_line._update_journal_check(cr, uid, journal.id, move.period_id.id, context=context)
for line in move.line_id:
amount += line.debit - line.credit
line_ids.append(line.id)
if line.state=='draft':
line_draft_ids.append(line.id)
if not company_id:
company_id = line.account_id.company_id.id
if not company_id == line.account_id.company_id.id:
raise UserError(_("Cannot create moves for different companies."))
if line.account_id.currency_id and line.currency_id:
if line.account_id.currency_id.id != line.currency_id.id and (line.account_id.currency_id.id != line.account_id.company_id.currency_id.id):
raise UserError(_("""Cannot create move with currency different from ..""") % (line.account_id.code, line.account_id.name))
if abs(amount) < 10 ** -4:
# If the move is balanced
# Add to the list of valid moves
# (analytic lines will be created later for valid moves)
valid_moves.append(move)
# Check whether the move lines are confirmed
if not line_draft_ids:
continue
# Update the move lines (set them as valid)
obj_move_line.write(cr, uid, line_draft_ids, {
'state': 'valid'
}, context, check=False)
account = {}
account2 = {}
if journal.type in ('purchase','sale'):
for line in move.line_id:
code = amount = 0
key = (line.account_id.id, line.tax_code_id.id)
if key in account2:
code = account2[key][0]
amount = account2[key][1] * (line.debit + line.credit)
elif line.account_id.id in account:
code = account[line.account_id.id][0]
amount = account[line.account_id.id][1] * (line.debit + line.credit)
if (code or amount) and not (line.tax_code_id or line.tax_amount):
obj_move_line.write(cr, uid, [line.id], {
'tax_code_id': code,
'tax_amount': amount
}, context, check=False)
elif journal.centralisation:
# If the move is not balanced, it must be centralised...
# Add to the list of valid moves
# (analytic lines will be created later for valid moves)
valid_moves.append(move)
#
# Update the move lines (set them as valid)
#
self._centralise(cr, uid, move, 'debit', context=context)
self._centralise(cr, uid, move, 'credit', context=context)
obj_move_line.write(cr, uid, line_draft_ids, {
'state': 'valid'
}, context, check=False)
else:
# We can't validate it (it's unbalanced)
# Setting the lines as draft
not_draft_line_ids = list(set(line_ids) - set(line_draft_ids))
if not_draft_line_ids:
obj_move_line.write(cr, uid, not_draft_line_ids, {
'state': 'draft'
}, context, check=False)
# Create analytic lines for the valid moves
for record in valid_moves:
obj_move_line.create_analytic_lines(cr, uid, [line.id for line in record.line_id], context)
valid_moves = [move.id for move in valid_moves]
return len(valid_moves) > 0 and valid_moves or False
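# Illustrative note (not part of the original code): validate() treats a move as balanced
# when abs(sum(debit - credit)) < 10 ** -4. For example, a hypothetical move with two lines
# (debit=100.0/credit=0.0 and debit=0.0/credit=100.0) sums to 0.0, so it is added to
# valid_moves and its draft lines are set to 'valid'; a move summing to 5.0 on a journal
# without centralisation has its already-validated lines reset to 'draft' instead.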
class account_move_reconcile(osv.osv):
_name = "account.move.reconcile"
_description = "Account Reconciliation"
_columns = {
'name': fields.char('Name', required=True),
'type': fields.char('Type', required=True),
'line_id': fields.one2many('account.move.line', 'reconcile_id', 'Entry Lines'),
'line_partial_ids': fields.one2many('account.move.line', 'reconcile_partial_id', 'Partial Entry lines'),
'create_date': fields.date('Creation date', readonly=True),
'opening_reconciliation': fields.boolean('Opening Entries Reconciliation', help="Is this reconciliation produced by the opening of a new fiscal year?"),
}
_defaults = {
'name': lambda self,cr,uid,ctx=None: self.pool.get('ir.sequence').next_by_code(cr, uid, 'account.reconcile', context=ctx) or '/',
}
# You cannot unlink a reconciliation if it is an opening_reconciliation one;
# you should use the generate opening entries wizard for that
def unlink(self, cr, uid, ids, context=None):
for move_rec in self.browse(cr, uid, ids, context=context):
if move_rec.opening_reconciliation:
raise UserError(_('You cannot unreconcile journal items if they have been generated by the opening/closing fiscal year process.'))
return super(account_move_reconcile, self).unlink(cr, uid, ids, context=context)
# Look in the line_id and line_partial_ids to ensure the partner is the same or empty
# on all lines. We allow that only for opening/closing period
def _check_same_partner(self, cr, uid, ids, context=None):
for reconcile in self.browse(cr, uid, ids, context=context):
move_lines = []
if not reconcile.opening_reconciliation:
if reconcile.line_id:
first_partner = reconcile.line_id[0].partner_id.id
move_lines = reconcile.line_id
elif reconcile.line_partial_ids:
first_partner = reconcile.line_partial_ids[0].partner_id.id
move_lines = reconcile.line_partial_ids
if any([(line.account_id.type in ('receivable', 'payable') and line.partner_id.id != first_partner) for line in move_lines]):
return False
return True
_constraints = [
(_check_same_partner, 'You can only reconcile journal items with the same partner.', ['line_id', 'line_partial_ids']),
]
def reconcile_partial_check(self, cr, uid, ids, type='auto', context=None):
total = 0.0
for rec in self.browse(cr, uid, ids, context=context):
for line in rec.line_partial_ids:
if line.account_id.currency_id:
total += line.amount_currency
else:
total += (line.debit or 0.0) - (line.credit or 0.0)
if not total:
self.pool.get('account.move.line').write(cr, uid,
map(lambda x: x.id, rec.line_partial_ids),
{'reconcile_id': rec.id },
context=context
)
return True
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
result = []
for r in self.browse(cr, uid, ids, context=context):
total = reduce(lambda y,t: (t.debit or 0.0) - (t.credit or 0.0) + y, r.line_partial_ids, 0.0)
if total:
name = '%s (%.2f)' % (r.name, total)
result.append((r.id,name))
else:
result.append((r.id,r.name))
return result
#----------------------------------------------------------
# Tax
#----------------------------------------------------------
"""
To be documented.
child_depend: the tax depends on its child taxes
"""
class account_tax_code(osv.osv):
"""
A code for the tax object.
This code is used for some tax declarations.
"""
def _sum(self, cr, uid, ids, name, args, context, where ='', where_params=()):
parent_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)]))
if context.get('based_on', 'invoices') == 'payments':
cr.execute('SELECT line.tax_code_id, sum(line.tax_amount) \
FROM account_move_line AS line, \
account_move AS move \
LEFT JOIN account_invoice invoice ON \
(invoice.move_id = move.id) \
WHERE line.tax_code_id IN %s '+where+' \
AND move.id = line.move_id \
AND ((invoice.state = \'paid\') \
OR (invoice.id IS NULL)) \
GROUP BY line.tax_code_id',
(parent_ids,) + where_params)
else:
cr.execute('SELECT line.tax_code_id, sum(line.tax_amount) \
FROM account_move_line AS line, \
account_move AS move \
WHERE line.tax_code_id IN %s '+where+' \
AND move.id = line.move_id \
GROUP BY line.tax_code_id',
(parent_ids,) + where_params)
res=dict(cr.fetchall())
obj_precision = self.pool.get('decimal.precision')
res2 = {}
for record in self.browse(cr, uid, ids, context=context):
def _rec_get(record):
amount = res.get(record.id, 0.0)
for rec in record.child_ids:
amount += _rec_get(rec) * rec.sign
return amount
res2[record.id] = round(_rec_get(record), obj_precision.precision_get(cr, uid, 'Account'))
return res2
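# Illustrative sketch (assumed values, not original code): _rec_get above consolidates a
# child tax code into its parent using the child's sign. With res[parent] == 100.0 and one
# child whose own amount is 40.0 and whose sign is -1, the parent total becomes
# 100.0 + 40.0 * -1 == 60.0, rounded at the 'Account' decimal precision.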
def _sum_year(self, cr, uid, ids, name, args, context=None):
if context is None:
context = {}
move_state = ('posted', )
if context.get('state', 'all') == 'all':
move_state = ('draft', 'posted', )
if context.get('fiscalyear_id', False):
fiscalyear_id = [context['fiscalyear_id']]
else:
fiscalyear_id = self.pool.get('account.fiscalyear').finds(cr, uid, exception=False)
where = ''
where_params = ()
if fiscalyear_id:
pids = []
for fy in fiscalyear_id:
pids += map(lambda x: str(x.id), self.pool.get('account.fiscalyear').browse(cr, uid, fy).period_ids)
if pids:
where = ' AND line.period_id IN %s AND move.state IN %s '
where_params = (tuple(pids), move_state)
return self._sum(cr, uid, ids, name, args, context,
where=where, where_params=where_params)
def _sum_period(self, cr, uid, ids, name, args, context):
if context is None:
context = {}
move_state = ('posted', )
if context.get('state', False) == 'all':
move_state = ('draft', 'posted', )
if context.get('period_id', False):
period_id = context['period_id']
else:
period_id = self.pool.get('account.period').find(cr, uid, context=context)
if not period_id:
return dict.fromkeys(ids, 0.0)
period_id = period_id[0]
return self._sum(cr, uid, ids, name, args, context,
where=' AND line.period_id=%s AND move.state IN %s', where_params=(period_id, move_state))
_name = 'account.tax.code'
_description = 'Tax Code'
_rec_name = 'code'
_order = 'sequence, code'
_columns = {
'name': fields.char('Tax Case Name', required=True, translate=True),
'code': fields.char('Case Code', size=64),
'info': fields.text('Description'),
'sum': fields.function(_sum_year, string="Year Sum"),
'sum_period': fields.function(_sum_period, string="Period Sum"),
'parent_id': fields.many2one('account.tax.code', 'Parent Code', select=True),
'child_ids': fields.one2many('account.tax.code', 'parent_id', 'Child Codes'),
'line_ids': fields.one2many('account.move.line', 'tax_code_id', 'Lines'),
'company_id': fields.many2one('res.company', 'Company', required=True),
'sign': fields.float('Coefficient for parent', required=True, help='You can specify here the coefficient that will be used when consolidating the amount of this case into its parent. For example, set 1/-1 if you want to add/subtract it.'),
'notprintable':fields.boolean("Not Printable in Invoice", help="Check this box if you don't want any tax related to this tax code to appear on invoices"),
'sequence': fields.integer('Sequence', help="Determine the display order in the report 'Accounting \ Reporting \ Generic Reporting \ Taxes \ Taxes Report'"),
}
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=80):
if not args:
args = []
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = [('code', operator, name), ('name', operator, name)]
else:
domain = ['|', ('code', operator, name), ('name', operator, name)]
ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
def name_get(self, cr, uid, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if not ids:
return []
reads = self.read(cr, uid, ids, ['name','code'], context=context, load='_classic_write')
return [(x['id'], (x['code'] and (x['code'] + ' - ') or '') + x['name']) \
for x in reads]
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.id
return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
_defaults = {
'company_id': _default_company,
'sign': 1.0,
'notprintable': False,
}
_check_recursion = check_cycle
_constraints = [
(_check_recursion, 'Error!\nYou cannot create recursive accounts.', ['parent_id'])
]
_order = 'code'
def get_precision_tax():
def change_digit_tax(cr):
res = openerp.registry(cr.dbname)['decimal.precision'].precision_get(cr, SUPERUSER_ID, 'Account')
return (16, res+3)
return change_digit_tax
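# Hedged example (values assumed): get_precision_tax() widens the configured 'Account'
# decimal precision by three digits for tax rates. If decimal.precision returns 2 for
# 'Account', change_digit_tax() yields (16, 5), so a rate such as 0.21001 is stored
# without being rounded to 0.21.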
class account_tax(osv.osv):
"""
A tax object.
Type: percent, fixed, none, code
PERCENT: tax = price * amount
FIXED: tax = amount (the tax-included price is price + amount)
NONE: no tax line
CODE: execute python code. localcontext = {'price_unit':pu}
return result in the context
Ex: result=round(price_unit*0.21,4)
"""
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
this = self.browse(cr, uid, id, context=context)
tmp_default = dict(default, name=_("%s (Copy)") % this.name)
return super(account_tax, self).copy_data(cr, uid, id, default=tmp_default, context=context)
_name = 'account.tax'
_description = 'Tax'
_columns = {
'name': fields.char('Tax Name', required=True, translate=True, help="This name will be displayed on reports"),
'sequence': fields.integer('Sequence', required=True, help="The sequence field is used to order the tax lines from the lowest sequences to the higher ones. The order is important if you have a tax with several tax children. In this case, the evaluation order is important."),
'amount': fields.float('Amount', required=True, digits_compute=get_precision_tax(), help="For taxes of type percentage, enter % ratio between 0-1."),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the tax without removing it."),
'type': fields.selection( [('percent','Percentage'), ('fixed','Fixed Amount'), ('none','None'), ('code','Python Code'), ('balance','Balance')], 'Tax Type', required=True,
help="The computation method for the tax amount."),
'applicable_type': fields.selection( [('true','Always'), ('code','Given by Python Code')], 'Applicability', required=True,
help="If not applicable (computed through a Python code), the tax won't appear on the invoice."),
'domain':fields.char('Domain', help="This field is only used if you develop your own module allowing developers to create specific taxes in a custom domain."),
'account_collected_id':fields.many2one('account.account', 'Invoice Tax Account', ondelete='restrict', help="Set the account that will be set by default on invoice tax lines for invoices. Leave empty to use the expense account."),
'account_paid_id':fields.many2one('account.account', 'Refund Tax Account', ondelete='restrict', help="Set the account that will be set by default on invoice tax lines for refunds. Leave empty to use the expense account."),
'account_analytic_collected_id':fields.many2one('account.analytic.account', 'Invoice Tax Analytic Account', help="Set the analytic account that will be used by default on the invoice tax lines for invoices. Leave empty if you don't want to use an analytic account on the invoice tax lines by default."),
'account_analytic_paid_id':fields.many2one('account.analytic.account', 'Refund Tax Analytic Account', help="Set the analytic account that will be used by default on the invoice tax lines for refunds. Leave empty if you don't want to use an analytic account on the invoice tax lines by default."),
'parent_id':fields.many2one('account.tax', 'Parent Tax Account', select=True),
'child_ids':fields.one2many('account.tax', 'parent_id', 'Child Tax Accounts'),
'child_depend':fields.boolean('Tax on Children', help="Set if the tax computation is based on the computation of child taxes rather than on the total amount."),
'python_compute':fields.text('Python Code'),
'python_compute_inv':fields.text('Python Code (reverse)'),
'python_applicable':fields.text('Applicable Code'),
#
# Fields used for the Tax declaration
#
'base_code_id': fields.many2one('account.tax.code', 'Account Base Code', help="Use this code for the tax declaration."),
'tax_code_id': fields.many2one('account.tax.code', 'Account Tax Code', help="Use this code for the tax declaration."),
'base_sign': fields.float('Base Code Sign', help="Usually 1 or -1.", digits_compute=get_precision_tax()),
'tax_sign': fields.float('Tax Code Sign', help="Usually 1 or -1.", digits_compute=get_precision_tax()),
# Same fields for refund invoices
'ref_base_code_id': fields.many2one('account.tax.code', 'Refund Base Code', help="Use this code for the tax declaration."),
'ref_tax_code_id': fields.many2one('account.tax.code', 'Refund Tax Code', help="Use this code for the tax declaration."),
'ref_base_sign': fields.float('Refund Base Code Sign', help="Usually 1 or -1.", digits_compute=get_precision_tax()),
'ref_tax_sign': fields.float('Refund Tax Code Sign', help="Usually 1 or -1.", digits_compute=get_precision_tax()),
'include_base_amount': fields.boolean('Included in base amount', help="Indicates if the amount of tax must be included in the base amount for the computation of the next taxes"),
'company_id': fields.many2one('res.company', 'Company', required=True),
'description': fields.char('Tax Code'),
'price_include': fields.boolean('Tax Included in Price', help="Check this if the price you use on the product and invoices includes this tax."),
'type_tax_use': fields.selection([('sale','Sale'),('purchase','Purchase'),('all','All')], 'Tax Application', required=True)
}
_sql_constraints = [
('name_company_uniq', 'unique(name, company_id)', 'Tax Name must be unique per company!'),
]
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=80):
"""
Returns a list of tuples containing id and name, as internally it calls {def name_get}
result format: {[(id, name), (id, name), ...]}
@param cr: A database cursor
@param user: ID of the user currently logged in
@param name: name to search
@param args: other arguments
@param operator: default operator is 'ilike', it can be changed
@param context: context arguments, like lang, time zone
@param limit: Returns first 'n' ids of complete result, default is 80.
@return: Returns a list of tuples containing id and name
"""
if not args:
args = []
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = [('description', operator, name), ('name', operator, name)]
else:
domain = ['|', ('description', operator, name), ('name', operator, name)]
ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
def write(self, cr, uid, ids, vals, context=None):
if vals.get('type', False) and vals['type'] in ('none', 'code'):
vals.update({'amount': 0.0})
return super(account_tax, self).write(cr, uid, ids, vals, context=context)
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
journal_pool = self.pool.get('account.journal')
if context.get('type'):
if context.get('type') in ('out_invoice','out_refund'):
args += [('type_tax_use','in',['sale','all'])]
elif context.get('type') in ('in_invoice','in_refund'):
args += [('type_tax_use','in',['purchase','all'])]
if context.get('journal_id'):
journal = journal_pool.browse(cr, uid, context.get('journal_id'))
if journal.type in ('sale', 'purchase'):
args += [('type_tax_use','in',[journal.type,'all'])]
return super(account_tax, self).search(cr, uid, args, offset, limit, order, context, count)
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
res = []
for record in self.read(cr, uid, ids, ['description','name'], context=context):
name = record['description'] and record['description'] or record['name']
res.append((record['id'],name ))
return res
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.id
return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
_defaults = {
'python_compute': '''# price_unit\n# or False\n# product: product.product object or None\n# partner: res.partner object or None\n\nresult = price_unit * 0.10''',
'python_compute_inv': '''# price_unit\n# product: product.product object or False\n\nresult = price_unit * 0.10''',
'applicable_type': 'true',
'type': 'percent',
'amount': 0,
'price_include': 0,
'active': 1,
'type_tax_use': 'all',
'sequence': 1,
'ref_tax_sign': 1,
'ref_base_sign': 1,
'tax_sign': 1,
'base_sign': 1,
'include_base_amount': False,
'company_id': _default_company,
}
_order = 'sequence'
def _applicable(self, cr, uid, taxes, price_unit, product=None, partner=None):
res = []
for tax in taxes:
if tax.applicable_type=='code':
localdict = {'price_unit':price_unit, 'product':product, 'partner':partner}
exec tax.python_applicable in localdict
if localdict.get('result', False):
res.append(tax)
else:
res.append(tax)
return res
def _unit_compute(self, cr, uid, taxes, price_unit, product=None, partner=None, quantity=0):
taxes = self._applicable(cr, uid, taxes, price_unit ,product, partner)
res = []
cur_price_unit=price_unit
for tax in taxes:
# we compute the amount for the current tax object and append it to the result
data = {'id':tax.id,
'name':tax.description and tax.description + " - " + tax.name or tax.name,
'account_collected_id':tax.account_collected_id.id,
'account_paid_id':tax.account_paid_id.id,
'account_analytic_collected_id': tax.account_analytic_collected_id.id,
'account_analytic_paid_id': tax.account_analytic_paid_id.id,
'base_code_id': tax.base_code_id.id,
'ref_base_code_id': tax.ref_base_code_id.id,
'sequence': tax.sequence,
'base_sign': tax.base_sign,
'tax_sign': tax.tax_sign,
'ref_base_sign': tax.ref_base_sign,
'ref_tax_sign': tax.ref_tax_sign,
'price_unit': cur_price_unit,
'tax_code_id': tax.tax_code_id.id,
'ref_tax_code_id': tax.ref_tax_code_id.id,
}
res.append(data)
if tax.type=='percent':
amount = cur_price_unit * tax.amount
data['amount'] = amount
elif tax.type=='fixed':
data['amount'] = tax.amount
data['tax_amount']=quantity
# data['amount'] = quantity
elif tax.type=='code':
localdict = {'price_unit':cur_price_unit, 'product':product, 'partner':partner, 'quantity': quantity}
exec tax.python_compute in localdict
amount = localdict['result']
data['amount'] = amount
elif tax.type=='balance':
data['amount'] = cur_price_unit - reduce(lambda x,y: y.get('amount',0.0)+x, res, 0.0)
data['balance'] = cur_price_unit
amount2 = data.get('amount', 0.0)
if tax.child_ids:
if tax.child_depend:
latest = res.pop()
amount = amount2
child_tax = self._unit_compute(cr, uid, tax.child_ids, amount, product, partner, quantity)
res.extend(child_tax)
for child in child_tax:
amount2 += child.get('amount', 0.0)
if tax.child_depend:
for r in res:
for name in ('base','ref_base'):
if latest[name+'_code_id'] and latest[name+'_sign'] and not r[name+'_code_id']:
r[name+'_code_id'] = latest[name+'_code_id']
r[name+'_sign'] = latest[name+'_sign']
r['price_unit'] = latest['price_unit']
latest[name+'_code_id'] = False
for name in ('tax','ref_tax'):
if latest[name+'_code_id'] and latest[name+'_sign'] and not r[name+'_code_id']:
r[name+'_code_id'] = latest[name+'_code_id']
r[name+'_sign'] = latest[name+'_sign']
r['amount'] = data['amount']
latest[name+'_code_id'] = False
if tax.include_base_amount:
cur_price_unit+=amount2
return res
def compute_for_bank_reconciliation(self, cr, uid, tax_id, amount, context=None):
""" Called by RPC by the bank statement reconciliation widget """
tax = self.browse(cr, uid, tax_id, context=context)
return self.compute_all(cr, uid, [tax], amount, 1) # TOCHECK may use force_exclude parameter
@api.v7
def compute_all(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None, force_excluded=False):
"""
:param force_excluded: boolean used to indicate that the value of the field price_include of the
taxes must be ignored. It is used for line-by-line encoding, where it does not matter whether the
tax was encoded with that boolean set to True or False.
RETURN: {
'total': 0.0, # Total without taxes
'total_included': 0.0, # Total with taxes
'taxes': [] # List of taxes, see compute for the format
}
"""
# By default, for each tax, tax amount will first be computed
# and rounded at the 'Account' decimal precision for each
# PO/SO/invoice line and then these rounded amounts will be
# summed, leading to the total amount for that tax. But, if the
# company has tax_calculation_rounding_method = round_globally,
# we still follow the same method, but we use a much larger
# precision when we round the tax amount for each line (we use
# the 'Account' decimal precision + 5), and that way it's like
# rounding after the sum of the tax amounts of each line
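# Illustrative numbers (assumed, not from the original code): with an 'Account' precision
# of 2 and a 12.5% tax on three lines of 1.00 each, per-line rounding gives roughly
# 3 * round(0.125, 2) = 0.39, whereas round_globally rounds each line at precision 7
# (3 * 0.1250000 = 0.375) so the total is only rounded once, to 0.38.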
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
tax_compute_precision = precision
if taxes and taxes[0].company_id.tax_calculation_rounding_method == 'round_globally':
tax_compute_precision += 5
totalin = totalex = round(price_unit * quantity, precision)
tin = []
tex = []
for tax in taxes:
if not tax.price_include or force_excluded:
tex.append(tax)
else:
tin.append(tax)
tin = self.compute_inv(cr, uid, tin, price_unit, quantity, product=product, partner=partner, precision=tax_compute_precision)
for r in tin:
totalex -= r.get('amount', 0.0)
totlex_qty = 0.0
try:
totlex_qty = totalex/quantity
except ZeroDivisionError:
pass
tex = self._compute(cr, uid, tex, totlex_qty, quantity, product=product, partner=partner, precision=tax_compute_precision)
for r in tex:
totalin += r.get('amount', 0.0)
return {
'total': totalex,
'total_included': totalin,
'taxes': tin + tex
}
@api.v8
def compute_all(self, price_unit, quantity, product=None, partner=None, force_excluded=False):
return self._model.compute_all(
self._cr, self._uid, self, price_unit, quantity,
product=product, partner=partner, force_excluded=force_excluded)
def compute(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None):
_logger.info("Deprecated, use compute_all(...)['taxes'] instead of compute(...) to manage prices with tax included.")
return self._compute(cr, uid, taxes, price_unit, quantity, product, partner)
def _compute(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None, precision=None):
"""
Compute tax values for given PRICE_UNIT, QUANTITY and a buyer/seller ADDRESS_ID.
RETURN:
[ tax ]
tax = {'name':'', 'amount':0.0, 'account_collected_id':1, 'account_paid_id':2}
one tax for each tax id in IDS and their children
"""
if not precision:
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
res = self._unit_compute(cr, uid, taxes, price_unit, product, partner, quantity)
total = 0.0
for r in res:
if r.get('balance',False):
r['amount'] = round(r.get('balance', 0.0) * quantity, precision) - total
else:
r['amount'] = round(r.get('amount', 0.0) * quantity, precision)
total += r['amount']
return res
def _unit_compute_inv(self, cr, uid, taxes, price_unit, product=None, partner=None):
taxes = self._applicable(cr, uid, taxes, price_unit, product, partner)
res = []
taxes.reverse()
cur_price_unit = price_unit
tax_parent_tot = 0.0
for tax in taxes:
if (tax.type=='percent') and not tax.include_base_amount:
tax_parent_tot += tax.amount
for tax in taxes:
if (tax.type=='fixed') and not tax.include_base_amount:
cur_price_unit -= tax.amount
for tax in taxes:
if tax.type=='percent':
if tax.include_base_amount:
amount = cur_price_unit - (cur_price_unit / (1 + tax.amount))
else:
amount = (cur_price_unit / (1 + tax_parent_tot)) * tax.amount
elif tax.type=='fixed':
amount = tax.amount
elif tax.type=='code':
localdict = {'price_unit':cur_price_unit, 'product':product, 'partner':partner}
exec tax.python_compute_inv in localdict
amount = localdict['result']
elif tax.type=='balance':
amount = cur_price_unit - reduce(lambda x,y: y.get('amount',0.0)+x, res, 0.0)
if tax.include_base_amount:
cur_price_unit -= amount
todo = 0
else:
todo = 1
res.append({
'id': tax.id,
'todo': todo,
'name': tax.name,
'amount': amount,
'account_collected_id': tax.account_collected_id.id,
'account_paid_id': tax.account_paid_id.id,
'account_analytic_collected_id': tax.account_analytic_collected_id.id,
'account_analytic_paid_id': tax.account_analytic_paid_id.id,
'base_code_id': tax.base_code_id.id,
'ref_base_code_id': tax.ref_base_code_id.id,
'sequence': tax.sequence,
'base_sign': tax.base_sign,
'tax_sign': tax.tax_sign,
'ref_base_sign': tax.ref_base_sign,
'ref_tax_sign': tax.ref_tax_sign,
'price_unit': cur_price_unit,
'tax_code_id': tax.tax_code_id.id,
'ref_tax_code_id': tax.ref_tax_code_id.id,
})
if tax.child_ids:
if tax.child_depend:
del res[-1]
amount = price_unit
parent_tax = self._unit_compute_inv(cr, uid, tax.child_ids, amount, product, partner)
res.extend(parent_tax)
total = 0.0
for r in res:
if r['todo']:
total += r['amount']
for r in res:
r['price_unit'] -= total
r['todo'] = 0
return res
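# Worked example (assumed values, for illustration only): for a single 21% tax that does
# not have include_base_amount set, the branch above computes
#   amount = (cur_price_unit / (1 + tax_parent_tot)) * tax.amount
# so a tax-included price_unit of 121.0 gives 121.0 / 1.21 * 0.21 = 21.0, i.e. the
# untaxed base is 121.0 - 21.0 = 100.0.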
def compute_inv(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None, precision=None):
"""
Compute tax values for given PRICE_UNIT, QUANTITY and a buyer/seller ADDRESS_ID.
Price Unit is a Tax included price
RETURN:
[ tax ]
tax = {'name':'', 'amount':0.0, 'account_collected_id':1, 'account_paid_id':2}
one tax for each tax id in IDS and their children
"""
if not precision:
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
res = self._unit_compute_inv(cr, uid, taxes, price_unit, product, partner=None)
total = 0.0
for r in res:
if r.get('balance',False):
r['amount'] = round(r['balance'] * quantity, precision) - total
else:
r['amount'] = round(r['amount'] * quantity, precision)
total += r['amount']
return res
# ---------------------------------------------------------
# Account Entries Models
# ---------------------------------------------------------
class account_model(osv.osv):
_name = "account.model"
_description = "Account Model"
_columns = {
'name': fields.char('Model Name', required=True, help="This is a model for recurring accounting entries"),
'journal_id': fields.many2one('account.journal', 'Journal', required=True),
'company_id': fields.related('journal_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
'lines_id': fields.one2many('account.model.line', 'model_id', 'Model Entries', copy=True),
'legend': fields.text('Legend', readonly=True, size=100),
}
_defaults = {
'legend': lambda self, cr, uid, context:_('You can specify year, month and date in the name of the model using the following labels:\n\n%(year)s: To Specify Year \n%(month)s: To Specify Month \n%(date)s: Current Date\n\ne.g. My model on %(date)s'),
}
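# Illustrative note (assumed values): generate() below substitutes the labels described in
# the legend into the model name, e.g. a hypothetical model named "My model on %(date)s"
# generated with a move date of 2015-03-31 becomes "My model on 2015-03"
# (%(year)s -> '2015', %(month)s -> '03', %(date)s uses the '%Y-%m' format).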
def generate(self, cr, uid, ids, data=None, context=None):
if data is None:
data = {}
move_ids = []
entry = {}
account_move_obj = self.pool.get('account.move')
account_move_line_obj = self.pool.get('account.move.line')
pt_obj = self.pool.get('account.payment.term')
period_obj = self.pool.get('account.period')
if context is None:
context = {}
if data.get('date', False):
context = dict(context)
context.update({'date': data['date']})
move_date = context.get('date', time.strftime('%Y-%m-%d'))
move_date = datetime.strptime(move_date,"%Y-%m-%d")
for model in self.browse(cr, uid, ids, context=context):
ctx = context.copy()
ctx.update({'company_id': model.company_id.id})
period_ids = period_obj.find(cr, uid, dt=context.get('date', False), context=ctx)
period_id = period_ids and period_ids[0] or False
ctx.update({'journal_id': model.journal_id.id,'period_id': period_id})
try:
entry['name'] = model.name%{'year': move_date.strftime('%Y'), 'month': move_date.strftime('%m'), 'date': move_date.strftime('%Y-%m')}
except:
raise UserError(_('You have a wrong expression "%(...)s" in your model!'))
move_id = account_move_obj.create(cr, uid, {
'ref': entry['name'],
'period_id': period_id,
'journal_id': model.journal_id.id,
'date': context.get('date', fields.date.context_today(self,cr,uid,context=context))
})
move_ids.append(move_id)
for line in model.lines_id:
analytic_account_id = False
if line.analytic_account_id:
if not model.journal_id.analytic_journal_id:
raise UserError(_("You have to define an analytic journal on the '%s' journal!") % (model.journal_id.name,))
analytic_account_id = line.analytic_account_id.id
val = {
'move_id': move_id,
'journal_id': model.journal_id.id,
'period_id': period_id,
'analytic_account_id': analytic_account_id
}
date_maturity = context.get('date',time.strftime('%Y-%m-%d'))
if line.date_maturity == 'partner':
if not line.partner_id:
raise UserError(_("Maturity date of entry line generated by model line '%s' of model '%s' is based on partner payment term!" \
"\nPlease define partner on it!")%(line.name, model.name))
payment_term_id = False
if model.journal_id.type in ('purchase', 'purchase_refund') and line.partner_id.property_supplier_payment_term:
payment_term_id = line.partner_id.property_supplier_payment_term.id
elif line.partner_id.property_payment_term:
payment_term_id = line.partner_id.property_payment_term.id
if payment_term_id:
pterm_list = pt_obj.compute(cr, uid, payment_term_id, value=1, date_ref=date_maturity)
if pterm_list:
pterm_list = [l[0] for l in pterm_list]
pterm_list.sort()
date_maturity = pterm_list[-1]
val.update({
'name': line.name,
'quantity': line.quantity,
'debit': line.debit,
'credit': line.credit,
'account_id': line.account_id.id,
'move_id': move_id,
'partner_id': line.partner_id.id,
'date': context.get('date', fields.date.context_today(self,cr,uid,context=context)),
'date_maturity': date_maturity
})
account_move_line_obj.create(cr, uid, val, context=ctx)
return move_ids
def onchange_journal_id(self, cr, uid, ids, journal_id, context=None):
company_id = False
if journal_id:
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
if journal.company_id.id:
company_id = journal.company_id.id
return {'value': {'company_id': company_id}}
class account_model_line(osv.osv):
_name = "account.model.line"
_description = "Account Model Entries"
_columns = {
'name': fields.char('Name', required=True),
'sequence': fields.integer('Sequence', required=True, help="The sequence field is used to order the resources from lower sequences to higher ones."),
'quantity': fields.float('Quantity', digits_compute=dp.get_precision('Account'), help="The optional quantity on entries."),
'debit': fields.float('Debit', digits_compute=dp.get_precision('Account')),
'credit': fields.float('Credit', digits_compute=dp.get_precision('Account')),
'account_id': fields.many2one('account.account', 'Account', required=True, ondelete="cascade"),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', ondelete="cascade"),
'model_id': fields.many2one('account.model', 'Model', required=True, ondelete="cascade", select=True),
'amount_currency': fields.float('Amount Currency', help="The amount expressed in an optional other currency."),
'currency_id': fields.many2one('res.currency', 'Currency'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'date_maturity': fields.selection([('today','Date of the day'), ('partner','Partner Payment Term')], 'Maturity Date', help="The maturity date of the generated entries for this model. You can choose between the creation date or the creation date of the entries plus the partner payment terms."),
}
_order = 'sequence'
_sql_constraints = [
('credit_debit1', 'CHECK (credit*debit=0)', 'Wrong credit or debit value in model, they must be positive!'),
('credit_debit2', 'CHECK (credit+debit>=0)', 'Wrong credit or debit value in model, they must be positive!'),
]
# ---------------------------------------------------------
# Account Subscription
# ---------------------------------------------------------
class account_subscription(osv.osv):
_name = "account.subscription"
_description = "Account Subscription"
_columns = {
'name': fields.char('Name', required=True),
'ref': fields.char('Reference'),
'model_id': fields.many2one('account.model', 'Model', required=True),
'date_start': fields.date('Start Date', required=True),
'period_total': fields.integer('Number of Periods', required=True),
'period_nbr': fields.integer('Period', required=True),
'period_type': fields.selection([('day','days'),('month','month'),('year','year')], 'Period Type', required=True),
'state': fields.selection([('draft','Draft'),('running','Running'),('done','Done')], 'Status', required=True, readonly=True, copy=False),
'lines_id': fields.one2many('account.subscription.line', 'subscription_id', 'Subscription Lines', copy=True)
}
_defaults = {
'date_start': fields.date.context_today,
'period_type': 'month',
'period_total': 12,
'period_nbr': 1,
'state': 'draft',
}
def state_draft(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'draft'})
return False
def check(self, cr, uid, ids, context=None):
todone = []
for sub in self.browse(cr, uid, ids, context=context):
ok = True
for line in sub.lines_id:
if not line.move_id.id:
ok = False
break
if ok:
todone.append(sub.id)
if todone:
self.write(cr, uid, todone, {'state':'done'})
return False
def remove_line(self, cr, uid, ids, context=None):
toremove = []
for sub in self.browse(cr, uid, ids, context=context):
for line in sub.lines_id:
if not line.move_id.id:
toremove.append(line.id)
if toremove:
self.pool.get('account.subscription.line').unlink(cr, uid, toremove)
self.write(cr, uid, ids, {'state':'draft'})
return False
def compute(self, cr, uid, ids, context=None):
for sub in self.browse(cr, uid, ids, context=context):
ds = sub.date_start
for i in range(sub.period_total):
self.pool.get('account.subscription.line').create(cr, uid, {
'date': ds,
'subscription_id': sub.id,
})
if sub.period_type=='day':
ds = (datetime.strptime(ds, '%Y-%m-%d') + relativedelta(days=sub.period_nbr)).strftime('%Y-%m-%d')
if sub.period_type=='month':
ds = (datetime.strptime(ds, '%Y-%m-%d') + relativedelta(months=sub.period_nbr)).strftime('%Y-%m-%d')
if sub.period_type=='year':
ds = (datetime.strptime(ds, '%Y-%m-%d') + relativedelta(years=sub.period_nbr)).strftime('%Y-%m-%d')
self.write(cr, uid, ids, {'state':'running'})
return True
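# Illustrative schedule (assumed values, not original data): compute() above creates one
# account.subscription.line per period. With date_start='2015-01-01', period_type='month',
# period_nbr=1 and period_total=3, the generated line dates would be 2015-01-01,
# 2015-02-01 and 2015-03-01, and the subscription switches to the 'running' state.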
class account_subscription_line(osv.osv):
_name = "account.subscription.line"
_description = "Account Subscription Line"
_columns = {
'subscription_id': fields.many2one('account.subscription', 'Subscription', required=True, select=True),
'date': fields.date('Date', required=True),
'move_id': fields.many2one('account.move', 'Entry'),
}
def move_create(self, cr, uid, ids, context=None):
tocheck = {}
all_moves = []
obj_model = self.pool.get('account.model')
for line in self.browse(cr, uid, ids, context=context):
data = {
'date': line.date,
}
move_ids = obj_model.generate(cr, uid, [line.subscription_id.model_id.id], data, context)
tocheck[line.subscription_id.id] = True
self.write(cr, uid, [line.id], {'move_id':move_ids[0]})
all_moves.extend(move_ids)
if tocheck:
self.pool.get('account.subscription').check(cr, uid, tocheck.keys(), context)
return all_moves
_rec_name = 'date'
# ---------------------------------------------------------------
# Account Templates: Account, Tax, Tax Code and chart. + Wizard
# ---------------------------------------------------------------
class account_tax_template(osv.osv):
_name = 'account.tax.template'
class account_account_template(osv.osv):
_order = "code"
_name = "account.account.template"
_description ='Templates for Accounts'
_columns = {
'name': fields.char('Name', required=True, select=True),
'currency_id': fields.many2one('res.currency', 'Secondary Currency', help="Forces all moves for this account to have this secondary currency."),
'code': fields.char('Code', size=64, required=True, select=1),
'type': fields.selection([
('receivable','Receivable'),
('payable','Payable'),
('view','View'),
('consolidation','Consolidation'),
('liquidity','Liquidity'),
('other','Regular'),
('closed','Closed'),
], 'Internal Type', required=True,help="This type is used to differentiate types with "\
"special effects in Odoo: view can not have entries, consolidation are accounts that "\
"can have children accounts for multi-company consolidations, payable/receivable are for "\
"partners accounts (for debit/credit computations), closed for depreciated accounts."),
'user_type': fields.many2one('account.account.type', 'Account Type', required=True,
help="These types are defined according to your country. The type contains more information "\
"about the account and its specificities."),
'financial_report_ids': fields.many2many('account.financial.report', 'account_template_financial_report', 'account_template_id', 'report_line_id', 'Financial Reports'),
'reconcile': fields.boolean('Allow Reconciliation', help="Check this option if you want the user to reconcile entries in this account."),
'shortcut': fields.char('Shortcut', size=12),
'note': fields.text('Note'),
'parent_id': fields.many2one('account.account.template', 'Parent Account Template', ondelete='cascade', domain=[('type','=','view')]),
'child_parent_ids':fields.one2many('account.account.template', 'parent_id', 'Children'),
'tax_ids': fields.many2many('account.tax.template', 'account_account_template_tax_rel', 'account_id', 'tax_id', 'Default Taxes'),
'nocreate': fields.boolean('Optional create', help="If checked, the new chart of accounts will not contain this by default."),
'chart_template_id': fields.many2one('account.chart.template', 'Chart Template', help="This optional field allow you to link an account template to a specific chart template that may differ from the one its root parent belongs to. This allow you to define chart templates that extend another and complete it with few new accounts (You don't need to define the whole structure that is common to both several times)."),
}
_defaults = {
'reconcile': False,
'type': 'view',
'nocreate': False,
}
_check_recursion = check_cycle
_constraints = [
(_check_recursion, 'Error!\nYou cannot create recursive account templates.', ['parent_id']),
]
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
reads = self.read(cr, uid, ids, ['name','code'], context=context)
res = []
for record in reads:
name = record['name']
if record['code']:
name = record['code']+' '+name
res.append((record['id'],name ))
return res
def generate_account(self, cr, uid, chart_template_id, tax_template_ref, acc_template_ref, code_digits, company_id, context=None):
"""
This method generates accounts from templates.
:param chart_template_id: id of the chart template chosen in the wizard
:param tax_template_ref: tax templates reference used to write taxes_id in account_account.
:param acc_template_ref: dictionary with the mapping between the account templates and the real accounts.
:param code_digits: number of digits taken from wizard.multi.charts.accounts; used for the account code.
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: return acc_template_ref for reference purpose.
:rtype: dict
"""
if context is None:
context = {}
obj_acc = self.pool.get('account.account')
company_name = self.pool.get('res.company').browse(cr, uid, company_id, context=context).name
template = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
#deactivate the parent_store functionality on account_account for performance reasons
ctx = context.copy()
ctx.update({'defer_parent_store_computation': True})
level_ref = {}
children_acc_criteria = [('chart_template_id','=', chart_template_id)]
if template.account_root_id.id:
children_acc_criteria = ['|'] + children_acc_criteria + ['&',('parent_id','child_of', [template.account_root_id.id]),('chart_template_id','=', False)]
children_acc_template = self.search(cr, uid, [('nocreate','!=',True)] + children_acc_criteria, order='id')
for account_template in self.browse(cr, uid, children_acc_template, context=context):
# skip the root of COA if it's not the main one
if (template.account_root_id.id == account_template.id) and template.parent_id:
continue
tax_ids = []
for tax in account_template.tax_ids:
tax_ids.append(tax_template_ref[tax.id])
code_main = account_template.code and len(account_template.code) or 0
code_acc = account_template.code or ''
if code_main > 0 and code_main <= code_digits and account_template.type != 'view':
code_acc = str(code_acc) + (str('0'*(code_digits-code_main)))
parent_id = account_template.parent_id and ((account_template.parent_id.id in acc_template_ref) and acc_template_ref[account_template.parent_id.id]) or False
#the level has to be given as well at creation time, because of the defer_parent_store_computation in
#context. Because of this, parent_left and parent_right are not computed and thus the child_of
#operator does not return the expected values, with the result that the level field is not computed at all.
if parent_id:
level = parent_id in level_ref and level_ref[parent_id] + 1 or obj_acc._get_level(cr, uid, [parent_id], 'level', None, context=context)[parent_id] + 1
else:
level = 0
vals={
'name': (template.account_root_id.id == account_template.id) and company_name or account_template.name,
'currency_id': account_template.currency_id and account_template.currency_id.id or False,
'code': code_acc,
'type': account_template.type,
'user_type': account_template.user_type and account_template.user_type.id or False,
'reconcile': account_template.reconcile,
'shortcut': account_template.shortcut,
'note': account_template.note,
'financial_report_ids': account_template.financial_report_ids and [(6,0,[x.id for x in account_template.financial_report_ids])] or False,
'parent_id': parent_id,
'tax_ids': [(6,0,tax_ids)],
'company_id': company_id,
'level': level,
}
new_account = obj_acc.create(cr, uid, vals, context=ctx)
acc_template_ref[account_template.id] = new_account
level_ref[new_account] = level
#reactivate the parent_store functionality on account_account
obj_acc._parent_store_compute(cr)
return acc_template_ref
class account_add_tmpl_wizard(osv.osv_memory):
"""Add one more account from the template.
With the 'nocreate' option, some accounts may not be created. Use this to add them later."""
_name = 'account.addtmpl.wizard'
def _get_def_cparent(self, cr, uid, context=None):
acc_obj = self.pool.get('account.account')
tmpl_obj = self.pool.get('account.account.template')
tids = tmpl_obj.read(cr, uid, [context['tmpl_ids']], ['parent_id'])
if not tids or not tids[0]['parent_id']:
return False
ptids = tmpl_obj.read(cr, uid, [tids[0]['parent_id'][0]], ['code'])
res = None
if not ptids or not ptids[0]['code']:
raise UserError(_('There is no parent code for the template account.'))
res = acc_obj.search(cr, uid, [('code','=',ptids[0]['code'])])
return res and res[0] or False
_columns = {
'cparent_id':fields.many2one('account.account', 'Parent target', help="Creates an account with the selected template under this existing parent.", required=True),
}
_defaults = {
'cparent_id': _get_def_cparent,
}
def action_create(self,cr,uid,ids,context=None):
if context is None:
context = {}
acc_obj = self.pool.get('account.account')
tmpl_obj = self.pool.get('account.account.template')
data = self.read(cr, uid, ids)[0]
company_id = acc_obj.read(cr, uid, [data['cparent_id'][0]], ['company_id'])[0]['company_id'][0]
account_template = tmpl_obj.browse(cr, uid, context['tmpl_ids'])
vals = {
'name': account_template.name,
'currency_id': account_template.currency_id and account_template.currency_id.id or False,
'code': account_template.code,
'type': account_template.type,
'user_type': account_template.user_type and account_template.user_type.id or False,
'reconcile': account_template.reconcile,
'shortcut': account_template.shortcut,
'note': account_template.note,
'parent_id': data['cparent_id'][0],
'company_id': company_id,
}
acc_obj.create(cr, uid, vals)
return {'type':'state', 'state': 'end' }
def action_cancel(self, cr, uid, ids, context=None):
return { 'type': 'state', 'state': 'end' }
class account_tax_code_template(osv.osv):
_name = 'account.tax.code.template'
_description = 'Tax Code Template'
_order = 'sequence, code'
_rec_name = 'code'
_columns = {
'name': fields.char('Tax Case Name', required=True),
'code': fields.char('Case Code', size=64),
'info': fields.text('Description'),
'parent_id': fields.many2one('account.tax.code.template', 'Parent Code', select=True),
'child_ids': fields.one2many('account.tax.code.template', 'parent_id', 'Child Codes'),
'sign': fields.float('Sign For Parent', required=True),
'notprintable':fields.boolean("Not Printable in Invoice", help="Check this box if you don't want any tax related to this tax Code to appear on invoices."),
'sequence': fields.integer(
'Sequence', help=(
"Determine the display order in the report 'Accounting "
"\ Reporting \ Generic Reporting \ Taxes \ Taxes Report'"),
),
}
_defaults = {
'sign': 1.0,
'notprintable': False,
}
def generate_tax_code(self, cr, uid, tax_code_root_id, company_id, context=None):
'''
This function generates the tax codes from the tax code templates that are children of the given one passed
in argument. Then it returns a dictionary with the mapping between the templates and the real objects.
:param tax_code_root_id: id of the root of all the tax code templates to process
:param company_id: id of the company the wizard is running for
:returns: dictionary with the mapping between the templates and the real objects.
:rtype: dict
'''
obj_tax_code_template = self.pool.get('account.tax.code.template')
obj_tax_code = self.pool.get('account.tax.code')
tax_code_template_ref = {}
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
#find all the children of the tax_code_root_id
children_tax_code_template = tax_code_root_id and obj_tax_code_template.search(cr, uid, [('parent_id','child_of',[tax_code_root_id])], order='id') or []
for tax_code_template in obj_tax_code_template.browse(cr, uid, children_tax_code_template, context=context):
vals = {
'name': (tax_code_root_id == tax_code_template.id) and company.name or tax_code_template.name,
'code': tax_code_template.code,
'info': tax_code_template.info,
'parent_id': tax_code_template.parent_id and ((tax_code_template.parent_id.id in tax_code_template_ref) and tax_code_template_ref[tax_code_template.parent_id.id]) or False,
'company_id': company_id,
'sign': tax_code_template.sign,
'sequence': tax_code_template.sequence,
}
#check if this tax code already exists
rec_list = obj_tax_code.search(cr, uid, [('name', '=', vals['name']),('code', '=', vals['code']),('company_id', '=', vals['company_id'])], context=context)
if not rec_list:
#if not yet, create it
new_tax_code = obj_tax_code.create(cr, uid, vals)
#recording the new tax code to do the mapping
tax_code_template_ref[tax_code_template.id] = new_tax_code
return tax_code_template_ref
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name','code'], context=context, load='_classic_write')
return [(x['id'], (x['code'] and x['code'] + ' - ' or '') + x['name']) \
for x in reads]
_check_recursion = check_cycle
_constraints = [
(_check_recursion, 'Error!\nYou cannot create recursive Tax Codes.', ['parent_id'])
]
_order = 'code,name'
class account_chart_template(osv.osv):
_name="account.chart.template"
_description= "Templates for Account Chart"
_columns={
'name': fields.char('Name', required=True),
'parent_id': fields.many2one('account.chart.template', 'Parent Chart Template'),
'code_digits': fields.integer('# of Digits', required=True, help="No. of Digits to use for account code"),
'visible': fields.boolean('Can be Visible?', help="Set this to False if you don't want this template to be used actively in the wizard that generates the Chart of Accounts from templates; this is useful when you want to generate accounts of this template only when loading its child template."),
'currency_id': fields.many2one('res.currency', 'Currency'),
'complete_tax_set': fields.boolean('Complete Set of Taxes', help='This boolean helps you to choose whether to propose to the user to encode the sale and purchase rates or to choose from a list of taxes. This last choice assumes that the set of taxes defined on this template is complete'),
'account_root_id': fields.many2one('account.account.template', 'Root Account', domain=[('parent_id','=',False)]),
'tax_code_root_id': fields.many2one('account.tax.code.template', 'Root Tax Code', domain=[('parent_id','=',False)]),
'tax_template_ids': fields.one2many('account.tax.template', 'chart_template_id', 'Tax Template List', help='List of all the taxes that have to be installed by the wizard'),
'bank_account_view_id': fields.many2one('account.account.template', 'Bank Account'),
'property_account_receivable': fields.many2one('account.account.template', 'Receivable Account'),
'property_account_payable': fields.many2one('account.account.template', 'Payable Account'),
'property_account_expense_categ': fields.many2one('account.account.template', 'Expense Category Account'),
'property_account_income_categ': fields.many2one('account.account.template', 'Income Category Account'),
'property_account_expense': fields.many2one('account.account.template', 'Expense Account on Product Template'),
'property_account_income': fields.many2one('account.account.template', 'Income Account on Product Template'),
'property_account_income_opening': fields.many2one('account.account.template', 'Opening Entries Income Account'),
'property_account_expense_opening': fields.many2one('account.account.template', 'Opening Entries Expense Account'),
}
_defaults = {
'visible': True,
'code_digits': 6,
'complete_tax_set': True,
}
class account_tax_template(osv.osv):
_name = 'account.tax.template'
_description = 'Templates for Taxes'
_columns = {
'chart_template_id': fields.many2one('account.chart.template', 'Chart Template', required=True),
'name': fields.char('Tax Name', required=True),
'sequence': fields.integer('Sequence', required=True, help="The sequence field is used to order the taxes lines from lower sequences to higher ones. The order is important if you have a tax that has several tax children. In this case, the evaluation order is important."),
'amount': fields.float('Amount', required=True, digits_compute=get_precision_tax(), help="For Tax Type percent enter % ratio between 0-1."),
'type': fields.selection( [('percent','Percent'), ('fixed','Fixed'), ('none','None'), ('code','Python Code'), ('balance','Balance')], 'Tax Type', required=True),
'applicable_type': fields.selection( [('true','True'), ('code','Python Code')], 'Applicable Type', required=True, help="If not applicable (computed through a Python code), the tax won't appear on the invoice."),
'domain':fields.char('Domain', help="This field is only used if you develop your own module allowing developers to create specific taxes in a custom domain."),
'account_collected_id':fields.many2one('account.account.template', 'Invoice Tax Account'),
'account_paid_id':fields.many2one('account.account.template', 'Refund Tax Account'),
'parent_id':fields.many2one('account.tax.template', 'Parent Tax Account', select=True),
'child_depend':fields.boolean('Tax on Children', help="Set if the tax computation is based on the computation of child taxes rather than on the total amount."),
'python_compute':fields.text('Python Code'),
'python_compute_inv':fields.text('Python Code (reverse)'),
'python_applicable':fields.text('Applicable Code'),
#
# Fields used for the Tax declaration
#
'base_code_id': fields.many2one('account.tax.code.template', 'Base Code', help="Use this code for the tax declaration."),
'tax_code_id': fields.many2one('account.tax.code.template', 'Tax Code', help="Use this code for the tax declaration."),
'base_sign': fields.float('Base Code Sign', help="Usually 1 or -1."),
'tax_sign': fields.float('Tax Code Sign', help="Usually 1 or -1."),
# Same fields for refund invoices
'ref_base_code_id': fields.many2one('account.tax.code.template', 'Refund Base Code', help="Use this code for the tax declaration."),
'ref_tax_code_id': fields.many2one('account.tax.code.template', 'Refund Tax Code', help="Use this code for the tax declaration."),
'ref_base_sign': fields.float('Refund Base Code Sign', help="Usually 1 or -1."),
'ref_tax_sign': fields.float('Refund Tax Code Sign', help="Usually 1 or -1."),
'include_base_amount': fields.boolean('Include in Base Amount', help="Set if the amount of tax must be included in the base amount before computing the next taxes."),
'description': fields.char('Internal Name'),
'type_tax_use': fields.selection([('sale','Sale'),('purchase','Purchase'),('all','All')], 'Tax Use In', required=True,),
'price_include': fields.boolean('Tax Included in Price', help="Check this if the price you use on the product and invoices includes this tax."),
}
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
res = []
for record in self.read(cr, uid, ids, ['description','name'], context=context):
name = record['description'] and record['description'] or record['name']
res.append((record['id'],name ))
return res
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.id
return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
_defaults = {
'python_compute': lambda *a: '''# price_unit\n# product: product.product object or None\n# partner: res.partner object or None\n\nresult = price_unit * 0.10''',
'python_compute_inv': lambda *a: '''# price_unit\n# product: product.product object or False\n\nresult = price_unit * 0.10''',
'applicable_type': 'true',
'type': 'percent',
'amount': 0,
'sequence': 1,
'ref_tax_sign': 1,
'ref_base_sign': 1,
'tax_sign': 1,
'base_sign': 1,
'include_base_amount': False,
'type_tax_use': 'all',
'price_include': 0,
}
_order = 'sequence'
def _generate_tax(self, cr, uid, tax_templates, tax_code_template_ref, company_id, context=None):
"""
This method generates taxes from templates.
:param tax_templates: list of browse records of the tax templates to process
:param tax_code_template_ref: Tax code templates reference.
:param company_id: id of the company the wizard is running for
:returns:
{
'tax_template_to_tax': mapping between the tax templates and the corresponding newly generated taxes,
'account_dict': dictionary containing a to-do list with all the accounts to assign on new taxes
}
"""
if context is None:
context = {}
res = {}
todo_dict = {}
tax_template_to_tax = {}
for tax in tax_templates:
vals_tax = {
'name':tax.name,
'sequence': tax.sequence,
'amount': tax.amount,
'type': tax.type,
'applicable_type': tax.applicable_type,
'domain': tax.domain,
'parent_id': tax.parent_id and ((tax.parent_id.id in tax_template_to_tax) and tax_template_to_tax[tax.parent_id.id]) or False,
'child_depend': tax.child_depend,
'python_compute': tax.python_compute,
'python_compute_inv': tax.python_compute_inv,
'python_applicable': tax.python_applicable,
'base_code_id': tax.base_code_id and ((tax.base_code_id.id in tax_code_template_ref) and tax_code_template_ref[tax.base_code_id.id]) or False,
'tax_code_id': tax.tax_code_id and ((tax.tax_code_id.id in tax_code_template_ref) and tax_code_template_ref[tax.tax_code_id.id]) or False,
'base_sign': tax.base_sign,
'tax_sign': tax.tax_sign,
'ref_base_code_id': tax.ref_base_code_id and ((tax.ref_base_code_id.id in tax_code_template_ref) and tax_code_template_ref[tax.ref_base_code_id.id]) or False,
'ref_tax_code_id': tax.ref_tax_code_id and ((tax.ref_tax_code_id.id in tax_code_template_ref) and tax_code_template_ref[tax.ref_tax_code_id.id]) or False,
'ref_base_sign': tax.ref_base_sign,
'ref_tax_sign': tax.ref_tax_sign,
'include_base_amount': tax.include_base_amount,
'description': tax.description,
'company_id': company_id,
'type_tax_use': tax.type_tax_use,
'price_include': tax.price_include
}
new_tax = self.pool.get('account.tax').create(cr, uid, vals_tax)
tax_template_to_tax[tax.id] = new_tax
#as the accounts have not been created yet, we have to wait before filling these fields
todo_dict[new_tax] = {
'account_collected_id': tax.account_collected_id and tax.account_collected_id.id or False,
'account_paid_id': tax.account_paid_id and tax.account_paid_id.id or False,
}
res.update({'tax_template_to_tax': tax_template_to_tax, 'account_dict': todo_dict})
return res
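# A minimal sketch (hypothetical ids) of the structure returned by _generate_tax for two
# tax templates, the second of which references collected/paid account templates:
#
#   {
#       'tax_template_to_tax': {101: 7, 102: 8},   # tax template id -> new account.tax id
#       'account_dict': {8: {'account_collected_id': 205, 'account_paid_id': 206}},
#   }
#
# The 'account_dict' values still hold *template* account ids; they are rewritten to the
# ids of the real accounts later, once those accounts have been generated.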
# Fiscal Position Templates
class account_fiscal_position_template(osv.osv):
_name = 'account.fiscal.position.template'
_description = 'Template for Fiscal Position'
_columns = {
'name': fields.char('Fiscal Position Template', required=True),
'chart_template_id': fields.many2one('account.chart.template', 'Chart Template', required=True),
'account_ids': fields.one2many('account.fiscal.position.account.template', 'position_id', 'Account Mapping'),
'tax_ids': fields.one2many('account.fiscal.position.tax.template', 'position_id', 'Tax Mapping'),
'note': fields.text('Notes'),
}
def generate_fiscal_position(self, cr, uid, chart_temp_id, tax_template_ref, acc_template_ref, company_id, context=None):
"""
This method generates the Fiscal Positions, Fiscal Position Accounts and Fiscal Position Taxes from templates.
:param chart_temp_id: Chart Template Id.
:param tax_template_ref: Tax templates reference for generating account.fiscal.position.tax.
:param acc_template_ref: Account templates reference for generating account.fiscal.position.account.
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: True
"""
if context is None:
context = {}
obj_tax_fp = self.pool.get('account.fiscal.position.tax')
obj_ac_fp = self.pool.get('account.fiscal.position.account')
obj_fiscal_position = self.pool.get('account.fiscal.position')
fp_ids = self.search(cr, uid, [('chart_template_id', '=', chart_temp_id)])
for position in self.browse(cr, uid, fp_ids, context=context):
new_fp = obj_fiscal_position.create(cr, uid, {'company_id': company_id, 'name': position.name, 'note': position.note})
for tax in position.tax_ids:
obj_tax_fp.create(cr, uid, {
'tax_src_id': tax_template_ref[tax.tax_src_id.id],
'tax_dest_id': tax.tax_dest_id and tax_template_ref[tax.tax_dest_id.id] or False,
'position_id': new_fp
})
for acc in position.account_ids:
obj_ac_fp.create(cr, uid, {
'account_src_id': acc_template_ref[acc.account_src_id.id],
'account_dest_id': acc_template_ref[acc.account_dest_id.id],
'position_id': new_fp
})
return True
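# Illustrative sketch (hypothetical ids): with tax_template_ref = {11: 3, 12: 4} and
# acc_template_ref = {21: 301, 22: 302}, a fiscal position template that maps tax 11 to
# tax 12 and account 21 to account 22 results in the following records being created for
# the new fiscal position:
#
#   obj_tax_fp.create(cr, uid, {'tax_src_id': 3, 'tax_dest_id': 4, 'position_id': new_fp})
#   obj_ac_fp.create(cr, uid, {'account_src_id': 301, 'account_dest_id': 302, 'position_id': new_fp})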
class account_fiscal_position_tax_template(osv.osv):
_name = 'account.fiscal.position.tax.template'
_description = 'Template Tax Fiscal Position'
_rec_name = 'position_id'
_columns = {
'position_id': fields.many2one('account.fiscal.position.template', 'Fiscal Position', required=True, ondelete='cascade'),
'tax_src_id': fields.many2one('account.tax.template', 'Tax Source', required=True),
'tax_dest_id': fields.many2one('account.tax.template', 'Replacement Tax')
}
class account_fiscal_position_account_template(osv.osv):
_name = 'account.fiscal.position.account.template'
_description = 'Template Account Fiscal Mapping'
_rec_name = 'position_id'
_columns = {
'position_id': fields.many2one('account.fiscal.position.template', 'Fiscal Mapping', required=True, ondelete='cascade'),
'account_src_id': fields.many2one('account.account.template', 'Account Source', domain=[('type','<>','view')], required=True),
'account_dest_id': fields.many2one('account.account.template', 'Account Destination', domain=[('type','<>','view')], required=True)
}
# ---------------------------------------------------------
# Account generation from template wizards
# ---------------------------------------------------------
class wizard_multi_charts_accounts(osv.osv_memory):
"""
Create a new account chart for a company.
The wizard asks for:
* a company
* an account chart template
* a number of digits for formatting the code of non-view accounts
* a list of bank accounts owned by the company
Then, the wizard:
* generates all accounts from the template and assigns them to the right company
* generates all taxes and tax codes, changing account assignations
* generates all accounting properties and assigns them correctly
"""
_name='wizard.multi.charts.accounts'
_inherit = 'res.config'
_columns = {
'company_id':fields.many2one('res.company', 'Company', required=True),
'currency_id': fields.many2one('res.currency', 'Currency', help="Currency as per company's country."),
'only_one_chart_template': fields.boolean('Only One Chart Template Available'),
'chart_template_id': fields.many2one('account.chart.template', 'Chart Template', required=True),
'bank_accounts_id': fields.one2many('account.bank.accounts.wizard', 'bank_account_id', 'Cash and Banks', required=True),
'code_digits':fields.integer('# of Digits', required=True, help="No. of Digits to use for account code"),
"sale_tax": fields.many2one("account.tax.template", "Default Sale Tax"),
"purchase_tax": fields.many2one("account.tax.template", "Default Purchase Tax"),
'sale_tax_rate': fields.float('Sales Tax(%)'),
'purchase_tax_rate': fields.float('Purchase Tax(%)'),
'complete_tax_set': fields.boolean('Complete Set of Taxes', help='This boolean helps you to choose if you want to propose to the user to encode the sales and purchase rates or to use the usual m2o fields. This last choice assumes that the set of taxes defined for the chosen template is complete'),
}
def _get_chart_parent_ids(self, cr, uid, chart_template, context=None):
""" Returns the IDs of all ancestor charts, including the chart itself.
(inverse of child_of operator)
:param browse_record chart_template: the account.chart.template record
:return: the IDS of all ancestor charts, including the chart itself.
"""
result = [chart_template.id]
while chart_template.parent_id:
chart_template = chart_template.parent_id
result.append(chart_template.id)
return result
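# Minimal sketch, assuming a hypothetical hierarchy generic chart (id 1) -> country chart
# (id 2) -> localized variant (id 3): calling _get_chart_parent_ids on the variant
# returns [3, 2, 1], i.e. the chart itself followed by all of its ancestors.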
def onchange_tax_rate(self, cr, uid, ids, rate=False, context=None):
return {'value': {'purchase_tax_rate': rate or False}}
def onchange_chart_template_id(self, cr, uid, ids, chart_template_id=False, context=None):
res = {}
tax_templ_obj = self.pool.get('account.tax.template')
res['value'] = {'complete_tax_set': False, 'sale_tax': False, 'purchase_tax': False}
if chart_template_id:
data = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
currency_id = data.currency_id and data.currency_id.id or self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
res['value'].update({'complete_tax_set': data.complete_tax_set, 'currency_id': currency_id})
if data.complete_tax_set:
# the default tax is the one with the lowest sequence. For equal sequences we take the latest created one, as is the case for taxes created while installing the generic chart of accounts
chart_ids = self._get_chart_parent_ids(cr, uid, data, context=context)
base_tax_domain = [("chart_template_id", "in", chart_ids), ('parent_id', '=', False)]
sale_tax_domain = base_tax_domain + [('type_tax_use', 'in', ('sale','all'))]
purchase_tax_domain = base_tax_domain + [('type_tax_use', 'in', ('purchase','all'))]
sale_tax_ids = tax_templ_obj.search(cr, uid, sale_tax_domain, order="sequence, id desc")
purchase_tax_ids = tax_templ_obj.search(cr, uid, purchase_tax_domain, order="sequence, id desc")
res['value'].update({'sale_tax': sale_tax_ids and sale_tax_ids[0] or False,
'purchase_tax': purchase_tax_ids and purchase_tax_ids[0] or False})
res.setdefault('domain', {})
res['domain']['sale_tax'] = repr(sale_tax_domain)
res['domain']['purchase_tax'] = repr(purchase_tax_domain)
if data.code_digits:
res['value'].update({'code_digits': data.code_digits})
return res
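# Hedged example of the default-tax ordering used above (hypothetical data): with two
# sale tax templates sharing sequence 1 and ids 10 and 12, the search ordered by
# "sequence, id desc" returns [12, 10], so the most recently created template (id 12)
# is proposed as the default sale tax.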
def default_get(self, cr, uid, fields, context=None):
res = super(wizard_multi_charts_accounts, self).default_get(cr, uid, fields, context=context)
tax_templ_obj = self.pool.get('account.tax.template')
account_chart_template = self.pool['account.chart.template']
if 'bank_accounts_id' in fields:
res.update({'bank_accounts_id': [{'acc_name': _('Cash'), 'account_type': 'cash'},{'acc_name': _('Bank'), 'account_type': 'bank'}]})
if 'company_id' in fields:
res.update({'company_id': self.pool.get('res.users').browse(cr, uid, [uid], context=context)[0].company_id.id})
if 'currency_id' in fields:
company_id = res.get('company_id') or False
if company_id:
company_obj = self.pool.get('res.company')
country_id = company_obj.browse(cr, uid, company_id, context=context).country_id.id
currency_id = company_obj.on_change_country(cr, uid, company_id, country_id, context=context)['value']['currency_id']
res.update({'currency_id': currency_id})
ids = account_chart_template.search(cr, uid, [('visible', '=', True)], context=context)
if ids:
# in order to default to the chart that was created last, take the max of the ids.
chart_id = max(ids)
if context.get("default_charts"):
model_data = self.pool.get('ir.model.data').search_read(cr, uid, [('model','=','account.chart.template'),('module','=',context.get("default_charts"))], ['res_id'], context=context)
if model_data:
chart_id = model_data[0]['res_id']
chart = account_chart_template.browse(cr, uid, chart_id, context=context)
chart_hierarchy_ids = self._get_chart_parent_ids(cr, uid, chart, context=context)
if 'chart_template_id' in fields:
res.update({'only_one_chart_template': len(ids) == 1,
'chart_template_id': chart_id})
if 'sale_tax' in fields:
sale_tax_ids = tax_templ_obj.search(cr, uid, [("chart_template_id", "in", chart_hierarchy_ids),
('type_tax_use', 'in', ('sale','all'))],
order="sequence")
res.update({'sale_tax': sale_tax_ids and sale_tax_ids[0] or False})
if 'purchase_tax' in fields:
purchase_tax_ids = tax_templ_obj.search(cr, uid, [("chart_template_id", "in", chart_hierarchy_ids),
('type_tax_use', 'in', ('purchase','all'))],
order="sequence")
res.update({'purchase_tax': purchase_tax_ids and purchase_tax_ids[0] or False})
res.update({
'purchase_tax_rate': 15.0,
'sale_tax_rate': 15.0,
})
return res
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if context is None:context = {}
res = super(wizard_multi_charts_accounts, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
cmp_select = []
acc_template_obj = self.pool.get('account.chart.template')
company_obj = self.pool.get('res.company')
company_ids = company_obj.search(cr, uid, [], context=context)
# display in the company selection widget only the companies that haven't been configured yet (but don't care about the demo chart of accounts)
cr.execute("SELECT company_id FROM account_account WHERE active = 't' AND account_account.parent_id IS NULL AND name != %s", ("Chart For Automated Tests",))
configured_cmp = [r[0] for r in cr.fetchall()]
unconfigured_cmp = list(set(company_ids)-set(configured_cmp))
for field in res['fields']:
if field == 'company_id':
res['fields'][field]['domain'] = [('id','in',unconfigured_cmp)]
res['fields'][field]['selection'] = [('', '')]
if unconfigured_cmp:
cmp_select = [(line.id, line.name) for line in company_obj.browse(cr, uid, unconfigured_cmp)]
res['fields'][field]['selection'] = cmp_select
return res
def check_created_journals(self, cr, uid, vals_journal, company_id, context=None):
"""
This method checks whether the journal already exists for the company; if not, it creates a new journal.
"""
obj_journal = self.pool.get('account.journal')
rec_list = obj_journal.search(cr, uid, [('name','=', vals_journal['name']),('company_id', '=', company_id)], context=context)
if not rec_list:
obj_journal.create(cr, uid, vals_journal, context=context)
return True
def generate_journals(self, cr, uid, chart_template_id, acc_template_ref, company_id, context=None):
"""
This method is used for creating journals.
:param chart_template_id: Chart Template Id.
:param acc_template_ref: Account templates reference.
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: True
"""
journal_data = self._prepare_all_journals(cr, uid, chart_template_id, acc_template_ref, company_id, context=context)
for vals_journal in journal_data:
self.check_created_journals(cr, uid, vals_journal, company_id, context=context)
return True
def _prepare_all_journals(self, cr, uid, chart_template_id, acc_template_ref, company_id, context=None):
def _get_analytic_journal(journal_type):
# Get the analytic journal
data = False
try:
if journal_type in ('sale', 'sale_refund'):
data = obj_data.get_object_reference(cr, uid, 'account', 'analytic_journal_sale')
elif journal_type in ('purchase', 'purchase_refund'):
data = obj_data.get_object_reference(cr, uid, 'account', 'exp')
elif journal_type == 'general':
pass
except ValueError:
pass
return data and data[1] or False
def _get_default_account(journal_type, type='debit'):
# Get the default accounts
default_account = False
if journal_type in ('sale', 'sale_refund'):
default_account = acc_template_ref.get(template.property_account_income_categ.id)
elif journal_type in ('purchase', 'purchase_refund'):
default_account = acc_template_ref.get(template.property_account_expense_categ.id)
elif journal_type == 'situation':
if type == 'debit':
default_account = acc_template_ref.get(template.property_account_expense_opening.id)
else:
default_account = acc_template_ref.get(template.property_account_income_opening.id)
return default_account
journal_names = {
'sale': _('Sales Journal'),
'purchase': _('Purchase Journal'),
'sale_refund': _('Sales Refund Journal'),
'purchase_refund': _('Purchase Refund Journal'),
'general': _('Miscellaneous Journal'),
'situation': _('Opening Entries Journal'),
}
journal_codes = {
'sale': _('SAJ'),
'purchase': _('EXJ'),
'sale_refund': _('SCNJ'),
'purchase_refund': _('ECNJ'),
'general': _('MISC'),
'situation': _('OPEJ'),
}
obj_data = self.pool.get('ir.model.data')
analytic_journal_obj = self.pool.get('account.analytic.journal')
template = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
journal_data = []
for journal_type in ['sale', 'purchase', 'sale_refund', 'purchase_refund', 'general', 'situation']:
vals = {
'type': journal_type,
'name': journal_names[journal_type],
'code': journal_codes[journal_type],
'company_id': company_id,
'centralisation': journal_type == 'situation',
'analytic_journal_id': _get_analytic_journal(journal_type),
'default_credit_account_id': _get_default_account(journal_type, 'credit'),
'default_debit_account_id': _get_default_account(journal_type, 'debit'),
}
journal_data.append(vals)
return journal_data
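# Sketch of a single entry of journal_data produced above for the 'sale' type
# (hypothetical ids):
#
#   {'type': 'sale', 'name': 'Sales Journal', 'code': 'SAJ', 'company_id': 1,
#    'centralisation': False, 'analytic_journal_id': 5,
#    'default_credit_account_id': 410, 'default_debit_account_id': 410}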
def generate_properties(self, cr, uid, chart_template_id, acc_template_ref, company_id, context=None):
"""
This method is used for creating properties.
:param chart_template_id: id of the current chart template for which we need to create properties
:param acc_template_ref: Mapping between ids of account templates and real accounts created from them
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: True
"""
property_obj = self.pool.get('ir.property')
field_obj = self.pool.get('ir.model.fields')
todo_list = [
('property_account_receivable','res.partner','account.account'),
('property_account_payable','res.partner','account.account'),
('property_account_expense_categ','product.category','account.account'),
('property_account_income_categ','product.category','account.account'),
('property_account_expense','product.template','account.account'),
('property_account_income','product.template','account.account'),
]
template = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
for record in todo_list:
account = getattr(template, record[0])
value = account and 'account.account,' + str(acc_template_ref[account.id]) or False
if value:
field = field_obj.search(cr, uid, [('name', '=', record[0]),('model', '=', record[1]),('relation', '=', record[2])], context=context)
vals = {
'name': record[0],
'company_id': company_id,
'fields_id': field[0],
'value': value,
}
property_ids = property_obj.search(cr, uid, [('name','=', record[0]),('company_id', '=', company_id)], context=context)
if property_ids:
#the property exist: modify it
property_obj.write(cr, uid, property_ids, vals, context=context)
else:
#create the property
property_obj.create(cr, uid, vals, context=context)
return True
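# Hedged example of the ir.property written above (hypothetical ids): for
# 'property_account_receivable' mapped to a generated account with id 512, the stored
# value is the reference string 'account.account,512', scoped to the given company.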
def _install_template(self, cr, uid, template_id, company_id, code_digits=None, obj_wizard=None, acc_ref=None, taxes_ref=None, tax_code_ref=None, context=None):
'''
This function recursively loads the template objects and create the real objects from them.
:param template_id: id of the chart template to load
:param company_id: id of the company the wizard is running for
:param code_digits: integer that depicts the number of digits the accounts code should have in the COA
:param obj_wizard: the current wizard for generating the COA from the templates
:param acc_ref: Mapping between ids of account templates and real accounts created from them
:param taxes_ref: Mapping between ids of tax templates and real taxes created from them
:param tax_code_ref: Mapping between ids of tax code templates and real tax codes created from them
:returns: return a tuple with a dictionary containing
* the mapping between the account template ids and the ids of the real accounts that have been generated
from them, as first item,
* a similar dictionary for mapping the tax templates and taxes, as second item,
* a last identical dictionary containing the mapping of tax code templates and tax codes
:rtype: tuple(dict, dict, dict)
'''
if acc_ref is None:
acc_ref = {}
if taxes_ref is None:
taxes_ref = {}
if tax_code_ref is None:
tax_code_ref = {}
template = self.pool.get('account.chart.template').browse(cr, uid, template_id, context=context)
if template.parent_id:
tmp1, tmp2, tmp3 = self._install_template(cr, uid, template.parent_id.id, company_id, code_digits=code_digits, acc_ref=acc_ref, taxes_ref=taxes_ref, tax_code_ref=tax_code_ref, context=context)
acc_ref.update(tmp1)
taxes_ref.update(tmp2)
tax_code_ref.update(tmp3)
tmp1, tmp2, tmp3 = self._load_template(cr, uid, template_id, company_id, code_digits=code_digits, obj_wizard=obj_wizard, account_ref=acc_ref, taxes_ref=taxes_ref, tax_code_ref=tax_code_ref, context=context)
acc_ref.update(tmp1)
taxes_ref.update(tmp2)
tax_code_ref.update(tmp3)
return acc_ref, taxes_ref, tax_code_ref
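# Minimal sketch of the recursion, assuming a hypothetical child template C whose parent
# is P: _install_template(C) first installs P (its tax codes, taxes and accounts), merges
# the resulting mappings into acc_ref/taxes_ref/tax_code_ref, and only then loads C
# itself, so that records of the child template can reference objects generated from the
# parent template.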
def _load_template(self, cr, uid, template_id, company_id, code_digits=None, obj_wizard=None, account_ref=None, taxes_ref=None, tax_code_ref=None, context=None):
'''
This function generates all the objects from the templates
:param template_id: id of the chart template to load
:param company_id: id of the company the wizard is running for
:param code_digits: integer that depicts the number of digits the accounts code should have in the COA
:param obj_wizard: the current wizard for generating the COA from the templates
:param account_ref: Mapping between ids of account templates and real accounts created from them
:param taxes_ref: Mapping between ids of tax templates and real taxes created from them
:param tax_code_ref: Mapping between ids of tax code templates and real tax codes created from them
:returns: return a tuple with a dictionary containing
* the mapping between the account template ids and the ids of the real accounts that have been generated
from them, as first item,
* a similar dictionary for mapping the tax templates and taxes, as second item,
* a last identical dictionary containing the mapping of tax code templates and tax codes
:rtype: tuple(dict, dict, dict)
'''
if account_ref is None:
account_ref = {}
if taxes_ref is None:
taxes_ref = {}
if tax_code_ref is None:
tax_code_ref = {}
template = self.pool.get('account.chart.template').browse(cr, uid, template_id, context=context)
obj_tax_code_template = self.pool.get('account.tax.code.template')
obj_acc_tax = self.pool.get('account.tax')
obj_tax_temp = self.pool.get('account.tax.template')
obj_acc_template = self.pool.get('account.account.template')
obj_fiscal_position_template = self.pool.get('account.fiscal.position.template')
# create all the tax code.
tax_code_ref.update(obj_tax_code_template.generate_tax_code(cr, uid, template.tax_code_root_id.id, company_id, context=context))
# Generate taxes from templates.
tax_templates = [x for x in template.tax_template_ids]
generated_tax_res = obj_tax_temp._generate_tax(cr, uid, tax_templates, tax_code_ref, company_id, context=context)
taxes_ref.update(generated_tax_res['tax_template_to_tax'])
# Generating Accounts from templates.
account_template_ref = obj_acc_template.generate_account(cr, uid, template_id, taxes_ref, account_ref, code_digits, company_id, context=context)
account_ref.update(account_template_ref)
# writing account values on tax after creation of accounts
for key,value in generated_tax_res['account_dict'].items():
if value['account_collected_id'] or value['account_paid_id']:
obj_acc_tax.write(cr, uid, [key], {
'account_collected_id': account_ref.get(value['account_collected_id'], False),
'account_paid_id': account_ref.get(value['account_paid_id'], False),
})
# Create Journals
self.generate_journals(cr, uid, template_id, account_ref, company_id, context=context)
# generate properties function
self.generate_properties(cr, uid, template_id, account_ref, company_id, context=context)
# Generate Fiscal Position , Fiscal Position Accounts and Fiscal Position Taxes from templates
obj_fiscal_position_template.generate_fiscal_position(cr, uid, template_id, taxes_ref, account_ref, company_id, context=context)
return account_ref, taxes_ref, tax_code_ref
def _create_tax_templates_from_rates(self, cr, uid, obj_wizard, company_id, context=None):
'''
This function checks if the chosen chart template is configured as containing a full set of taxes, and if
it's not the case, it adjusts the sale and purchase tax templates (name and amount) according
to the provided sale/purchase rates, so that sensible default taxes exist for this chart
template.
:param obj_wizard: browse record of wizard to generate COA from templates
:param company_id: id of the company for which the wizard is running
:return: True
'''
obj_tax_code_template = self.pool.get('account.tax.code.template')
obj_tax_temp = self.pool.get('account.tax.template')
chart_template = obj_wizard.chart_template_id
vals = {}
all_parents = self._get_chart_parent_ids(cr, uid, chart_template, context=context)
# create tax templates and tax code templates from purchase_tax_rate and sale_tax_rate fields
if not chart_template.complete_tax_set:
value = obj_wizard.sale_tax_rate
ref_tax_ids = obj_tax_temp.search(cr, uid, [('type_tax_use','in', ('sale','all')), ('chart_template_id', 'in', all_parents)], context=context, order="sequence, id desc", limit=1)
obj_tax_temp.write(cr, uid, ref_tax_ids, {'amount': value/100.0, 'name': _('Tax %.2f%%') % value})
value = obj_wizard.purchase_tax_rate
ref_tax_ids = obj_tax_temp.search(cr, uid, [('type_tax_use','in', ('purchase','all')), ('chart_template_id', 'in', all_parents)], context=context, order="sequence, id desc", limit=1)
obj_tax_temp.write(cr, uid, ref_tax_ids, {'amount': value/100.0, 'name': _('Purchase Tax %.2f%%') % value})
return True
def execute(self, cr, uid, ids, context=None):
'''
This function is called at the confirmation of the wizard to generate the COA from the templates. It will read
all the provided information to create the accounts, the banks, the journals, the taxes, the tax codes, the
accounting properties, and so on, for the chosen company.
'''
if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
obj_data = self.pool.get('ir.model.data')
ir_values_obj = self.pool.get('ir.values')
obj_wizard = self.browse(cr, uid, ids[0])
company_id = obj_wizard.company_id.id
self.pool.get('res.company').write(cr, uid, [company_id], {'currency_id': obj_wizard.currency_id.id, 'accounts_code_digits': obj_wizard.code_digits}, context=context)
# When we install the CoA of first company, set the currency to price types and pricelists
if company_id==1:
for ref in (('product','list_price'),('product','standard_price'),('product','list0'),('purchase','list0')):
try:
tmp2 = obj_data.get_object_reference(cr, uid, *ref)
if tmp2:
self.pool[tmp2[0]].write(cr, uid, tmp2[1], {
'currency_id': obj_wizard.currency_id.id
})
except ValueError:
pass
# If the floats for sale/purchase rates have been filled, create templates from them
self._create_tax_templates_from_rates(cr, uid, obj_wizard, company_id, context=context)
# Install all the templates objects and generate the real objects
acc_template_ref, taxes_ref, tax_code_ref = self._install_template(cr, uid, obj_wizard.chart_template_id.id, company_id, code_digits=obj_wizard.code_digits, obj_wizard=obj_wizard, context=context)
# write values of default taxes for product as super user
if obj_wizard.sale_tax and taxes_ref:
ir_values_obj.set_default(cr, SUPERUSER_ID, 'product.template', "taxes_id", [taxes_ref[obj_wizard.sale_tax.id]], for_all_users=True, company_id=company_id)
if obj_wizard.purchase_tax and taxes_ref:
ir_values_obj.set_default(cr, SUPERUSER_ID, 'product.template', "supplier_taxes_id", [taxes_ref[obj_wizard.purchase_tax.id]], for_all_users=True, company_id=company_id)
# Create Bank journals
self._create_bank_journals_from_o2m(cr, uid, obj_wizard, company_id, acc_template_ref, context=context)
return {}
def _prepare_bank_journal(self, cr, uid, company, line, default_account_id, context=None):
'''
This function prepares the value to use for the creation of a bank journal created through the wizard of
generating COA from templates.
:param line: dictionary containing the values encoded by the user related to his bank account
:param default_account_id: id of the default debit/credit account created beforehand for this journal.
:param company: browse record of the company for which the wizard is running
:return: mapping of field names and values
:rtype: dict
'''
obj_journal = self.pool.get('account.journal')
# we need to loop to find next number for journal code
for num in xrange(1, 100):
# journal_code has a maximal size of 5, hence we can enforce the boundary num < 100
journal_code = _('BNK')[:3] + str(num)
ids = obj_journal.search(cr, uid, [('code', '=', journal_code), ('company_id', '=', company.id)], context=context)
if not ids:
break
else:
raise UserError(_('Cannot generate an unused journal code.'))
return {
'name': line['acc_name'],
'code': journal_code,
'type': line['account_type'] == 'cash' and 'cash' or 'bank',
'company_id': company.id,
'analytic_journal_id': False,
'currency': line['currency_id'] or False,
'default_credit_account_id': default_account_id,
'default_debit_account_id': default_account_id,
}
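# The loop above simply probes BNK1, BNK2, ... BNK99 and keeps the first code unused by
# this company; e.g. if BNK1 and BNK2 already exist, the new journal is created as 'BNK3'.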
def _prepare_bank_account(self, cr, uid, company, line, acc_template_ref=False, ref_acc_bank=False, context=None):
'''
This function prepares the value to use for the creation of the default debit and credit accounts of a
bank journal created through the wizard of generating COA from templates.
:param company: company for which the wizard is running
:param line: dictionary containing the values encoded by the user related to his bank account
:param acc_template_ref: the dictionary containing the mapping between the ids of account templates and the ids
of the accounts that have been generated from them.
:param ref_acc_bank: browse record of the account template set as root of all bank accounts for the chosen
template
:return: mapping of field names and values
:rtype: dict
'''
obj_data = self.pool.get('ir.model.data')
obj_acc = self.pool.get('account.account')
# Seek the next available number for the account code
code_digits = company.accounts_code_digits or 0
bank_account_code_char = company.bank_account_code_char or ''
for num in xrange(1, 100):
new_code = str(bank_account_code_char.ljust(code_digits - 1, '0')) + str(num)
ids = obj_acc.search(cr, uid, [('code', '=', new_code), ('company_id', '=', company.id)])
if not ids:
break
else:
raise UserError(_('Cannot generate an unused account code.'))
# Get the ids of the user types for cash and bank
tmp = obj_data.get_object_reference(cr, uid, 'account', 'data_account_type_cash')
cash_type = tmp and tmp[1] or False
tmp = obj_data.get_object_reference(cr, uid, 'account', 'data_account_type_bank')
bank_type = tmp and tmp[1] or False
parent_id = False
if acc_template_ref:
parent_id = acc_template_ref[ref_acc_bank.id]
else:
tmp = self.pool.get('account.account').search(cr, uid, [('code', '=', company.bank_account_code_char)], context=context)
if tmp:
parent_id = tmp[0]
return {
'name': line['acc_name'],
'currency_id': line['currency_id'] or False,
'code': new_code,
'type': 'liquidity',
'user_type': line['account_type'] == 'cash' and cash_type or bank_type,
# TODO: refactor when parent_id removed from account.account
'parent_id': parent_id,
'company_id': company.id,
}
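# Hedged example of the account code generation above: with accounts_code_digits = 6 and
# bank_account_code_char = '512', the candidates are '51200' + num, i.e. '512001',
# '512002', ... and the first code not yet used by the company is kept.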
def _create_bank_journals_from_o2m(self, cr, uid, obj_wizard, company_id, acc_template_ref, context=None):
'''
This function creates bank journals and its accounts for each line encoded in the field bank_accounts_id of the
wizard.
:param obj_wizard: the current wizard that generates the COA from the templates.
:param company_id: the id of the company for which the wizard is running.
:param acc_template_ref: the dictionary containing the mapping between the ids of account templates and the ids
of the accounts that have been generated from them.
:return: True
'''
obj_acc = self.pool.get('account.account')
obj_journal = self.pool.get('account.journal')
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
# Build a list with all the data to process
journal_data = []
if obj_wizard.bank_accounts_id:
for acc in obj_wizard.bank_accounts_id:
vals = {
'acc_name': acc.acc_name,
'account_type': acc.account_type,
'currency_id': acc.currency_id.id,
}
journal_data.append(vals)
ref_acc_bank = obj_wizard.chart_template_id.bank_account_view_id
if journal_data and not ref_acc_bank.code:
raise UserError(_('You have to set a code for the bank account defined on the selected chart of accounts.'))
self.pool.get('res.company').write(cr, uid, [company.id], {'bank_account_code_char': ref_acc_bank.code}, context=context)
for line in journal_data:
# Create the default debit/credit accounts for this bank journal
vals = self._prepare_bank_account(cr, uid, company, line, acc_template_ref, ref_acc_bank, context=context)
default_account_id = obj_acc.create(cr, uid, vals, context=context)
#create the bank journal
vals_journal = self._prepare_bank_journal(cr, uid, company, line, default_account_id, context=context)
obj_journal.create(cr, uid, vals_journal)
return True
class account_bank_accounts_wizard(osv.osv_memory):
_name='account.bank.accounts.wizard'
_columns = {
'acc_name': fields.char('Account Name.', required=True),
'bank_account_id': fields.many2one('wizard.multi.charts.accounts', 'Bank Account', required=True, ondelete='cascade'),
'currency_id': fields.many2one('res.currency', 'Secondary Currency', help="Forces all moves for this account to have this secondary currency."),
'account_type': fields.selection([('cash','Cash'), ('check','Check'), ('bank','Bank')], 'Account Type'),
}
|
agpl-3.0
|
barnone/EigenD
|
tools/packages/SCons/Tool/c++.py
|
2
|
3374
|
"""SCons.Tool.c++
Tool-specific initialization for generic Posix C++ compilers.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/c++.py 4577 2009/12/27 19:43:56 scons"
import os.path
import SCons.Tool
import SCons.Defaults
import SCons.Util
compilers = ['CC', 'c++']
CXXSuffixes = ['.cpp', '.cc', '.cxx', '.c++', '.C++', '.mm']
if SCons.Util.case_sensitive_suffixes('.c', '.C'):
CXXSuffixes.append('.C')
def iscplusplus(source):
if not source:
# Source might be None for unusual cases like SConf.
return 0
for s in source:
if s.sources:
ext = os.path.splitext(str(s.sources[0]))[1]
if ext in CXXSuffixes:
return 1
return 0
def generate(env):
"""
Add Builders and construction variables for generic POSIX C++ compilers
to an Environment.
"""
import SCons.Tool
import SCons.Tool.cc
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in CXXSuffixes:
static_obj.add_action(suffix, SCons.Defaults.CXXAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction)
static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
SCons.Tool.cc.add_common_cc_variables(env)
env['CXX'] = 'c++'
env['CXXFLAGS'] = SCons.Util.CLVar('')
env['CXXCOM'] = '$CXX -o $TARGET -c $CXXFLAGS $CCFLAGS $_CCCOMCOM $SOURCES'
env['SHCXX'] = '$CXX'
env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS')
env['SHCXXCOM'] = '$SHCXX -o $TARGET -c $SHCXXFLAGS $SHCCFLAGS $_CCCOMCOM $SOURCES'
env['CPPDEFPREFIX'] = '-D'
env['CPPDEFSUFFIX'] = ''
env['INCPREFIX'] = '-I'
env['INCSUFFIX'] = ''
env['SHOBJSUFFIX'] = '.os'
env['OBJSUFFIX'] = '.o'
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 0
env['CXXFILESUFFIX'] = '.cc'
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
gpl-3.0
|
synmnstr/flexx
|
flexx/app/pair.py
|
20
|
14935
|
"""
Base class for objects that live in both Python and JS.
This basically implements the syncing of signals.
"""
import sys
import json
import weakref
import hashlib
from .. import react
from ..react.hassignals import HasSignalsMeta, with_metaclass
from ..react.pyscript import create_js_signals_class, HasSignalsJS
from ..pyscript.functions import py2js, js_rename
from ..pyscript.parser2 import get_class_definition
from .serialize import serializer
if sys.version_info[0] >= 3:
string_types = str,
else: # pragma: no cover
string_types = basestring,
pair_classes = []
def get_pair_classes():
""" Get a list of all known Pair subclasses.
"""
return [c for c in HasSignalsMeta.CLASSES if issubclass(c, Pair)]
def get_instance_by_id(id):
""" Get instance of Pair class corresponding to the given id,
or None if it does not exist.
"""
return Pair._instances.get(id, None)
class JSSignal(react.SourceSignal):
""" A signal that represents a proxy to a signal in JavaScript.
"""
def __init__(self, func_or_name, upstream=[], frame=None, ob=None, doc=None):
def func(v):
return v
if doc is not None:
func.__doc__ = doc
if isinstance(func_or_name, string_types):
func.__name__ = func_or_name
else:
func.__name__ = func_or_name.__name__
self._linked = False
react.SourceSignal.__init__(self, func, [], ob=ob)
def _subscribe(self, *args):
react.SourceSignal._subscribe(self, *args)
if not self._linked:
self.__self__._link_js_signal(self.name)
def _unsubscribe(self, *args):
react.SourceSignal._unsubscribe(self, *args)
if self._linked and not self._downstream:
self.__self__._link_js_signal(self.name, False)
class PySignal(react.SourceSignal):
""" A signal in JS that represents a proxy to a signal in Python.
"""
def __init__(self, name):
def func(v):
return v
func._name = name
react.SourceSignal.__init__(self, func, [])
class PyInputSignal(PySignal):
""" A signal in JS that represents an input signal in Python. On
the JS side, this can be used as an input too, although there is
no validation in this case.
"""
pass
class PairMeta(HasSignalsMeta):
""" Meta class for Pair
Set up proxy signals in Py/JS.
"""
def __init__(cls, name, bases, dct):
HasSignalsMeta.__init__(cls, name, bases, dct)
OK_MAGICS = '__init__', '__json__', '__from_json__'
# Create proxy signals on cls for each signal on JS
if 'JS' in cls.__dict__:
for name, val in cls.JS.__dict__.items():
if isinstance(val, react.Signal) and not isinstance(val, PySignal):
if not hasattr(cls, name):
cls.__signals__.append(name)
setattr(cls, name, JSSignal(name, doc=val._func.__doc__))
elif isinstance(getattr(cls, name), JSSignal):
pass # ok, overloaded signal on JS side
else:
print('Warning: JS signal %r not proxied, as it would hide a Py attribute.' % name)
# Implicit inheritance for JS "sub"-class
jsbases = [getattr(b, 'JS') for b in cls.__bases__ if hasattr(b, 'JS')]
JS = type('JS', tuple(jsbases), {})
for c in (cls, ): #cls.__bases__ + (cls, ):
if 'JS' in c.__dict__:
if '__init__' in c.JS.__dict__:
JS.__init__ = c.JS.__init__
for name, val in c.JS.__dict__.items():
if not name.startswith('__'):
setattr(JS, name, val)
elif name in OK_MAGICS:
setattr(JS, name, val)
cls.JS = JS
# Create proxy signals on cls.JS for each signal on cls
for name, val in cls.__dict__.items():
if isinstance(val, react.Signal) and not isinstance(val, JSSignal):
if not hasattr(cls.JS, name):
if isinstance(val, react.InputSignal):
setattr(cls.JS, name, PyInputSignal(name))
else:
setattr(cls.JS, name, PySignal(name))
elif isinstance(getattr(cls.JS, name), PySignal):
pass # ok, overloaded signal on JS side
else:
print('Warning: Py signal %r not proxied, as it would hide a JS attribute.' % name)
# Set JS and CSS for this class
cls.JS.CODE = cls._get_js()
cls.CSS = cls.__dict__.get('CSS', '')
def _get_js(cls):
""" Get source code for this class.
"""
cls_name = 'flexx.classes.' + cls.__name__
base_class = 'flexx.classes.%s.prototype' % cls.mro()[1].__name__
code = []
# Add JS version of HasSignals when this is the Pair class
if cls.mro()[1] is react.HasSignals:
c = py2js(serializer.__class__, 'flexx.Serializer')
code.append(c)
code.append('flexx.serializer = new flexx.Serializer();')
c = js_rename(HasSignalsJS.JSCODE, 'HasSignalsJS', 'flexx.classes.HasSignals')
code.append(c)
# Add this class
code.append(create_js_signals_class(cls.JS, cls_name, base_class))
if cls.mro()[1] is react.HasSignals:
code.append('flexx.serializer.add_reviver("Flexx-Pair", flexx.classes.Pair.prototype.__from_json__);\n')
return '\n'.join(code)
class Pair(with_metaclass(PairMeta, react.HasSignals)):
""" Subclass of HasSignals representing Python-JavaScript object pairs
Each instance of this class has a corresponding object in
JavaScript, and their signals are synced both ways. Signals defined
in Python can be connected to from JS, and vice versa.
The JS version of this class is defined by the contained ``JS``
class. One can define methods, signals, and (json serializable)
constants on the JS class.
Note:
This class may be renamed. Maybe Object, PairObject, ModelView
or something, suggestion welcome.
Parameters:
proxy: the proxy object that connects this instance to a JS client.
kwargs: initial signal values (see HasSignals).
Notes:
This class provides the base object for all widget classes in
``flexx.ui``. However, one can also create subclasses that have
nothing to do with user interfaces or DOM elements. You could e.g.
use it to calculate pi on nodejs.
Example:
.. code-block:: py
class MyPair(Pair):
def a_python_method(self):
...
class JS:
FOO = [1, 2, 3]
def a_js_method(this):
...
"""
# Keep track of all instances, so we can easily collect all JS/CSS
_instances = weakref.WeakValueDictionary()
# Count instances to give each instance a unique id
_counter = 0
# CSS for this class (no css in the base class)
CSS = ""
def __json__(self):
return {'__type__': 'Flexx-Pair', 'id': self.id}
def __from_json__(dct):
return get_instance_by_id(dct['id'])
def __init__(self, proxy=None, **kwargs):
# Set id and register this instance
Pair._counter += 1
self._id = self.__class__.__name__ + str(Pair._counter)
Pair._instances[self._id] = self
# Flag to implement eventual synchronicity
self._seid_from_js = 0
# Init proxy
if proxy is None:
from .proxy import manager
proxy = manager.get_default_proxy()
self._proxy = proxy
self._proxy.register_pair_class(self.__class__)
# Instantiate JavaScript version of this class
clsname = 'flexx.classes.' + self.__class__.__name__
cmd = 'flexx.instances.%s = new %s(%r);' % (self._id, clsname, self._id)
self._proxy._exec(cmd)
self._init()
# Init signals - signals will be connected and updated, causing updates
# on the JS side.
react.HasSignals.__init__(self, **kwargs)
def _init(self):
""" Can be overloaded when creating a custom class.
"""
pass
@property
def id(self):
""" The unique id of this Pair instance. """
return self._id
@property
def proxy(self):
""" The proxy object that connects us to the runtime.
"""
return self._proxy
def __setattr__(self, name, value):
# Sync attributes that are Pair instances
react.HasSignals.__setattr__(self, name, value)
if isinstance(value, Pair):
txt = serializer.saves(value)
cmd = 'flexx.instances.%s.%s = flexx.serializer.loads(%r);' % (self._id, name, txt)
self._proxy._exec(cmd)
def _set_signal_from_js(self, name, text, esid):
""" Notes on synchronizing:
- Py and JS both send updates when a signal changes.
- JS does not send an update for signal updates received from Py.
- Py does, to allow eventual synchronicity. Read on.
- JS sends updates with a nonzero esid (eventual synchronicity
id) and marks the corresponding signal with the same id.
- Py sends an update with the esid that it got from JS, or 0
if the signal originates from Py.
- When JS receives an update from Py, it checks whether the
received esid is 0 (the update originates from Py) or whether the signal's
own esid is 0 (the signal was updated from Py since we last
updated it from JS). If either is 0, it updates the signal
value and resets the signal's esid to 0 (see the short illustration after this method).
"""
signal = getattr(self, name)
value = serializer.loads(text)
self._seid_from_js = esid # to send back to js
signal._set(value)
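# Hedged round-trip illustration of the esid scheme described above: JS sets a signal and
# sends it with esid=7; Python stores 7 in _seid_from_js, applies the value, and echoes
# the update back with esid=7; on the JS side neither the received esid nor the signal's
# own esid is 0, so the value is not re-applied and no feedback loop occurs.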
def _signal_changed(self, signal):
# Set esid to 0 if it originates from Py, or to what we got from JS
esid = self._seid_from_js
self._seid_from_js = 0
if not isinstance(signal, JSSignal):
#txt = json.dumps(signal.value)
txt = serializer.saves(signal.value)
cmd = 'flexx.instances.%s._set_signal_from_py(%r, %r, %r);' % (self._id, signal.name, txt, esid)
self._proxy._exec(cmd)
def _link_js_signal(self, name, link=True):
""" Make a link between a JS signal and its proxy in Python.
This is done when a proxy signal is used as input for a signal
in Python.
"""
# if self._proxy is None:
# self._initial_signal_links.discard(name)
# if link:
# self._initial_signal_links.add(name)
# else:
link = 'true' if link else 'false'
cmd = 'flexx.instances.%s._link_js_signal(%r, %s);' % (self._id, name, link)
self._proxy._exec(cmd)
def call_js(self, call):
cmd = 'flexx.instances.%s.%s;' % (self._id, call)
self._proxy._exec(cmd)
class JS:
def __json__(self):
return {'__type__': 'Flexx-Pair', 'id': self.id}
def __from_json__(dct):
return flexx.instances[dct.id]
def __init__(self, id):
# Set id alias. In most browsers this shows up as the first element
# of the object, which makes it easy to identify objects while
# debugging. This attribute should *not* be used.
self.__id = self._id = self.id = id
self._linked_signals = {} # use a list as a set
# Call _init now. This gives subclasses a chance to init at a time
# when the id is set, but *before* the signals are connected.
self._init()
# Call HasSignals __init__, signals will be created and connected.
# Act signals relying on JS signals will fire.
# Act signals relying on Py signals will fire later.
super().__init__()
def _init(self):
pass
def _set_signal_from_py(self, name, text, esid):
value = flexx.serializer.loads(text)
signal = self[name]
if esid == 0 or signal._esid == 0:
self._signal_emit_lock = True # do not send back to py
signal._set(value)
signal._esid = 0 # mark signal as updated from py
def _signal_changed(self, signal):
if flexx.ws is None: # we could be exported or in an nbviewer
return
if self._signal_emit_lock:
self._signal_emit_lock = False
return
signal._esid = signal._count # mark signal as just updated by us
# todo: what signals do we sync? all but private signals? or only linked?
# signals like `children` should always sync, signals like a 100Hz timer not, mouse_pos maybe neither unless linked against
#if signal.signal_type == 'PyInputSignal' or self._linked_signals[signal._name]:
if signal.signal_type != 'PySignal' and not signal._name.startswith('_'):
#txt = JSON.stringify(signal.value)
txt = flexx.serializer.saves(signal.value)
flexx.ws.send('SIGNAL ' + [self.id, signal._esid, signal._name, txt].join(' '))
def _link_js_signal(self, name, link):
if link:
self._linked_signals[name] = True
signal = self[name]
if signal._timestamp > 1:
self._signal_changed(self[name])
elif self._linked_signals[name]:
del self._linked_signals[name]
## JS event system
# def _proxy_event(self, element, name):
# """ Easily get JS events from DOM elements in our event system.
# """
# that = this
# element.addEventListener(name, lambda ev: that.emit_event(name, {'cause': ev}), False)
#
# def _connect_js_event(self, element, event_name, method_name):
# """ Connect methods of this object to JS events.
# """
# that = this
# element.addEventListener(event_name, lambda ev: that[method_name](ev), False)
# Make pair objects de-serializable
serializer.add_reviver('Flexx-Pair', Pair.__from_json__)
|
bsd-2-clause
|
github-account-because-they-want-it/django
|
django/core/management/commands/startapp.py
|
513
|
1040
|
from importlib import import_module
from django.core.management.base import CommandError
from django.core.management.templates import TemplateCommand
class Command(TemplateCommand):
help = ("Creates a Django app directory structure for the given app "
"name in the current directory or optionally in the given "
"directory.")
missing_args_message = "You must provide an application name."
def handle(self, **options):
app_name, target = options.pop('name'), options.pop('directory')
self.validate_name(app_name, "app")
# Check that the app_name cannot be imported.
try:
import_module(app_name)
except ImportError:
pass
else:
raise CommandError("%r conflicts with the name of an existing "
"Python module and cannot be used as an app "
"name. Please try another name." % app_name)
super(Command, self).handle('app', app_name, target, **options)
|
bsd-3-clause
|
janisz/Diamond-1
|
src/diamond/handler/rrdtool.py
|
13
|
7698
|
# coding=utf-8
"""
Save stats in RRD files using rrdtool.
"""
import os
import re
import subprocess
import Queue
from Handler import Handler
#
# Constants for RRD file creation.
#
# NOTE: We default to the collectd RRD directory
# simply as a compatibility tool. Users that have
# tools that look in that location and would like
# to switch to Diamond need to make zero changes.
BASEDIR = '/var/lib/collectd/rrd'
METRIC_STEP = 10
BATCH_SIZE = 1
# NOTE: We don't really have a rigorous definition
# for metrics, particularly how often they will be
# reported, etc. Because of this, we have to guess
# at the steps and RRAs used for creation of the
# RRD files. These are a fairly sensible default,
# and basically allow for aggregation up from a single
# datapoint (because the XFF is 0.1, and each step
# aggregates not more than 10 of the previous step).
#
# Given a METRIC_STEP of 10 seconds, then these will
# represent data for up to the last full year.
RRA_SPECS = [
"RRA:AVERAGE:0.1:1:1200",
"RRA:MIN:0.1:1:1200",
"RRA:MAX:0.1:1:1200",
"RRA:AVERAGE:0.1:7:1200",
"RRA:MIN:0.1:7:1200",
"RRA:MAX:0.1:7:1200",
"RRA:AVERAGE:0.1:50:1200",
"RRA:MIN:0.1:50:1200",
"RRA:MAX:0.1:50:1200",
"RRA:AVERAGE:0.1:223:1200",
"RRA:MIN:0.1:223:1200",
"RRA:MAX:0.1:223:1200",
"RRA:AVERAGE:0.1:2635:1200",
"RRA:MIN:0.1:2635:1200",
"RRA:MAX:0.1:2635:1200",
]
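# Rough coverage check for the coarsest RRA above (steps of 2635 primary points, 1200
# rows), assuming the default METRIC_STEP of 10 seconds:
# 10 * 2635 * 1200 seconds ~= 31,620,000 s ~= 366 days, hence "up to the last full year".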
class RRDHandler(Handler):
# NOTE: This handler is fairly loose about locking (none),
# and the reason is because the calls are always protected
# by locking done in the _process and _flush routines.
# If this were to change at some point, we would definitely
# want to be a bit more sensible about how we lock.
#
# We would probably also want to restructure this as a
# consumer and producer so that one thread can continually
# write out data, but that really depends on the design
# at the top level.
def __init__(self, *args, **kwargs):
super(RRDHandler, self).__init__(*args, **kwargs)
self._exists_cache = dict()
self._basedir = self.config['basedir']
self._batch = self.config['batch']
self._step = self.config['step']
self._queues = {}
self._last_update = {}
def get_default_config_help(self):
config = super(RRDHandler, self).get_default_config_help()
config.update({
'basedir': 'The base directory for all RRD files.',
'batch': 'Wait for this many updates before saving to the RRD file',
'step': 'The minimum interval represented in generated RRD files.',
})
return config
def get_default_config(self):
config = super(RRDHandler, self).get_default_config()
config.update({
'basedir': BASEDIR,
'batch': BATCH_SIZE,
'step': METRIC_STEP,
})
return config
def _ensure_exists(self, filename, metric_name, metric_type):
# We're good to go!
if filename in self._exists_cache:
return True
# Does the file already exist?
if os.path.exists(filename):
self._exists_cache[filename] = True
return True
# Attempt the creation.
self._create(filename, metric_name, metric_type)
self._exists_cache[filename] = True
return True
def _create(self, filename, metric_name, metric_type):
# Sanity check the metric name.
if not re.match("^[a-zA-Z0-9_]+$", metric_name):
raise Exception("Invalid metric name: %s" % metric_name)
# Sanity check the metric type.
if metric_type not in ("GAUGE", "COUNTER"):
raise Exception("Unknown metric type: %s" % metric_type)
# Try to create the directory.
# NOTE: If we aren't successful, the check_call()
# will fail anyways so we can do this optimistically.
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
ds_spec = "DS:%s:%s:%d:U:U" % (
metric_name, metric_type, self._step * 2)
rrd_create_cmd = [
"rrdtool", "create", filename,
"--no-overwrite",
"--step", str(self._step),
ds_spec
]
rrd_create_cmd.extend(RRA_SPECS)
subprocess.check_call(rrd_create_cmd, close_fds=True)
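# Sketch of the resulting command for a hypothetical GAUGE metric named 'cpu_total' with
# the default step of 10 seconds (heartbeat = 2 * step):
#
#   rrdtool create <filename> --no-overwrite --step 10 DS:cpu_total:GAUGE:20:U:U RRA:...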
def process(self, metric):
# Extract the filename given the metric.
# NOTE: We have to tweak the metric name and limit
# the length to 19 characters for the RRD file format.
collector = metric.getCollectorPath()
metric_name = metric.getMetricPath().replace(".", "_")[:19]
dirname = os.path.join(self._basedir, metric.host, collector)
filename = os.path.join(dirname, metric_name + ".rrd")
# Ensure that there is an RRD file for this metric.
# This is done inline because it's quickly cached and
# we would like to have exceptions related to creating
# the RRD file raised in the main thread.
self._ensure_exists(filename, metric_name, metric.metric_type)
if self._queue(filename, metric.timestamp, metric.value) >= self._batch:
self._flush_queue(filename)
def _queue(self, filename, timestamp, value):
if not filename in self._queues:
queue = Queue.Queue()
self._queues[filename] = queue
else:
queue = self._queues[filename]
queue.put((timestamp, value))
return queue.qsize()
def flush(self):
# Grab all current queues.
for filename in self._queues.keys():
self._flush_queue(filename)
def _flush_queue(self, filename):
queue = self._queues[filename]
# Collect all pending updates.
updates = {}
max_timestamp = 0
while True:
try:
(timestamp, value) = queue.get(block=False)
# RRD only supports granularity at a
# per-second level (not milliseconds, etc.).
timestamp = int(timestamp)
# Remember the latest update done.
last_update = self._last_update.get(filename, 0)
if last_update >= timestamp:
# Yikes. RRDtool won't let us do this.
# We need to drop this update and log a warning.
self.log.warning(
"Dropping update to %s. Too frequent!" % filename)
continue
max_timestamp = max(timestamp, max_timestamp)
# Add this update.
if not timestamp in updates:
updates[timestamp] = []
updates[timestamp].append(value)
except Queue.Empty:
break
# Save the last update time.
self._last_update[filename] = max_timestamp
if len(updates) > 0:
# Construct our command line.
# This will look like <time>:<value1>[:<value2>...]
            # The timestamps must be sorted, and each of the
# <time> values must be unique (like a snowflake).
data_points = map(
lambda (timestamp, values): "%d:%s" %
(timestamp, ":".join(map(str, values))),
sorted(updates.items()))
            # Optimistically update.
# Nothing can really be done if we fail.
rrd_update_cmd = ["rrdupdate", filename, "--"]
rrd_update_cmd.extend(data_points)
self.log.info("update: %s" % str(rrd_update_cmd))
subprocess.call(rrd_update_cmd)
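# Illustrative sketch (not part of the original handler): how the
# "<time>:<value1>[:<value2>...]" arguments described in _flush_queue() are
# built from a batch of queued updates. The timestamps and values below are
# made up purely for this example.
def _example_format_data_points():
    updates = {1500000000: [1.0, 2.0], 1500000010: [3.0]}
    # Sort by timestamp and join each batch of values with ':'.
    return ["%d:%s" % (ts, ":".join(map(str, vals)))
            for ts, vals in sorted(updates.items())]
    # -> ['1500000000:1.0:2.0', '1500000010:3.0']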
|
mit
|
wfarner/aurora
|
src/main/python/apache/aurora/tools/java/organize_imports.py
|
11
|
4397
|
#!/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
Organizes a java source file's import statements in a way that pleases
Apache Aurora's checkstyle configuration. This expects exactly one
argument: the name of the file to modify with preferred import ordering.
'''
from __future__ import print_function
import re
import sys
from collections import defaultdict
IMPORT_RE = re.compile('import(?: static)? (.*);')
def get_group(import_statement):
matcher = IMPORT_RE.match(import_statement)
assert matcher, 'Could not parse import statement: %s' % import_statement
class_name = matcher.group(1)
group = class_name.split('.')[0]
return group
def index_by_group(import_statements):
groups = defaultdict(list)
for line in import_statements:
groups[get_group(line)].append(line)
return groups
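# Illustrative sketch (not part of the original tool): how the helpers above
# classify import statements by their top-level package. The import statements
# here are hypothetical examples.
def _example_grouping():
    """
    >>> get_group('import java.util.List;')
    'java'
    >>> sorted(index_by_group(['import java.util.List;',
    ...                        'import com.foo.Bar;']))
    ['com', 'java']
    """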
IMPORT_CLASS_RE = re.compile(
'import(?: static)? (?P<outer>[^A-Z]*[A-Z]\w+)(?:\.(?P<inners>[\w][^;]*))?')
def get_all_group_lines(import_groups):
if not import_groups:
return []
def get_group_lines(group):
def comparator(x, y):
# These shenanigans are used to properly order imports for inner classes.
# So we get ordering like:
# import com.foo.Bar;
# import com.foo.Bar.Baz;
# (this is not lexicographical, so normal sort won't suffice)
x_m = IMPORT_CLASS_RE.match(x)
y_m = IMPORT_CLASS_RE.match(y)
if x_m.group('outer') == y_m.group('outer'):
return cmp(x_m.group('inners'), y_m.group('inners'))
else:
return cmp(x, y)
lines = sorted(import_groups[group], comparator)
lines.append('')
return lines
all_lines = []
explicit_groups = ['java', 'javax', 'scala', 'com', 'net', 'org']
for group in explicit_groups:
if group in import_groups:
all_lines += get_group_lines(group)
# Gather remaining groups.
remaining_groups = sorted(set(import_groups.keys()) - set(explicit_groups))
for group in remaining_groups:
all_lines += get_group_lines(group)
return all_lines
BEFORE_IMPORTS = 'before_imports'
IMPORTS = 'imports'
STATIC_IMPORTS = 'static_imports'
AFTER_IMPORTS = 'after_imports'
def main(argv):
if len(argv) != 2:
print('usage: %s FILE' % argv[0])
sys.exit(1)
print('Organizing imports in %s' % argv[1])
lines_before_imports = []
import_lines = []
static_import_lines = []
lines_after_imports = []
with open(argv[1], 'r') as f:
position = BEFORE_IMPORTS
for line in f:
line = line.rstrip()
if position == BEFORE_IMPORTS:
if line.startswith('import'):
position = IMPORTS
else:
lines_before_imports.append(line)
if position == IMPORTS:
if line.startswith('import static'):
position = STATIC_IMPORTS
elif line.startswith('import'):
import_lines.append(line)
elif line.strip():
position = AFTER_IMPORTS
if position == STATIC_IMPORTS:
if line.startswith('import static'):
static_import_lines.append(line)
elif line.strip():
position = AFTER_IMPORTS
if position == AFTER_IMPORTS:
lines_after_imports.append(line)
import_groups = index_by_group(import_lines)
static_import_groups = index_by_group(static_import_lines)
def ensure_line_padding(lines):
if lines and lines[-1] != '':
lines.append('')
return lines
file_lines = lines_before_imports
if import_groups:
ensure_line_padding(file_lines)
file_lines += get_all_group_lines(import_groups)
if static_import_groups:
ensure_line_padding(file_lines)
file_lines += get_all_group_lines(static_import_groups)
if lines_after_imports:
ensure_line_padding(file_lines)
file_lines += lines_after_imports
with open(argv[1], 'w') as f:
for line in file_lines:
print(line, file=f)
if __name__ == '__main__':
main(sys.argv)
|
apache-2.0
|
Yadnyawalkya/integration_tests
|
cfme/configure/documentation.py
|
3
|
3224
|
from widgetastic.widget import Image
from widgetastic.widget import Text
from widgetastic.widget import View
class LinksView(View):
"""
Widgets for all of the links on the documentation page
Each doc link is an anchor with a child image element, then an anchor with text
Both the image and the text anchor should link to the same PDF
"""
# todo: update this view. it contains too much duplicate code
@View.nested
class policies(View): # noqa
TEXT = 'Defining Policies Profiles Guide'
link = img_anchor = Text('//a[@title="View the {}"]'.format(TEXT))
img = Image(locator='//img[@alt="View the {}"]'.format(TEXT))
@View.nested
class general(View): # noqa
TEXT = 'General Configuration Guide'
link = img_anchor = Text('//a[@title="View the {}"]'.format(TEXT))
img = Image(locator='//img[@alt="View the {}"]'.format(TEXT))
@View.nested
class inventory(View): # noqa
TEXT = 'Infrastructure Inventory Guide'
link = img_anchor = Text('//a[@title="View the {}"]'.format(TEXT))
img = Image(locator='//img[@alt="View the {}"]'.format(TEXT))
@View.nested
class automation(View): # noqa
TEXT = 'Methods For Automation Guide'
link = img_anchor = Text('//a[@title="View the {}"]'.format(TEXT))
img = Image(locator='//img[@alt="View the {}"]'.format(TEXT))
@View.nested
class monitoring(View): # noqa
TEXT = 'Monitoring Alerts Reporting Guide'
link = img_anchor = Text('//a[@title="View the {}"]'.format(TEXT))
img = Image(locator='//img[@alt="View the {}"]'.format(TEXT))
@View.nested
class providers(View): # noqa
TEXT = 'Providers Guide'
link = img_anchor = Text('//a[@title="View the {}"]'.format(TEXT))
img = Image(locator='//img[@alt="View the {}"]'.format(TEXT))
@View.nested
class rest(View): # noqa
TEXT = 'Rest Api Guide'
link = img_anchor = Text('//a[@title="View the {}"]'.format(TEXT))
img = Image(locator='//img[@alt="View the {}"]'.format(TEXT))
@View.nested
class scripting(View): # noqa
TEXT = 'Scripting Actions Guide'
link = img_anchor = Text('//a[@title="View the {}"]'.format(TEXT))
img = Image(locator='//img[@alt="View the {}"]'.format(TEXT))
@View.nested
class vm_instances(View): # noqa
# TODO: title must be changed once BZ 1723813 is resolved.
TEXT = 'Virtual Machines Hosts Guide'
link = img_anchor = Text('//a[@title="View the {}"]'.format(TEXT))
img = Image(locator='//img[@alt="View the {}"]'.format(TEXT))
@View.nested
class customer_portal(View): # noqa
TEXT = 'Red Hat Customer Portal'
link = Text('//a[normalize-space(.)="{}"]'.format(TEXT))
class DocView(View):
"""
View for the documentation page, a title and a bunch of pdf of links
"""
@property
def is_displayed(self):
return (
self.title.read() == 'Documentation' and
all([link.is_displayed for link in self.links.sub_widgets])
)
title = Text('//div[@id="main-content"]//div/h1')
links = View.nested(LinksView)
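# A possible direction for the "too much duplicate code" todo in LinksView -- a
# small factory that builds one of the repeated guide views. This is only a
# sketch: it assumes a dynamically created subclass behaves like the
# hand-written nested classes (hence type(View) to reuse View's metaclass), and
# the name _make_guide_view is illustrative, not part of the framework.
def _make_guide_view(text):
    """Return a View subclass with the link/img widgets for a guide titled ``text``."""
    return type(View)(
        'GuideView',
        (View,),
        {
            'TEXT': text,
            'link': Text('//a[@title="View the {}"]'.format(text)),
            'img': Image(locator='//img[@alt="View the {}"]'.format(text)),
        },
    )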
|
gpl-2.0
|
jbzdak/edx-platform
|
common/lib/capa/capa/tests/__init__.py
|
129
|
2700
|
"""Tools for helping with testing capa."""
import gettext
import os
import os.path
import fs.osfs
from capa.capa_problem import LoncapaProblem, LoncapaSystem
from capa.inputtypes import Status
from mock import Mock, MagicMock
import xml.sax.saxutils as saxutils
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
def tst_render_template(template, context):
"""
A test version of render to template. Renders to the repr of the context, completely ignoring
the template name. To make the output valid xml, quotes the content, and wraps it in a <div>
"""
return '<div>{0}</div>'.format(saxutils.escape(repr(context)))
def calledback_url(dispatch='score_update'):
return dispatch
xqueue_interface = MagicMock()
xqueue_interface.send_to_queue.return_value = (0, 'Success!')
def test_capa_system():
"""
Construct a mock LoncapaSystem instance.
"""
the_system = Mock(
spec=LoncapaSystem,
ajax_url='/dummy-ajax-url',
anonymous_student_id='student',
cache=None,
can_execute_unsafe_code=lambda: False,
get_python_lib_zip=lambda: None,
DEBUG=True,
filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")),
i18n=gettext.NullTranslations(),
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
render_template=tst_render_template,
seed=0,
STATIC_URL='/dummy-static/',
STATUS_CLASS=Status,
xqueue={'interface': xqueue_interface, 'construct_callback': calledback_url, 'default_queuename': 'testqueue', 'waittime': 10},
)
return the_system
def mock_capa_module():
"""
    capa response types need just two things from the capa_module: location and track_function.
"""
capa_module = Mock()
capa_module.location.to_deprecated_string.return_value = 'i4x://Foo/bar/mock/abc'
# The following comes into existence by virtue of being called
# capa_module.runtime.track_function
return capa_module
def new_loncapa_problem(xml, capa_system=None, seed=723):
"""Construct a `LoncapaProblem` suitable for unit tests."""
return LoncapaProblem(xml, id='1', seed=seed, capa_system=capa_system or test_capa_system(),
capa_module=mock_capa_module())
def load_fixture(relpath):
"""
Return a `unicode` object representing the contents
of the fixture file at the given path within a test_files directory
in the same directory as the test file.
"""
abspath = os.path.join(os.path.dirname(__file__), 'test_files', relpath)
with open(abspath) as fixture_file:
contents = fixture_file.read()
return contents.decode('utf8')
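# Minimal usage sketch (not part of the original helpers): constructing a
# problem for a unit test with the factory above. The XML body is a made-up,
# trivially small capa problem used purely for illustration.
def _example_problem():
    xml = '<problem><p>A minimal problem body.</p></problem>'
    return new_loncapa_problem(xml)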
|
agpl-3.0
|
fredkingham/blog-of-fred
|
markdown/extensions/def_list.py
|
46
|
3699
|
"""
Definition List Extension for Python-Markdown
=============================================
Added parsing of Definition Lists to Python-Markdown.
A simple example:
Apple
: Pomaceous fruit of plants of the genus Malus in
the family Rosaceae.
: An american computer company.
Orange
: The fruit of an evergreen tree of the genus Citrus.
Copyright 2008 - [Waylan Limberg](http://achinghead.com)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..blockprocessors import BlockProcessor, ListIndentProcessor
from ..util import etree
import re
class DefListProcessor(BlockProcessor):
""" Process Definition Lists. """
RE = re.compile(r'(^|\n)[ ]{0,3}:[ ]{1,3}(.*?)(\n|$)')
NO_INDENT_RE = re.compile(r'^[ ]{0,3}[^ :]')
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
raw_block = blocks.pop(0)
m = self.RE.search(raw_block)
terms = [l.strip() for l in raw_block[:m.start()].split('\n') if l.strip()]
block = raw_block[m.end():]
no_indent = self.NO_INDENT_RE.match(block)
if no_indent:
d, theRest = (block, None)
else:
d, theRest = self.detab(block)
if d:
d = '%s\n%s' % (m.group(2), d)
else:
d = m.group(2)
sibling = self.lastChild(parent)
if not terms and sibling is None:
# This is not a definition item. Most likely a paragraph that
            # starts with a colon at the beginning of a document or list.
blocks.insert(0, raw_block)
return False
if not terms and sibling.tag == 'p':
# The previous paragraph contains the terms
state = 'looselist'
terms = sibling.text.split('\n')
parent.remove(sibling)
            # Acquire new sibling
sibling = self.lastChild(parent)
else:
state = 'list'
if sibling and sibling.tag == 'dl':
# This is another item on an existing list
dl = sibling
if len(dl) and dl[-1].tag == 'dd' and len(dl[-1]):
state = 'looselist'
else:
# This is a new list
dl = etree.SubElement(parent, 'dl')
# Add terms
for term in terms:
dt = etree.SubElement(dl, 'dt')
dt.text = term
# Add definition
self.parser.state.set(state)
dd = etree.SubElement(dl, 'dd')
self.parser.parseBlocks(dd, [d])
self.parser.state.reset()
if theRest:
blocks.insert(0, theRest)
class DefListIndentProcessor(ListIndentProcessor):
""" Process indented children of definition list items. """
ITEM_TYPES = ['dd']
LIST_TYPES = ['dl']
def create_item(self, parent, block):
""" Create a new dd and parse the block with it as the parent. """
dd = etree.SubElement(parent, 'dd')
self.parser.parseBlocks(dd, [block])
class DefListExtension(Extension):
""" Add definition lists to Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add an instance of DefListProcessor to BlockParser. """
md.parser.blockprocessors.add('defindent',
DefListIndentProcessor(md.parser),
'>indent')
md.parser.blockprocessors.add('deflist',
DefListProcessor(md.parser),
'>ulist')
def makeExtension(configs={}):
return DefListExtension(configs=configs)
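# Minimal usage sketch (not part of the extension itself). It assumes the
# standard top-level ``markdown.markdown()`` entry point; the sample text is
# the same style of definition list shown in the module docstring.
def _example_usage():
    import markdown
    text = "Apple\n:   Pomaceous fruit of plants of the genus Malus."
    # Produces a <dl> containing a <dt>Apple</dt> term and one <dd> definition.
    return markdown.markdown(text, extensions=['def_list'])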
|
bsd-3-clause
|
minichate/heroku-buildpack-ldap
|
src/virtualenv-1.7/virtualenv.py
|
77
|
90881
|
#!/usr/bin/env python
"""Create a "virtual" Python installation
"""
# If you change the version here, change it in setup.py
# and docs/conf.py as well.
virtualenv_version = "1.7"
import base64
import sys
import os
import optparse
import re
import shutil
import logging
import tempfile
import zlib
import errno
import distutils.sysconfig
from distutils.util import strtobool
try:
import subprocess
except ImportError:
if sys.version_info <= (2, 3):
print('ERROR: %s' % sys.exc_info()[1])
print('ERROR: this script requires Python 2.4 or greater; or at least the subprocess module.')
print('If you copy subprocess.py from a newer version of Python this script will probably work')
sys.exit(101)
else:
raise
try:
set
except NameError:
from sets import Set as set
try:
basestring
except NameError:
basestring = str
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
join = os.path.join
py_version = 'python%s.%s' % (sys.version_info[0], sys.version_info[1])
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')
is_win = (sys.platform == 'win32')
abiflags = getattr(sys, 'abiflags', '')
user_dir = os.path.expanduser('~')
if sys.platform == 'win32':
user_dir = os.environ.get('APPDATA', user_dir) # Use %APPDATA% for roaming
default_storage_dir = os.path.join(user_dir, 'virtualenv')
else:
default_storage_dir = os.path.join(user_dir, '.virtualenv')
default_config_file = os.path.join(default_storage_dir, 'virtualenv.ini')
if is_pypy:
expected_exe = 'pypy'
elif is_jython:
expected_exe = 'jython'
else:
expected_exe = 'python'
REQUIRED_MODULES = ['os', 'posix', 'posixpath', 'nt', 'ntpath', 'genericpath',
'fnmatch', 'locale', 'encodings', 'codecs',
'stat', 'UserDict', 'readline', 'copy_reg', 'types',
're', 'sre', 'sre_parse', 'sre_constants', 'sre_compile',
'zlib']
REQUIRED_FILES = ['lib-dynload', 'config']
majver, minver = sys.version_info[:2]
if majver == 2:
if minver >= 6:
REQUIRED_MODULES.extend(['warnings', 'linecache', '_abcoll', 'abc'])
if minver >= 7:
REQUIRED_MODULES.extend(['_weakrefset'])
if minver <= 3:
REQUIRED_MODULES.extend(['sets', '__future__'])
elif majver == 3:
# Some extra modules are needed for Python 3, but different ones
# for different versions.
REQUIRED_MODULES.extend(['_abcoll', 'warnings', 'linecache', 'abc', 'io',
'_weakrefset', 'copyreg', 'tempfile', 'random',
'__future__', 'collections', 'keyword', 'tarfile',
'shutil', 'struct', 'copy'])
if minver >= 2:
REQUIRED_FILES[-1] = 'config-%s' % majver
if minver == 3:
# The whole list of 3.3 modules is reproduced below - the current
# uncommented ones are required for 3.3 as of now, but more may be
# added as 3.3 development continues.
REQUIRED_MODULES.extend([
#"aifc",
#"antigravity",
#"argparse",
#"ast",
#"asynchat",
#"asyncore",
"base64",
#"bdb",
#"binhex",
"bisect",
#"calendar",
#"cgi",
#"cgitb",
#"chunk",
#"cmd",
#"codeop",
#"code",
#"colorsys",
#"_compat_pickle",
#"compileall",
#"concurrent",
#"configparser",
#"contextlib",
#"cProfile",
#"crypt",
#"csv",
#"ctypes",
#"curses",
#"datetime",
#"dbm",
#"decimal",
#"difflib",
#"dis",
#"doctest",
#"dummy_threading",
"_dummy_thread",
#"email",
#"filecmp",
#"fileinput",
#"formatter",
#"fractions",
#"ftplib",
#"functools",
#"getopt",
#"getpass",
#"gettext",
#"glob",
#"gzip",
"hashlib",
"heapq",
"hmac",
#"html",
#"http",
#"idlelib",
#"imaplib",
#"imghdr",
#"importlib",
#"inspect",
#"json",
#"lib2to3",
#"logging",
#"macpath",
#"macurl2path",
#"mailbox",
#"mailcap",
#"_markupbase",
#"mimetypes",
#"modulefinder",
#"multiprocessing",
#"netrc",
#"nntplib",
#"nturl2path",
#"numbers",
#"opcode",
#"optparse",
#"os2emxpath",
#"pdb",
#"pickle",
#"pickletools",
#"pipes",
#"pkgutil",
#"platform",
#"plat-linux2",
#"plistlib",
#"poplib",
#"pprint",
#"profile",
#"pstats",
#"pty",
#"pyclbr",
#"py_compile",
#"pydoc_data",
#"pydoc",
#"_pyio",
#"queue",
#"quopri",
"reprlib",
"rlcompleter",
#"runpy",
#"sched",
#"shelve",
#"shlex",
#"smtpd",
#"smtplib",
#"sndhdr",
#"socket",
#"socketserver",
#"sqlite3",
#"ssl",
#"stringprep",
#"string",
#"_strptime",
#"subprocess",
#"sunau",
#"symbol",
#"symtable",
#"sysconfig",
#"tabnanny",
#"telnetlib",
#"test",
#"textwrap",
#"this",
#"_threading_local",
#"threading",
#"timeit",
#"tkinter",
#"tokenize",
#"token",
#"traceback",
#"trace",
#"tty",
#"turtledemo",
#"turtle",
#"unittest",
#"urllib",
#"uuid",
#"uu",
#"wave",
"weakref",
#"webbrowser",
#"wsgiref",
#"xdrlib",
#"xml",
#"xmlrpc",
#"zipfile",
])
if is_pypy:
# these are needed to correctly display the exceptions that may happen
# during the bootstrap
REQUIRED_MODULES.extend(['traceback', 'linecache'])
class Logger(object):
"""
Logging object for use in command-line script. Allows ranges of
levels, to avoid some redundancy of displayed information.
"""
DEBUG = logging.DEBUG
INFO = logging.INFO
NOTIFY = (logging.INFO+logging.WARN)/2
WARN = WARNING = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
LEVELS = [DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]
def __init__(self, consumers):
self.consumers = consumers
self.indent = 0
self.in_progress = None
self.in_progress_hanging = False
def debug(self, msg, *args, **kw):
self.log(self.DEBUG, msg, *args, **kw)
def info(self, msg, *args, **kw):
self.log(self.INFO, msg, *args, **kw)
def notify(self, msg, *args, **kw):
self.log(self.NOTIFY, msg, *args, **kw)
def warn(self, msg, *args, **kw):
self.log(self.WARN, msg, *args, **kw)
def error(self, msg, *args, **kw):
self.log(self.WARN, msg, *args, **kw)
def fatal(self, msg, *args, **kw):
self.log(self.FATAL, msg, *args, **kw)
def log(self, level, msg, *args, **kw):
if args:
if kw:
raise TypeError(
"You may give positional or keyword arguments, not both")
args = args or kw
rendered = None
for consumer_level, consumer in self.consumers:
if self.level_matches(level, consumer_level):
if (self.in_progress_hanging
and consumer in (sys.stdout, sys.stderr)):
self.in_progress_hanging = False
sys.stdout.write('\n')
sys.stdout.flush()
if rendered is None:
if args:
rendered = msg % args
else:
rendered = msg
rendered = ' '*self.indent + rendered
if hasattr(consumer, 'write'):
consumer.write(rendered+'\n')
else:
consumer(rendered)
def start_progress(self, msg):
assert not self.in_progress, (
"Tried to start_progress(%r) while in_progress %r"
% (msg, self.in_progress))
if self.level_matches(self.NOTIFY, self._stdout_level()):
sys.stdout.write(msg)
sys.stdout.flush()
self.in_progress_hanging = True
else:
self.in_progress_hanging = False
self.in_progress = msg
def end_progress(self, msg='done.'):
assert self.in_progress, (
"Tried to end_progress without start_progress")
if self.stdout_level_matches(self.NOTIFY):
if not self.in_progress_hanging:
# Some message has been printed out since start_progress
sys.stdout.write('...' + self.in_progress + msg + '\n')
sys.stdout.flush()
else:
sys.stdout.write(msg + '\n')
sys.stdout.flush()
self.in_progress = None
self.in_progress_hanging = False
def show_progress(self):
"""If we are in a progress scope, and no log messages have been
shown, write out another '.'"""
if self.in_progress_hanging:
sys.stdout.write('.')
sys.stdout.flush()
def stdout_level_matches(self, level):
"""Returns true if a message at this level will go to stdout"""
return self.level_matches(level, self._stdout_level())
def _stdout_level(self):
"""Returns the level that stdout runs at"""
for level, consumer in self.consumers:
if consumer is sys.stdout:
return level
return self.FATAL
def level_matches(self, level, consumer_level):
"""
>>> l = Logger([])
>>> l.level_matches(3, 4)
False
>>> l.level_matches(3, 2)
True
>>> l.level_matches(slice(None, 3), 3)
False
>>> l.level_matches(slice(None, 3), 2)
True
>>> l.level_matches(slice(1, 3), 1)
True
>>> l.level_matches(slice(2, 3), 1)
False
"""
if isinstance(level, slice):
start, stop = level.start, level.stop
if start is not None and start > consumer_level:
return False
if stop is not None and stop <= consumer_level:
return False
return True
else:
return level >= consumer_level
#@classmethod
def level_for_integer(cls, level):
levels = cls.LEVELS
if level < 0:
return levels[0]
if level >= len(levels):
return levels[-1]
return levels[level]
level_for_integer = classmethod(level_for_integer)
# create a silent logger just to prevent this from being undefined
# will be overridden with the requested verbosity when main() is called.
logger = Logger([(Logger.LEVELS[-1], sys.stdout)])
def mkdir(path):
if not os.path.exists(path):
logger.info('Creating %s', path)
os.makedirs(path)
else:
logger.info('Directory %s already exists', path)
def copyfileordir(src, dest):
if os.path.isdir(src):
shutil.copytree(src, dest, True)
else:
shutil.copy2(src, dest)
def copyfile(src, dest, symlink=True):
if not os.path.exists(src):
# Some bad symlink in the src
logger.warn('Cannot find file %s (bad symlink)', src)
return
if os.path.exists(dest):
logger.debug('File %s already exists', dest)
return
if not os.path.exists(os.path.dirname(dest)):
logger.info('Creating parent directories for %s' % os.path.dirname(dest))
os.makedirs(os.path.dirname(dest))
if not os.path.islink(src):
srcpath = os.path.abspath(src)
else:
srcpath = os.readlink(src)
if symlink and hasattr(os, 'symlink'):
logger.info('Symlinking %s', dest)
try:
os.symlink(srcpath, dest)
except (OSError, NotImplementedError):
logger.info('Symlinking failed, copying to %s', dest)
copyfileordir(src, dest)
else:
logger.info('Copying to %s', dest)
copyfileordir(src, dest)
def writefile(dest, content, overwrite=True):
if not os.path.exists(dest):
logger.info('Writing %s', dest)
f = open(dest, 'wb')
f.write(content.encode('utf-8'))
f.close()
return
else:
f = open(dest, 'rb')
c = f.read()
f.close()
if c != content:
if not overwrite:
logger.notify('File %s exists with different content; not overwriting', dest)
return
logger.notify('Overwriting %s with new content', dest)
f = open(dest, 'wb')
f.write(content.encode('utf-8'))
f.close()
else:
logger.info('Content %s already in place', dest)
def rmtree(dir):
if os.path.exists(dir):
logger.notify('Deleting tree %s', dir)
shutil.rmtree(dir)
else:
logger.info('Do not need to delete %s; already gone', dir)
def make_exe(fn):
if hasattr(os, 'chmod'):
oldmode = os.stat(fn).st_mode & 0xFFF # 0o7777
newmode = (oldmode | 0x16D) & 0xFFF # 0o555, 0o7777
os.chmod(fn, newmode)
logger.info('Changed mode of %s to %s', fn, oct(newmode))
def _find_file(filename, dirs):
for dir in dirs:
if os.path.exists(join(dir, filename)):
return join(dir, filename)
return filename
def _install_req(py_executable, unzip=False, distribute=False,
search_dirs=None, never_download=False):
if search_dirs is None:
search_dirs = file_search_dirs()
if not distribute:
setup_fn = 'setuptools-0.6c11-py%s.egg' % sys.version[:3]
project_name = 'setuptools'
bootstrap_script = EZ_SETUP_PY
source = None
else:
setup_fn = None
source = 'distribute-0.6.24.tar.gz'
project_name = 'distribute'
bootstrap_script = DISTRIBUTE_SETUP_PY
if setup_fn is not None:
setup_fn = _find_file(setup_fn, search_dirs)
if source is not None:
source = _find_file(source, search_dirs)
if is_jython and os._name == 'nt':
# Jython's .bat sys.executable can't handle a command line
# argument with newlines
fd, ez_setup = tempfile.mkstemp('.py')
os.write(fd, bootstrap_script)
os.close(fd)
cmd = [py_executable, ez_setup]
else:
cmd = [py_executable, '-c', bootstrap_script]
if unzip:
cmd.append('--always-unzip')
env = {}
remove_from_env = []
if logger.stdout_level_matches(logger.DEBUG):
cmd.append('-v')
old_chdir = os.getcwd()
if setup_fn is not None and os.path.exists(setup_fn):
logger.info('Using existing %s egg: %s' % (project_name, setup_fn))
cmd.append(setup_fn)
if os.environ.get('PYTHONPATH'):
env['PYTHONPATH'] = setup_fn + os.path.pathsep + os.environ['PYTHONPATH']
else:
env['PYTHONPATH'] = setup_fn
else:
        # if the source distribution was found, chdir to its directory
if source is not None and os.path.exists(source):
logger.info('Using existing %s egg: %s' % (project_name, source))
os.chdir(os.path.dirname(source))
# in this case, we want to be sure that PYTHONPATH is unset (not
# just empty, really unset), else CPython tries to import the
            # site.py that is in virtualenv_support
remove_from_env.append('PYTHONPATH')
else:
if never_download:
logger.fatal("Can't find any local distributions of %s to install "
"and --never-download is set. Either re-run virtualenv "
"without the --never-download option, or place a %s "
"distribution (%s) in one of these "
"locations: %r" % (project_name, project_name,
setup_fn or source,
search_dirs))
sys.exit(1)
logger.info('No %s egg found; downloading' % project_name)
cmd.extend(['--always-copy', '-U', project_name])
logger.start_progress('Installing %s...' % project_name)
logger.indent += 2
cwd = None
if project_name == 'distribute':
env['DONT_PATCH_SETUPTOOLS'] = 'true'
def _filter_ez_setup(line):
return filter_ez_setup(line, project_name)
if not os.access(os.getcwd(), os.W_OK):
cwd = tempfile.mkdtemp()
if source is not None and os.path.exists(source):
# the current working dir is hostile, let's copy the
# tarball to a temp dir
target = os.path.join(cwd, os.path.split(source)[-1])
shutil.copy(source, target)
try:
call_subprocess(cmd, show_stdout=False,
filter_stdout=_filter_ez_setup,
extra_env=env,
remove_from_env=remove_from_env,
cwd=cwd)
finally:
logger.indent -= 2
logger.end_progress()
if os.getcwd() != old_chdir:
os.chdir(old_chdir)
if is_jython and os._name == 'nt':
os.remove(ez_setup)
def file_search_dirs():
here = os.path.dirname(os.path.abspath(__file__))
dirs = ['.', here,
join(here, 'virtualenv_support')]
if os.path.splitext(os.path.dirname(__file__))[0] != 'virtualenv':
# Probably some boot script; just in case virtualenv is installed...
try:
import virtualenv
except ImportError:
pass
else:
dirs.append(os.path.join(os.path.dirname(virtualenv.__file__), 'virtualenv_support'))
return [d for d in dirs if os.path.isdir(d)]
def install_setuptools(py_executable, unzip=False,
search_dirs=None, never_download=False):
_install_req(py_executable, unzip,
search_dirs=search_dirs, never_download=never_download)
def install_distribute(py_executable, unzip=False,
search_dirs=None, never_download=False):
_install_req(py_executable, unzip, distribute=True,
search_dirs=search_dirs, never_download=never_download)
_pip_re = re.compile(r'^pip-.*(zip|tar.gz|tar.bz2|tgz|tbz)$', re.I)
def install_pip(py_executable, search_dirs=None, never_download=False):
if search_dirs is None:
search_dirs = file_search_dirs()
filenames = []
for dir in search_dirs:
filenames.extend([join(dir, fn) for fn in os.listdir(dir)
if _pip_re.search(fn)])
filenames = [(os.path.basename(filename).lower(), i, filename) for i, filename in enumerate(filenames)]
filenames.sort()
filenames = [filename for basename, i, filename in filenames]
if not filenames:
filename = 'pip'
else:
filename = filenames[-1]
easy_install_script = 'easy_install'
if sys.platform == 'win32':
easy_install_script = 'easy_install-script.py'
cmd = [join(os.path.dirname(py_executable), easy_install_script), filename]
if sys.platform == 'win32':
cmd.insert(0, py_executable)
if filename == 'pip':
if never_download:
logger.fatal("Can't find any local distributions of pip to install "
"and --never-download is set. Either re-run virtualenv "
"without the --never-download option, or place a pip "
"source distribution (zip/tar.gz/tar.bz2) in one of these "
"locations: %r" % search_dirs)
sys.exit(1)
logger.info('Installing pip from network...')
else:
logger.info('Installing existing %s distribution: %s' % (
os.path.basename(filename), filename))
logger.start_progress('Installing pip...')
logger.indent += 2
def _filter_setup(line):
return filter_ez_setup(line, 'pip')
try:
call_subprocess(cmd, show_stdout=False,
filter_stdout=_filter_setup)
finally:
logger.indent -= 2
logger.end_progress()
def filter_ez_setup(line, project_name='setuptools'):
if not line.strip():
return Logger.DEBUG
if project_name == 'distribute':
for prefix in ('Extracting', 'Now working', 'Installing', 'Before',
'Scanning', 'Setuptools', 'Egg', 'Already',
'running', 'writing', 'reading', 'installing',
'creating', 'copying', 'byte-compiling', 'removing',
'Processing'):
if line.startswith(prefix):
return Logger.DEBUG
return Logger.DEBUG
for prefix in ['Reading ', 'Best match', 'Processing setuptools',
'Copying setuptools', 'Adding setuptools',
'Installing ', 'Installed ']:
if line.startswith(prefix):
return Logger.DEBUG
return Logger.INFO
class UpdatingDefaultsHelpFormatter(optparse.IndentedHelpFormatter):
"""
Custom help formatter for use in ConfigOptionParser that updates
the defaults before expanding them, allowing them to show up correctly
in the help listing
"""
def expand_default(self, option):
if self.parser is not None:
self.parser.update_defaults(self.parser.defaults)
return optparse.IndentedHelpFormatter.expand_default(self, option)
class ConfigOptionParser(optparse.OptionParser):
"""
    Custom option parser which updates its defaults by checking the
configuration files and environmental variables
"""
def __init__(self, *args, **kwargs):
self.config = ConfigParser.RawConfigParser()
self.files = self.get_config_files()
self.config.read(self.files)
optparse.OptionParser.__init__(self, *args, **kwargs)
def get_config_files(self):
config_file = os.environ.get('VIRTUALENV_CONFIG_FILE', False)
if config_file and os.path.exists(config_file):
return [config_file]
return [default_config_file]
def update_defaults(self, defaults):
"""
Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists).
"""
# Then go and look for the other sources of configuration:
config = {}
# 1. config files
config.update(dict(self.get_config_section('virtualenv')))
# 2. environmental variables
config.update(dict(self.get_environ_vars()))
# Then set the options with those values
for key, val in config.items():
key = key.replace('_', '-')
if not key.startswith('--'):
key = '--%s' % key # only prefer long opts
option = self.get_option(key)
if option is not None:
# ignore empty values
if not val:
continue
# handle multiline configs
if option.action == 'append':
val = val.split()
else:
option.nargs = 1
if option.action in ('store_true', 'store_false', 'count'):
val = strtobool(val)
try:
val = option.convert_value(key, val)
except optparse.OptionValueError:
e = sys.exc_info()[1]
print("An error occured during configuration: %s" % e)
sys.exit(3)
defaults[option.dest] = val
return defaults
def get_config_section(self, name):
"""
Get a section of a configuration
"""
if self.config.has_section(name):
return self.config.items(name)
return []
def get_environ_vars(self, prefix='VIRTUALENV_'):
"""
Returns a generator with all environmental vars with prefix VIRTUALENV
"""
for key, val in os.environ.items():
if key.startswith(prefix):
yield (key.replace(prefix, '').lower(), val)
def get_default_values(self):
"""
        Overriding to make updating the defaults after instantiation of
the option parser possible, update_defaults() does the dirty work.
"""
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return optparse.Values(self.defaults)
defaults = self.update_defaults(self.defaults.copy()) # ours
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, basestring):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return optparse.Values(defaults)
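# Illustrative sketch of the two configuration sources that
# ConfigOptionParser.update_defaults() merges (the concrete values are made up):
#
#   # ~/.virtualenv/virtualenv.ini (or the file named by $VIRTUALENV_CONFIG_FILE)
#   [virtualenv]
#   extra-search-dir =
#       /opt/python/wheels
#       /opt/python/eggs
#   never-download = true
#
#   # Environment variables use the VIRTUALENV_ prefix with underscores:
#   #   VIRTUALENV_NEVER_DOWNLOAD=true virtualenv /tmp/env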
def main():
parser = ConfigOptionParser(
version=virtualenv_version,
usage="%prog [OPTIONS] DEST_DIR",
formatter=UpdatingDefaultsHelpFormatter())
parser.add_option(
'-v', '--verbose',
action='count',
dest='verbose',
default=0,
help="Increase verbosity")
parser.add_option(
'-q', '--quiet',
action='count',
dest='quiet',
default=0,
help='Decrease verbosity')
parser.add_option(
'-p', '--python',
dest='python',
metavar='PYTHON_EXE',
help='The Python interpreter to use, e.g., --python=python2.5 will use the python2.5 '
'interpreter to create the new environment. The default is the interpreter that '
'virtualenv was installed with (%s)' % sys.executable)
parser.add_option(
'--clear',
dest='clear',
action='store_true',
help="Clear out the non-root install and start from scratch")
parser.add_option(
'--no-site-packages',
dest='no_site_packages',
action='store_true',
help="Don't give access to the global site-packages dir to the "
"virtual environment")
parser.add_option(
'--system-site-packages',
dest='system_site_packages',
action='store_true',
help="Give access to the global site-packages dir to the "
"virtual environment")
parser.add_option(
'--unzip-setuptools',
dest='unzip_setuptools',
action='store_true',
help="Unzip Setuptools or Distribute when installing it")
parser.add_option(
'--relocatable',
dest='relocatable',
action='store_true',
help='Make an EXISTING virtualenv environment relocatable. '
'This fixes up scripts and makes all .pth files relative')
parser.add_option(
'--distribute',
dest='use_distribute',
action='store_true',
help='Use Distribute instead of Setuptools. Set environ variable '
'VIRTUALENV_DISTRIBUTE to make it the default ')
default_search_dirs = file_search_dirs()
parser.add_option(
'--extra-search-dir',
dest="search_dirs",
action="append",
default=default_search_dirs,
help="Directory to look for setuptools/distribute/pip distributions in. "
"You can add any number of additional --extra-search-dir paths.")
parser.add_option(
'--never-download',
dest="never_download",
action="store_true",
help="Never download anything from the network. Instead, virtualenv will fail "
"if local distributions of setuptools/distribute/pip are not present.")
parser.add_option(
'--prompt=',
dest='prompt',
help='Provides an alternative prompt prefix for this environment')
if 'extend_parser' in globals():
extend_parser(parser)
options, args = parser.parse_args()
global logger
if 'adjust_options' in globals():
adjust_options(options, args)
verbosity = options.verbose - options.quiet
logger = Logger([(Logger.level_for_integer(2-verbosity), sys.stdout)])
if options.python and not os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
env = os.environ.copy()
interpreter = resolve_interpreter(options.python)
if interpreter == sys.executable:
logger.warn('Already using interpreter %s' % interpreter)
else:
logger.notify('Running virtualenv with interpreter %s' % interpreter)
env['VIRTUALENV_INTERPRETER_RUNNING'] = 'true'
file = __file__
if file.endswith('.pyc'):
file = file[:-1]
popen = subprocess.Popen([interpreter, file] + sys.argv[1:], env=env)
raise SystemExit(popen.wait())
# Force --use-distribute on Python 3, since setuptools is not available.
if majver > 2:
options.use_distribute = True
if os.environ.get('PYTHONDONTWRITEBYTECODE') and not options.use_distribute:
print(
"The PYTHONDONTWRITEBYTECODE environment variable is "
"not compatible with setuptools. Either use --distribute "
"or unset PYTHONDONTWRITEBYTECODE.")
sys.exit(2)
if not args:
print('You must provide a DEST_DIR')
parser.print_help()
sys.exit(2)
if len(args) > 1:
print('There must be only one argument: DEST_DIR (you gave %s)' % (
' '.join(args)))
parser.print_help()
sys.exit(2)
home_dir = args[0]
if os.environ.get('WORKING_ENV'):
logger.fatal('ERROR: you cannot run virtualenv while in a workingenv')
logger.fatal('Please deactivate your workingenv, then re-run this script')
sys.exit(3)
if 'PYTHONHOME' in os.environ:
logger.warn('PYTHONHOME is set. You *must* activate the virtualenv before using it')
del os.environ['PYTHONHOME']
if options.relocatable:
make_environment_relocatable(home_dir)
return
if options.no_site_packages:
logger.warn('The --no-site-packages flag is deprecated; it is now '
'the default behavior.')
create_environment(home_dir,
site_packages=options.system_site_packages,
clear=options.clear,
unzip_setuptools=options.unzip_setuptools,
use_distribute=options.use_distribute,
prompt=options.prompt,
search_dirs=options.search_dirs,
never_download=options.never_download)
if 'after_install' in globals():
after_install(options, home_dir)
def call_subprocess(cmd, show_stdout=True,
filter_stdout=None, cwd=None,
raise_on_returncode=True, extra_env=None,
remove_from_env=None):
cmd_parts = []
for part in cmd:
if len(part) > 45:
part = part[:20]+"..."+part[-20:]
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
if hasattr(part, 'decode'):
try:
part = part.decode(sys.getdefaultencoding())
except UnicodeDecodeError:
part = part.decode(sys.getfilesystemencoding())
cmd_parts.append(part)
cmd_desc = ' '.join(cmd_parts)
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
logger.debug("Running command %s" % cmd_desc)
if extra_env or remove_from_env:
env = os.environ.copy()
if extra_env:
env.update(extra_env)
if remove_from_env:
for varname in remove_from_env:
env.pop(varname, None)
else:
env = None
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception:
e = sys.exc_info()[1]
logger.fatal(
"Error %s while executing command %s" % (e, cmd_desc))
raise
all_output = []
if stdout is not None:
stdout = proc.stdout
encoding = sys.getdefaultencoding()
fs_encoding = sys.getfilesystemencoding()
while 1:
line = stdout.readline()
try:
line = line.decode(encoding)
except UnicodeDecodeError:
line = line.decode(fs_encoding)
if not line:
break
line = line.rstrip()
all_output.append(line)
if filter_stdout:
level = filter_stdout(line)
if isinstance(level, tuple):
level, line = level
logger.log(level, line)
if not logger.stdout_level_matches(level):
logger.show_progress()
else:
logger.info(line)
else:
proc.communicate()
proc.wait()
if proc.returncode:
if raise_on_returncode:
if all_output:
logger.notify('Complete output from command %s:' % cmd_desc)
logger.notify('\n'.join(all_output) + '\n----------------------------------------')
raise OSError(
"Command %s failed with error code %s"
% (cmd_desc, proc.returncode))
else:
logger.warn(
"Command %s had error code %s"
% (cmd_desc, proc.returncode))
def create_environment(home_dir, site_packages=False, clear=False,
unzip_setuptools=False, use_distribute=False,
prompt=None, search_dirs=None, never_download=False):
"""
Creates a new environment in ``home_dir``.
If ``site_packages`` is true, then the global ``site-packages/``
directory will be on the path.
If ``clear`` is true (default False) then the environment will
first be cleared.
"""
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
py_executable = os.path.abspath(install_python(
home_dir, lib_dir, inc_dir, bin_dir,
site_packages=site_packages, clear=clear))
install_distutils(home_dir)
# use_distribute also is True if VIRTUALENV_DISTRIBUTE env var is set
# we also check VIRTUALENV_USE_DISTRIBUTE for backwards compatibility
if use_distribute or os.environ.get('VIRTUALENV_USE_DISTRIBUTE'):
install_distribute(py_executable, unzip=unzip_setuptools,
search_dirs=search_dirs, never_download=never_download)
else:
install_setuptools(py_executable, unzip=unzip_setuptools,
search_dirs=search_dirs, never_download=never_download)
install_pip(py_executable, search_dirs=search_dirs, never_download=never_download)
install_activate(home_dir, bin_dir, prompt)
def path_locations(home_dir):
"""Return the path locations for the environment (where libraries are,
where scripts go, etc)"""
# XXX: We'd use distutils.sysconfig.get_python_inc/lib but its
# prefix arg is broken: http://bugs.python.org/issue3386
if sys.platform == 'win32':
# Windows has lots of problems with executables with spaces in
# the name; this function will remove them (using the ~1
# format):
mkdir(home_dir)
if ' ' in home_dir:
try:
import win32api
except ImportError:
print('Error: the path "%s" has a space in it' % home_dir)
print('To handle these kinds of paths, the win32api module must be installed:')
print(' http://sourceforge.net/projects/pywin32/')
sys.exit(3)
home_dir = win32api.GetShortPathName(home_dir)
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'Scripts')
elif is_jython:
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'bin')
elif is_pypy:
lib_dir = home_dir
inc_dir = join(home_dir, 'include')
bin_dir = join(home_dir, 'bin')
else:
lib_dir = join(home_dir, 'lib', py_version)
inc_dir = join(home_dir, 'include', py_version + abiflags)
bin_dir = join(home_dir, 'bin')
return home_dir, lib_dir, inc_dir, bin_dir
def change_prefix(filename, dst_prefix):
prefixes = [sys.prefix]
if sys.platform == "darwin":
prefixes.extend((
os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(sys.prefix, "Extras", "lib", "python"),
os.path.join("~", "Library", "Python", sys.version[:3], "site-packages")))
if hasattr(sys, 'real_prefix'):
prefixes.append(sys.real_prefix)
prefixes = list(map(os.path.abspath, prefixes))
filename = os.path.abspath(filename)
for src_prefix in prefixes:
if filename.startswith(src_prefix):
_, relpath = filename.split(src_prefix, 1)
assert relpath[0] == os.sep
relpath = relpath[1:]
return join(dst_prefix, relpath)
assert False, "Filename %s does not start with any of these prefixes: %s" % \
(filename, prefixes)
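# Illustrative example of change_prefix() above (paths are hypothetical and
# assume sys.prefix == '/usr'):
#
#   change_prefix('/usr/lib/python2.7/os.py', '/home/me/env')
#   # -> '/home/me/env/lib/python2.7/os.py'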
def copy_required_modules(dst_prefix):
import imp
for modname in REQUIRED_MODULES:
if modname in sys.builtin_module_names:
logger.info("Ignoring built-in bootstrap module: %s" % modname)
continue
try:
f, filename, _ = imp.find_module(modname)
except ImportError:
logger.info("Cannot import bootstrap module: %s" % modname)
else:
if f is not None:
f.close()
dst_filename = change_prefix(filename, dst_prefix)
copyfile(filename, dst_filename)
if filename.endswith('.pyc'):
pyfile = filename[:-1]
if os.path.exists(pyfile):
copyfile(pyfile, dst_filename[:-1])
def install_python(home_dir, lib_dir, inc_dir, bin_dir, site_packages, clear):
"""Install just the base environment, no distutils patches etc"""
if sys.executable.startswith(bin_dir):
print('Please use the *system* python to run this script')
return
if clear:
rmtree(lib_dir)
## FIXME: why not delete it?
## Maybe it should delete everything with #!/path/to/venv/python in it
logger.notify('Not deleting %s', bin_dir)
if hasattr(sys, 'real_prefix'):
logger.notify('Using real prefix %r' % sys.real_prefix)
prefix = sys.real_prefix
else:
prefix = sys.prefix
mkdir(lib_dir)
fix_lib64(lib_dir)
fix_local_scheme(home_dir)
stdlib_dirs = [os.path.dirname(os.__file__)]
if sys.platform == 'win32':
stdlib_dirs.append(join(os.path.dirname(stdlib_dirs[0]), 'DLLs'))
elif sys.platform == 'darwin':
stdlib_dirs.append(join(stdlib_dirs[0], 'site-packages'))
if hasattr(os, 'symlink'):
logger.info('Symlinking Python bootstrap modules')
else:
logger.info('Copying Python bootstrap modules')
logger.indent += 2
try:
# copy required files...
for stdlib_dir in stdlib_dirs:
if not os.path.isdir(stdlib_dir):
continue
for fn in os.listdir(stdlib_dir):
bn = os.path.splitext(fn)[0]
if fn != 'site-packages' and bn in REQUIRED_FILES:
copyfile(join(stdlib_dir, fn), join(lib_dir, fn))
# ...and modules
copy_required_modules(home_dir)
finally:
logger.indent -= 2
mkdir(join(lib_dir, 'site-packages'))
import site
site_filename = site.__file__
if site_filename.endswith('.pyc'):
site_filename = site_filename[:-1]
elif site_filename.endswith('$py.class'):
site_filename = site_filename.replace('$py.class', '.py')
site_filename_dst = change_prefix(site_filename, home_dir)
site_dir = os.path.dirname(site_filename_dst)
writefile(site_filename_dst, SITE_PY)
writefile(join(site_dir, 'orig-prefix.txt'), prefix)
site_packages_filename = join(site_dir, 'no-global-site-packages.txt')
if not site_packages:
writefile(site_packages_filename, '')
else:
if os.path.exists(site_packages_filename):
logger.info('Deleting %s' % site_packages_filename)
os.unlink(site_packages_filename)
if is_pypy or is_win:
stdinc_dir = join(prefix, 'include')
else:
stdinc_dir = join(prefix, 'include', py_version + abiflags)
if os.path.exists(stdinc_dir):
copyfile(stdinc_dir, inc_dir)
else:
logger.debug('No include dir %s' % stdinc_dir)
# pypy never uses exec_prefix, just ignore it
if sys.exec_prefix != prefix and not is_pypy:
if sys.platform == 'win32':
exec_dir = join(sys.exec_prefix, 'lib')
elif is_jython:
exec_dir = join(sys.exec_prefix, 'Lib')
else:
exec_dir = join(sys.exec_prefix, 'lib', py_version)
for fn in os.listdir(exec_dir):
copyfile(join(exec_dir, fn), join(lib_dir, fn))
if is_jython:
# Jython has either jython-dev.jar and javalib/ dir, or just
# jython.jar
for name in 'jython-dev.jar', 'javalib', 'jython.jar':
src = join(prefix, name)
if os.path.exists(src):
copyfile(src, join(home_dir, name))
# XXX: registry should always exist after Jython 2.5rc1
src = join(prefix, 'registry')
if os.path.exists(src):
copyfile(src, join(home_dir, 'registry'), symlink=False)
copyfile(join(prefix, 'cachedir'), join(home_dir, 'cachedir'),
symlink=False)
mkdir(bin_dir)
py_executable = join(bin_dir, os.path.basename(sys.executable))
if 'Python.framework' in prefix:
if re.search(r'/Python(?:-32|-64)*$', py_executable):
# The name of the python executable is not quite what
# we want, rename it.
py_executable = os.path.join(
os.path.dirname(py_executable), 'python')
logger.notify('New %s executable in %s', expected_exe, py_executable)
if sys.executable != py_executable:
## FIXME: could I just hard link?
executable = sys.executable
if sys.platform == 'cygwin' and os.path.exists(executable + '.exe'):
# Cygwin misreports sys.executable sometimes
executable += '.exe'
py_executable += '.exe'
logger.info('Executable actually exists in %s' % executable)
shutil.copyfile(executable, py_executable)
make_exe(py_executable)
if sys.platform == 'win32' or sys.platform == 'cygwin':
pythonw = os.path.join(os.path.dirname(sys.executable), 'pythonw.exe')
if os.path.exists(pythonw):
logger.info('Also created pythonw.exe')
shutil.copyfile(pythonw, os.path.join(os.path.dirname(py_executable), 'pythonw.exe'))
if is_pypy:
# make a symlink python --> pypy-c
python_executable = os.path.join(os.path.dirname(py_executable), 'python')
logger.info('Also created executable %s' % python_executable)
copyfile(py_executable, python_executable)
if os.path.splitext(os.path.basename(py_executable))[0] != expected_exe:
secondary_exe = os.path.join(os.path.dirname(py_executable),
expected_exe)
py_executable_ext = os.path.splitext(py_executable)[1]
if py_executable_ext == '.exe':
# python2.4 gives an extension of '.4' :P
secondary_exe += py_executable_ext
if os.path.exists(secondary_exe):
logger.warn('Not overwriting existing %s script %s (you must use %s)'
% (expected_exe, secondary_exe, py_executable))
else:
logger.notify('Also creating executable in %s' % secondary_exe)
shutil.copyfile(sys.executable, secondary_exe)
make_exe(secondary_exe)
if 'Python.framework' in prefix:
logger.debug('MacOSX Python framework detected')
        # Make sure we use the embedded interpreter inside
# the framework, even if sys.executable points to
# the stub executable in ${sys.prefix}/bin
# See http://groups.google.com/group/python-virtualenv/
# browse_thread/thread/17cab2f85da75951
original_python = os.path.join(
prefix, 'Resources/Python.app/Contents/MacOS/Python')
shutil.copy(original_python, py_executable)
# Copy the framework's dylib into the virtual
# environment
virtual_lib = os.path.join(home_dir, '.Python')
if os.path.exists(virtual_lib):
os.unlink(virtual_lib)
copyfile(
os.path.join(prefix, 'Python'),
virtual_lib)
# And then change the install_name of the copied python executable
try:
call_subprocess(
["install_name_tool", "-change",
os.path.join(prefix, 'Python'),
'@executable_path/../.Python',
py_executable])
except:
logger.fatal(
"Could not call install_name_tool -- you must have Apple's development tools installed")
raise
# Some tools depend on pythonX.Y being present
py_executable_version = '%s.%s' % (
sys.version_info[0], sys.version_info[1])
if not py_executable.endswith(py_executable_version):
# symlinking pythonX.Y > python
pth = py_executable + '%s.%s' % (
sys.version_info[0], sys.version_info[1])
if os.path.exists(pth):
os.unlink(pth)
os.symlink('python', pth)
else:
# reverse symlinking python -> pythonX.Y (with --python)
pth = join(bin_dir, 'python')
if os.path.exists(pth):
os.unlink(pth)
os.symlink(os.path.basename(py_executable), pth)
if sys.platform == 'win32' and ' ' in py_executable:
# There's a bug with subprocess on Windows when using a first
# argument that has a space in it. Instead we have to quote
# the value:
py_executable = '"%s"' % py_executable
cmd = [py_executable, '-c', """
import sys
prefix = sys.prefix
if sys.version_info[0] == 3:
prefix = prefix.encode('utf8')
if hasattr(sys.stdout, 'detach'):
sys.stdout = sys.stdout.detach()
elif hasattr(sys.stdout, 'buffer'):
sys.stdout = sys.stdout.buffer
sys.stdout.write(prefix)
"""]
logger.info('Testing executable with %s %s "%s"' % tuple(cmd))
try:
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
proc_stdout, proc_stderr = proc.communicate()
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.EACCES:
logger.fatal('ERROR: The executable %s could not be run: %s' % (py_executable, e))
sys.exit(100)
else:
raise e
proc_stdout = proc_stdout.strip().decode("utf-8")
proc_stdout = os.path.normcase(os.path.abspath(proc_stdout))
norm_home_dir = os.path.normcase(os.path.abspath(home_dir))
if hasattr(norm_home_dir, 'decode'):
norm_home_dir = norm_home_dir.decode(sys.getfilesystemencoding())
if proc_stdout != norm_home_dir:
logger.fatal(
'ERROR: The executable %s is not functioning' % py_executable)
logger.fatal(
'ERROR: It thinks sys.prefix is %r (should be %r)'
% (proc_stdout, norm_home_dir))
logger.fatal(
'ERROR: virtualenv is not compatible with this system or executable')
if sys.platform == 'win32':
logger.fatal(
'Note: some Windows users have reported this error when they installed Python for "Only this user". The problem may be resolvable if you install Python "For all users". (See https://bugs.launchpad.net/virtualenv/+bug/352844)')
sys.exit(100)
else:
logger.info('Got sys.prefix result: %r' % proc_stdout)
pydistutils = os.path.expanduser('~/.pydistutils.cfg')
if os.path.exists(pydistutils):
logger.notify('Please make sure you remove any previous custom paths from '
'your %s file.' % pydistutils)
## FIXME: really this should be calculated earlier
return py_executable
def install_activate(home_dir, bin_dir, prompt=None):
if sys.platform == 'win32' or is_jython and os._name == 'nt':
files = {'activate.bat': ACTIVATE_BAT,
'deactivate.bat': DEACTIVATE_BAT}
if os.environ.get('OS') == 'Windows_NT' and os.environ.get('OSTYPE') == 'cygwin':
files['activate'] = ACTIVATE_SH
else:
files = {'activate': ACTIVATE_SH}
        # supplying activate.fish in addition to, not instead of, the
# bash script support.
files['activate.fish'] = ACTIVATE_FISH
# same for csh/tcsh support...
files['activate.csh'] = ACTIVATE_CSH
files['activate_this.py'] = ACTIVATE_THIS
home_dir = os.path.abspath(home_dir)
if hasattr(home_dir, 'decode'):
home_dir = home_dir.decode(sys.getfilesystemencoding())
vname = os.path.basename(home_dir)
for name, content in files.items():
content = content.replace('__VIRTUAL_PROMPT__', prompt or '')
content = content.replace('__VIRTUAL_WINPROMPT__', prompt or '(%s)' % vname)
content = content.replace('__VIRTUAL_ENV__', home_dir)
content = content.replace('__VIRTUAL_NAME__', vname)
content = content.replace('__BIN_NAME__', os.path.basename(bin_dir))
writefile(os.path.join(bin_dir, name), content)
def install_distutils(home_dir):
distutils_path = change_prefix(distutils.__path__[0], home_dir)
mkdir(distutils_path)
## FIXME: maybe this prefix setting should only be put in place if
## there's a local distutils.cfg with a prefix setting?
home_dir = os.path.abspath(home_dir)
## FIXME: this is breaking things, removing for now:
#distutils_cfg = DISTUTILS_CFG + "\n[install]\nprefix=%s\n" % home_dir
writefile(os.path.join(distutils_path, '__init__.py'), DISTUTILS_INIT)
writefile(os.path.join(distutils_path, 'distutils.cfg'), DISTUTILS_CFG, overwrite=False)
def fix_local_scheme(home_dir):
"""
Platforms that use the "posix_local" install scheme (like Ubuntu with
Python 2.7) need to be given an additional "local" location, sigh.
"""
try:
import sysconfig
except ImportError:
pass
else:
if sysconfig._get_default_scheme() == 'posix_local':
local_path = os.path.join(home_dir, 'local')
if not os.path.exists(local_path):
os.symlink(os.path.abspath(home_dir), local_path)
def fix_lib64(lib_dir):
"""
Some platforms (particularly Gentoo on x64) put things in lib64/pythonX.Y
instead of lib/pythonX.Y. If this is such a platform we'll just create a
symlink so lib64 points to lib
"""
if [p for p in distutils.sysconfig.get_config_vars().values()
if isinstance(p, basestring) and 'lib64' in p]:
logger.debug('This system uses lib64; symlinking lib64 to lib')
assert os.path.basename(lib_dir) == 'python%s' % sys.version[:3], (
"Unexpected python lib dir: %r" % lib_dir)
lib_parent = os.path.dirname(lib_dir)
assert os.path.basename(lib_parent) == 'lib', (
"Unexpected parent dir: %r" % lib_parent)
copyfile(lib_parent, os.path.join(os.path.dirname(lib_parent), 'lib64'))
def resolve_interpreter(exe):
"""
If the executable given isn't an absolute path, search $PATH for the interpreter
"""
if os.path.abspath(exe) != exe:
paths = os.environ.get('PATH', '').split(os.pathsep)
for path in paths:
if os.path.exists(os.path.join(path, exe)):
exe = os.path.join(path, exe)
break
if not os.path.exists(exe):
logger.fatal('The executable %s (from --python=%s) does not exist' % (exe, exe))
raise SystemExit(3)
if not is_executable(exe):
logger.fatal('The executable %s (from --python=%s) is not executable' % (exe, exe))
raise SystemExit(3)
return exe
def is_executable(exe):
"""Checks a file is executable"""
return os.access(exe, os.X_OK)
############################################################
## Relocating the environment:
def make_environment_relocatable(home_dir):
"""
Makes the already-existing environment use relative paths, and takes out
the #!-based environment selection in scripts.
"""
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
activate_this = os.path.join(bin_dir, 'activate_this.py')
if not os.path.exists(activate_this):
logger.fatal(
'The environment doesn\'t have a file %s -- please re-run virtualenv '
'on this environment to update it' % activate_this)
fixup_scripts(home_dir)
fixup_pth_and_egg_link(home_dir)
## FIXME: need to fix up distutils.cfg
OK_ABS_SCRIPTS = ['python', 'python%s' % sys.version[:3],
'activate', 'activate.bat', 'activate_this.py']
def fixup_scripts(home_dir):
# This is what we expect at the top of scripts:
shebang = '#!%s/bin/python' % os.path.normcase(os.path.abspath(home_dir))
# This is what we'll put:
new_shebang = '#!/usr/bin/env python%s' % sys.version[:3]
activate = "import os; activate_this=os.path.join(os.path.dirname(__file__), 'activate_this.py'); execfile(activate_this, dict(__file__=activate_this)); del os, activate_this"
if sys.platform == 'win32':
bin_suffix = 'Scripts'
else:
bin_suffix = 'bin'
bin_dir = os.path.join(home_dir, bin_suffix)
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
for filename in os.listdir(bin_dir):
filename = os.path.join(bin_dir, filename)
if not os.path.isfile(filename):
# ignore subdirs, e.g. .svn ones.
continue
f = open(filename, 'rb')
lines = f.readlines()
f.close()
if not lines:
logger.warn('Script %s is an empty file' % filename)
continue
if not lines[0].strip().startswith(shebang):
if os.path.basename(filename) in OK_ABS_SCRIPTS:
logger.debug('Cannot make script %s relative' % filename)
elif lines[0].strip() == new_shebang:
logger.info('Script %s has already been made relative' % filename)
else:
logger.warn('Script %s cannot be made relative (it\'s not a normal script that starts with %s)'
% (filename, shebang))
continue
logger.notify('Making script %s relative' % filename)
lines = [new_shebang+'\n', activate+'\n'] + lines[1:]
f = open(filename, 'wb')
f.writelines(lines)
f.close()
def fixup_pth_and_egg_link(home_dir, sys_path=None):
"""Makes .pth and .egg-link files use relative paths"""
home_dir = os.path.normcase(os.path.abspath(home_dir))
if sys_path is None:
sys_path = sys.path
for path in sys_path:
if not path:
path = '.'
if not os.path.isdir(path):
continue
path = os.path.normcase(os.path.abspath(path))
if not path.startswith(home_dir):
logger.debug('Skipping system (non-environment) directory %s' % path)
continue
for filename in os.listdir(path):
filename = os.path.join(path, filename)
if filename.endswith('.pth'):
if not os.access(filename, os.W_OK):
logger.warn('Cannot write .pth file %s, skipping' % filename)
else:
fixup_pth_file(filename)
if filename.endswith('.egg-link'):
if not os.access(filename, os.W_OK):
logger.warn('Cannot write .egg-link file %s, skipping' % filename)
else:
fixup_egg_link(filename)
def fixup_pth_file(filename):
lines = []
prev_lines = []
f = open(filename)
prev_lines = f.readlines()
f.close()
for line in prev_lines:
line = line.strip()
if (not line or line.startswith('#') or line.startswith('import ')
or os.path.abspath(line) != line):
lines.append(line)
else:
new_value = make_relative_path(filename, line)
if line != new_value:
logger.debug('Rewriting path %s as %s (in %s)' % (line, new_value, filename))
lines.append(new_value)
if lines == prev_lines:
logger.info('No changes to .pth file %s' % filename)
return
logger.notify('Making paths in .pth file %s relative' % filename)
f = open(filename, 'w')
f.write('\n'.join(lines) + '\n')
f.close()
def fixup_egg_link(filename):
f = open(filename)
link = f.read().strip()
f.close()
if os.path.abspath(link) != link:
logger.debug('Link in %s already relative' % filename)
return
new_link = make_relative_path(filename, link)
logger.notify('Rewriting link %s in %s as %s' % (link, filename, new_link))
f = open(filename, 'w')
f.write(new_link)
f.close()
def make_relative_path(source, dest, dest_is_directory=True):
"""
Make a filename relative, where the filename is dest, and it is
being referred to from the filename source.
>>> make_relative_path('/usr/share/something/a-file.pth',
... '/usr/share/another-place/src/Directory')
'../another-place/src/Directory'
>>> make_relative_path('/usr/share/something/a-file.pth',
... '/home/user/src/Directory')
'../../../home/user/src/Directory'
>>> make_relative_path('/usr/share/a-file.pth', '/usr/share/')
'./'
"""
source = os.path.dirname(source)
if not dest_is_directory:
dest_filename = os.path.basename(dest)
dest = os.path.dirname(dest)
dest = os.path.normpath(os.path.abspath(dest))
source = os.path.normpath(os.path.abspath(source))
dest_parts = dest.strip(os.path.sep).split(os.path.sep)
source_parts = source.strip(os.path.sep).split(os.path.sep)
while dest_parts and source_parts and dest_parts[0] == source_parts[0]:
dest_parts.pop(0)
source_parts.pop(0)
full_parts = ['..']*len(source_parts) + dest_parts
if not dest_is_directory:
full_parts.append(dest_filename)
if not full_parts:
# Special case for the current directory (otherwise it'd be '')
return './'
return os.path.sep.join(full_parts)
############################################################
## Bootstrap script creation:
def create_bootstrap_script(extra_text, python_version=''):
"""
Creates a bootstrap script, which is like this script but with
extend_parser, adjust_options, and after_install hooks.
This returns a string that (written to disk of course) can be used
as a bootstrap script with your own customizations. The script
will be the standard virtualenv.py script, with your extra text
added (your extra text should be Python code).
If you include these functions, they will be called:
``extend_parser(optparse_parser)``:
You can add or remove options from the parser here.
``adjust_options(options, args)``:
You can change options here, or change the args (if you accept
different kinds of arguments, be sure you modify ``args`` so it is
only ``[DEST_DIR]``).
``after_install(options, home_dir)``:
After everything is installed, this function is called. This
is probably the function you are most likely to use. An
example would be::
def after_install(options, home_dir):
subprocess.call([join(home_dir, 'bin', 'easy_install'),
'MyPackage'])
subprocess.call([join(home_dir, 'bin', 'my-package-script'),
'setup', home_dir])
This example immediately installs a package, and runs a setup
script from that package.
If you provide something like ``python_version='2.4'`` then the
script will start with ``#!/usr/bin/env python2.4`` instead of
``#!/usr/bin/env python``. You can use this when the script must
be run with a particular Python version.
"""
filename = __file__
if filename.endswith('.pyc'):
filename = filename[:-1]
f = open(filename, 'rb')
content = f.read()
f.close()
py_exe = 'python%s' % python_version
content = (('#!/usr/bin/env %s\n' % py_exe)
+ '## WARNING: This file is generated\n'
+ content)
return content.replace('##EXT' 'END##', extra_text)
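# Editorial sketch (not in the original file): one way a packager might use
# create_bootstrap_script() to emit a customized installer. The hook body and the
# output filename below are hypothetical.
#
#   extra = "\n".join([
#       "def after_install(options, home_dir):",
#       "    subprocess.call([join(home_dir, 'bin', 'easy_install'), 'MyPackage'])",
#   ])
#   open('my-bootstrap.py', 'w').write(create_bootstrap_script(extra))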
##EXTEND##
def convert(s):
b = base64.b64decode(s.encode('ascii'))
return zlib.decompress(b).decode('utf-8')
##file site.py
SITE_PY = convert("""
eJzVPP1z2zaWv/OvwMqTIZXKdD66nR2n7o2TOK3v3MTbpLO5dT06SoIk1hTJEqQV7c3d337vAwAB
kvLHdvvDaTKxRAIPDw/vGw8YjUanZSnzhdgUiyaTQsmkmq9FmdRrJZZFJep1Wi0Oy6Sqd/B0fpOs
pBJ1IdROxdgqDoKnv/MTPBWf1qkyKMC3pKmLTVKn8yTLdiLdlEVVy4VYNFWar0Sap3WaZOk/oEWR
x+Lp78cgOM8FzDxLZSVuZaUArhLFUlzu6nWRi6gpcc7P4z8nL8cToeZVWtbQoNI4A0XWSR3kUi4A
TWjZKCBlWstDVcp5ukzntuG2aLKFKLNkLsV//RdPjZqGYaCKjdyuZSVFDsgATAmwSsQDvqaVmBcL
GQvxWs4THICft8QKGNoE10whGfNCZEW+gjnlci6VSqqdiGZNTYAIZbEoAKcUMKjTLAu2RXWjxrCk
tB5beCQSZg9/MsweME8cv885gOOHPPg5T79MGDZwD4Kr18w2lVymX0SCYOGn/CLnU/0sSpdikS6X
QIO8HmOTgBFQIktnRyUtx7d6hb47IqwsVyYwhkSUuTG/pB5xcF6LJFPAtk2JNFKE+Vs5S5McqJHf
wnAAEUgaDI2zSFVtx6HZiQIAVLiONUjJRolok6Q5MOuPyZzQ/luaL4qtGhMFYLWU+LVRtTv/aIAA
0NohwCTAxTKr2eRZeiOz3RgQ+ATYV1I1WY0CsUgrOa+LKpWKAABqOyG/ANITkVRSk5A508jthOhP
NElzXFgUMBR4fIkkWaarpiIJE8sUOBe44t2Hn8Tbs9fnp+81jxlgLLOrDeAMUGihHZxgAHHUqOoo
K0Cg4+AC/4hksUAhW+H4gFfb4OjelQ4imHsZd/s4Cw5k14urh4E51qBMaKyA+v03dJmoNdDnf+5Z
7yA43UcVmjh/264LkMk82UixTpi/kDOCbzWc7+KyXr8CblAIpwZSKVwcRDBFeEASl2ZRkUtRAotl
aS7HAVBoRm39VQRWeF/kh7TWHU4ACFWQw0vn2ZhGzCVMtA/rFeoL03hHM9NNArvOm6IixQH8n89J
F2VJfkM4KmIo/jaTqzTPESHkhSA8CGlgdZMCJy5icUGtSC+YRiJk7cUtUSQa4CVkOuBJ+SXZlJmc
sPiibr1bjdBgshZmrTPmOGhZk3qlVWunOsh7L+LPHa4jNOt1JQF4M/OEblkUEzEDnU3YlMmGxave
FsQ5wYA8USfkCWoJffE7UPRUqWYj7UvkFdAsxFDBssiyYgskOw4CIQ6wkTHKPnPCW3gH/wNc/D+T
9XwdBM5IFrAGhcjvA4VAwCTIXHO1RsLjNs3KXSWT5qwpimohKxrqYcQ+YsQf2BjnGrwvam3UeLq4
ysUmrVElzbTJTNni5WHN+vEVzxumAZZbEc1M05ZOG5xeVq6TmTQuyUwuURL0Ir2yyw5jBgNjki2u
xYatDLwDssiULciwYkGls6wlOQEAg4UvydOyyaiRQgYTCQy0KQn+JkGTXmhnCdibzXKAConN9xzs
D+D2DxCj7ToF+swBAmgY1FKwfLO0rtBBaPVR4Bt905/HB049X2rbxEMukzTTVj7Jg3N6eFZVJL5z
WWKviSaGghnmNbp2qxzoiGI+Go2CwLhDO2W+Fiqoq90xsIIw40ynsyZFwzedoqnXP1TAowhnYK+b
bWfhgYYwnd4DlZwuy6rY4Gs7t4+gTGAs7BEciEvSMpIdZI8TXyH5XJVemqZoux12FqiHgsufzt6d
fz77KE7EVavSJl19dg1jnuUJsDVZBGCqzrCtLoOWqPhS1H3iHZh3YgqwZ9SbxFcmdQO8C6h/qhp6
DdOYey+Ds/enry/Opj9/PPtp+vH80xkgCHZGBgc0ZTSPDTiMKgbhAK5cqFjb16DXgx68Pv1oHwTT
VE3LXbmDB2AogYWrCOY7ESE+nGobPE3zZRGOqfGv7ISfsFrRHtfV8dfX4uREhL8mt0kYgNfTNuVF
/JEE4NOulNC1hj9RocZBsJBLEJYbiSIVPSVPdswdgIjQstCW9dcizc175iN3CJL4iHoADtPpPEuU
wsbTaQikpQ4DH+gQszuMchJBx3Lndh1rVPBTSViKHLtM8L8BFJMZ9UM0GEW3i2kEAraZJ0pyK5o+
9JtOUctMp5EeEMSPeBxcJFYcoTBNUMtUKXiixCuodWaqyPAnwke5JZHBYAj1Gi6SDnbi2yRrpIqc
SQERo6hDRlSNqSIOAqciAtvZLt143KWm4RloBuTLCtB7VYdy+DkADwUUjAm7MDTjaIlphpj+O8cG
hAM4iSEqaKU6UFificuzS/Hy2YtDdEAgSlxY6njN0aameSPtwyWs1krWDsLcK5yQMIxduixRM+LT
47thbmK7Mn1WWOolruSmuJULwBYZ2Fll8RO9gVga5jFPYBVBE5MFZ6VnPL0EI0eePUgLWnug3oag
mPU3S3/A4bvMFagODoWJ1DpOZ+NVVsVtiu7BbKdfgnUD9YY2zrgigbNwHpOhEQMNAX5rjpTayhAU
WNWwi0l4I0jU8ItWFcYE7gJ16zV9vcmLbT7l2PUE1WQ0tqyLgqWZFxu0S3Ag3oHdACQLCMVaojEU
cNIFytYhIA/Th+kCZSkaAEBgmhUFWA4sE5zRFDnOw2ERxviVIOGtJFr4WzMEBUeGGA4kehvbB0ZL
ICSYnFVwVjVoJkNZM81gYIckPtddxBw0+gA6VIzB0EUaGjcy9Ls6BuUsLlyl5PRDG/r582dmG7Wm
jAgiNsNJo9FfknmLyx2YwhR0gvGhOL9CbLAFdxTANEqzpjj8KIqS/SdYz0st22C5IR6r6/L46Gi7
3cY6H1BUqyO1PPrzX7755i/PWCcuFsQ/MB1HWnRyLD6id+iDxt8aC/SdWbkOP6a5z40EK5LkR5Hz
iPh936SLQhwfjq3+RC5uDSv+b5wPUCBTMyhTGWg7ajF6og6fxC/VSDwRkds2GrMnoU2qtWK+1YUe
dQG2GzyNedHkdegoUiW+AusGMfVCzppVaAf3bKT5AVNFOY0sDxw+v0YMfM4wfGVM8RS1BLEFWnyH
9D8x2yTkz2gNgeRFE9WLd3fDWswQd/FwebfeoSM0ZoapQu5AifCbPFgAbeO+5OBHO6No9xxn1Hw8
Q2AsfWCYV7uCEQoO4YJrMXGlzuFq9FFBmrasmkHBuKoRFDS4dTOmtgZHNjJEkOjdmPCcF1a3ADp1
cn0mojerAC3ccXrWrssKjieEPHAintMTCU7tce/dM17aJssoBdPhUY8qDNhbaLTTBfBlZABMxKj6
ecQtTWDxobMovAYDwArO2iCDLXvMhG9cH3B0MBpgp57V39ebaTwEAhcp4uzRg6ATyic8QqVAmsrI
77mPxS1x+4PdaXGIqcwykUirPcLVVR6DQnWnYVqmOepeZ5HieVaAV2y1IjFS+953FihywcdDxkxL
oCZDSw6n0Ql5e54AhrodJrxWDaYG3MwJYrRJFVk3JNMa/gO3gjISlD4CWhI0C+ahUuZP7F8gc3a+
+sse9rCERoZwm+5zQ3oWQ8Mx7w8EklHnT0AKciBhXxjJdWR1kAGHOQvkCTe8lnulm2DECuTMsSCk
ZgB3eukFOPgkxj0LklCE/KVWshRfiREsX1dUH6a7/6VcatIGkdOAXAWdbzhxcxFOHuKkk5fwGdrP
SNDuRlkAB8/A5XFT8y6bG6a1aRJw1n3FbZECjUyZk9HYRfXaEMZN//7pxGnREssMYhjKG8jbhDEj
jQO73Bo0LLgB4615dyz92M1YYN8oLNQLufkC8V9YpWpeqBAD3F7uwv1orujTxmJ7kc5G8MdbgNH4
2oMkM52/wCzLPzFI6EEPh6B7k8W0yCKptmkekgLT9Dvxl6aHhyWlZ+SOPlI4dQQTxRzl0bsKBIQ2
K49AnFATQFQuQ6Xd/j7YO6c4snC5+8hzm6+OX173iTvZl+Gxn+GlOvtSV4nC1cp40VgocLX6BhyV
LkwuyXd6u1FvR2OYUBUKokjx4eNngYTgTOw22T1u6i3DIzb3zsn7GNRBr91Lrs7siF0AEdSKyChH
4eM58uHIPnZyd0zsEUAexTB3LIqBpPnkn4Fz10LBGIeLXY55tK7KwA+8/ubr6UBm1EXym69H94zS
IcaQ2EcdT9COTGUAYnDapkslk4x8DacTZRXzlndsm3LMCp3iP81k1wNOJ37Me2MyWvi95r3A0XwO
iB4QZhezXyFYVTq/dZukGSXlAY3DQ9RzJs7m1MEwPh6ku1HGnBR4LM8mg6GQunoGCxNyYD/uT0f7
Racm9zsQkJpPmag+Kgd6A77dP/I21d29w/2yP2ip/yCd9UhA3mxGAwR84BzM3ub//5mwsmJoWlmN
O1pfybv1vAH2AHW4x825ww3pD827WUvjTLDcKfEUBfSp2NKGNuXycGcCoCzYzxiAg8uot0XfNFXF
m5sk56WsDnHDbiKwlsd4GlQi1Adz9F7WiIltNqfcqFP5UQypzlBnO+1MwtZPHRbZdWFyJDK/TSvo
C1olCn/48ONZ2GcAPQx2GgbnrqPhkofbKYT7CKYNNXHCx/RhCj2myz8vVV1X2Seo2TM2GUhNtj5h
e4lHE7cOr8E9GQhvg5A3YjEinK/l/GYqaXMZ2RS7OknYN/gaMbF7zn6FkEqWVOYEM5lnDdKKHT2s
T1s2+Zzy8bUEe66LSbG4hLaMOd20zJKViKjzAlMdmhspG3KbVNrbKasCyxdFky6OVulCyN+aJMMw
Ui6XgAtuluhXMQ9PGQ/xlne9uaxNyXlTpfUOSJCoQu810Qa503C244lGHpK8rcAExC3zY/ERp43v
mXALQy4TjPoZdpwkxnnYwWwGInfRc3ifF1McdUpVoBNGqr8PTI+D7ggFABgBUJj/aKwzRf4bSa/c
DS1ac5eoqCU9UrqRbUEeB0KJxhhZ82/66TOiy1t7sFztx3J1N5arLparQSxXPparu7F0RQIX1iZJ
jCQMJUq6afTBigw3x8HDnCXzNbfD6kCsAgSIojQBnZEpLpL1Mim8n0RASG07G5z0sK2wSLnssCo4
5apBIvfjpokOHk15s9OZ6jV0Z56K8dn2VZn4fY/imIqJZtSd5W2R1EnsycUqK2YgthbdSQtgIroF
J5yby2+nM84mdizV6PI/P/3w4T02R1Ajs51O3XAR0bDgVKKnSbVSfWlqg40S2JFa+oUf1E0DPHhg
JodHOeD/3lJFATKO2NKOeCFK8ACo7sc2c6tjwrDzXJfR6OfM5Ly5cSJGeT1qJ7WHSKeXl29PP52O
KMU0+t+RKzCGtr50uPiYFrZB339zm1uKYx8Qap1LaY2fOyeP1i1H3G9jDdiO2/vsuvPgxUMM9mBY
6s/yD6UULAkQKtbJxscQ6sHBz+8KE3r0MYzYKw9zd3LYWbHvHNlzXBRH9IfS3N0B/M01jDGmQADt
QkUmMmiDqY7St+b1Doo6QB/o6/3uEKwbenUjGZ+idhIDDqBDWdtsv/vn7Quw0VOyfn32/fn7i/PX
l6effnBcQHTlPnw8eiHOfvwsqB4BDRj7RAluxddY+QKGxT0KIxYF/GswvbFoak5KQq+3Fxd6Z2CD
hyGwOhZtTgzPuWzGQuMcDWc97UNd74IYZTpAck6dUHkInUrBeGnDJx5UoSto6TDLDJ3VRode+jSR
OXVE+6gxSB80dknBILikCV5RnXNtosKKd5z0SZwBpLSNtoUIGeWgetvTzn6LyeZ7iTnqDE/azlrR
X4UuruF1rMoshUjuVWhlSXfDcoyWcfRDu6HKeA1pQKc7jKwb8qz3YoFW61XIc9P9xy2j/dYAhi2D
vYV555LKEahGF4upRIiNeOcglF/gq116vQYKFgw3lmpcRMN0Kcw+geBarFMIIIAn12B9MU4ACJ2V
8BPQx052QBZYDRC+2SwO/xpqgvitf/lloHldZYd/FyVEQYJLV8IBYrqN30LgE8tYnH14Nw4ZOSoF
FX9tsIAcHBLK8jnSTvUyvGM7jZTMlrqewdcH+EL7CfS6072SZaW7D7vGIUrAExWR1/BEGfqFWF5k
YU9wKuMOaKyNt5jhGTN329t8DsTHtcwyXRF9/vbiDHxHLNdHCeJ9njMYjvMluGWri734DFwHFG7o
wusK2bhCF5Y29Rex12wwM4siR729OgC7TpT97PfqpTqrJFUu2hFOm2GZgvMYWRnWwiwrs3anDVLY
bUMUR5lhlpheVlQw6fME8DI9TTgkglgJDwOYNDPvWqZ5bSrksnQOehRULijUCQgJEhdPvBHnFTkn
eotKmYMy8LDcVelqXWMyHTrHVKSPzX88/Xxx/p4K11+8bL3uAeacUCQw4aKFEyxJw2wHfHHLzJCr
ptMhntWvEAZqH/jTfcXVECc8QK8fJxbxT/cVn1Q6cSJBngEoqKbsigcGAE63IblpZYFxtXEwftyS
sxYzHwzlIvFghC4scOfX50TbsmNKKO9jXj5il2JZahpGprNbAtX96DkuS9xWWUTDjeDtkGyZzwy6
3vTe7Cu2cj89KcRDk4BRv7U/hqlG6jXV03GYbR+3UFirbewvuZMrddrNcxRlIGLkdh67TDashHVz
5kCvbLcHTHyr0TWSOKjKR7/kI+1heJhYYvfiFNORjk2QEcBMhtSnQxrwodAigAKhatPIkdzJ+OkL
b46ONbh/jlp3gW38ARShrv2kMwVFBZwIX35jx5FfEVqoR49F6HgqucwLW5eEn+0avcrn/hwHZYCS
mCh2VZKvZMSwJgbmVz6x96RgSdt6pL5Kr4cMizgH5/TLHg7vy8XwxolBrcMIvXY3ctdVRz55sMHg
0YM7CeaDr5It6P6yqSNeyWGRHz5ttR/q/RCx2g2a6s3eKMR0zG/hnvVpAQ9SQ8NCD++3gd0i/PDa
GEfW2sfOKZrQvtAe7LyC0KxWtC3jHF8zvqj1AlqDe9Ka/JF9qgtT7O+Bc0lOTsgC5cFdkN7cRrpB
J50w4uMxfLYwpfLr9vSGfreQtzIrwPWCqA6r63+11fXj2KZTBuuOfjd2l7vL3TBu9KbF7NiU/6Nn
pkpYvziX9RGiM5jxuQuzFhlc6l90SJLkN+Qlv/nb+US8ef8T/P9afoC4Co/HTcTfAQ3xpqggvuTz
nXTwHk8O1Bw4Fo3CM3QEjbYq+I4CdNsuPTrjtog+0uCfZbCaUmAVZ7XhizEARZ4gnXlu/QRTqA+/
zUmijjdqPMWhRRnpl0iD/Ycr8EDCkW4Zr+tNhvbCyZK0q3k1ujh/c/b+41lcf0EONz9HThbFLwDC
6eg94gr3wybCPpk3+OTacZx/kFk54DfroNMc1MCgU4QQl5Q20ORLFxIbXCQVZg5EuVsU8xhbAsvz
2bB6C4702Ikv7zX0npVFWNFY76K13jw+BmqIX7qKaAQNqY+eE/UkhJIZHlLix/Fo2BRPBKW24c/T
m+3CzYzr0yY0wS6m7awjv7vVhWums4ZnOYnwOrHLYA4gZmmiNrO5ezDtQy70nRmg5WifQy6TJquF
zEFyKcinywtA07tnyVhCmFXYnNEBK0rTZNtkp5xKm0SJEY46ovPXuCFDGUOIwX9Mbtge4CE30fBp
WYBOiFL8VDhdVTNfswRzSETUGyg82Kb5yxdhj8I8KEfI89aRhXmi28gYrWSt588PovHV87bSgbLS
c+8k6bwEq+eyyQGozvLp06cj8W/3ez+MSpwVxQ24ZQB70Gu5oNd7LLeenF2tvmdv3sTAj/O1vIIH
15Q9t8+bnFKTd3SlBZH2r4ER4tqElhlN+45d5qRdxRvN3II3rLTl+DlP6WYcTC1JVLb6giFMOxlp
IpYExRAmap6mIacpYD12RYOHwDDNqPlFfgGOTxHMBN/iDhmH2mv0MKlg03KPRedEjAjwiAqoeDQ6
RUvHoADP6eVOozk9z9O6Pb/wzN081afFa3vhjeYrkWxRMsw8OsRwzhN6rNp62MWdLOpFLMX8yk04
dmbJr+/DHVgbJK1YLg2m8NAs0ryQ1dyYU1yxdJ7WDhjTDuFwZ7rnh6xPHAygNAL1TlZhYSXavv2T
XRcX0w+0j3xoRtLlQ7W9O4mTQ0neqaKL43Z8SkNZQlq+NV/GMMp7SmtrT8AbS/xJJ1WxeN274sE9
R9fk+uoGrt9o73MAOHRdkFWQlh09HeHcUWXhM9PuuXABPxSiE263aVU3STbVNwRM0WGb2o11jac9
f3XnyULrrYCTX4AHfKhLxcFxMFU2SE+s9DRHAU7EUqcoYvdIk3/6pyzQy3vBvhL4FEiZxdQcxDVJ
pCvLrvaE4zO+gsBR8QjqK3Nq5iE2wZzd6B17cKcxoaKncNwt5ey1wg0WU5tvPe9uZPCoITuwfC/e
TLB7cYP47kREzyfiz51AbF7u8OohIMOTRfxkEfo+IXW9On7R2rl+4NuBsBfIy+tHTzdLZzS9cKjG
+v6+uugRA9ANyO4ylYvDJwqxY5x/L1QNpZ3Xfk6lGeMR7ANbdaVPH7dnMujo1Qyiim2r0BzVZvxf
O4g51qz1EJ8ARaXBFtCeWjeFL53iQ3uzGBYmavT8lUUpmQ5tjuE3vB0E3muCukK1d9NUl5FbsAM5
AX1WkLfA2oYDQeEjeCikm0xo0b7qbAv/kYvHlen7Nhd7WH7z9V14ugI+WJY/QFCPmE6rP5Cp9rLM
YxfmAfv19/Pfw3nvLr57NJV0r2FaYSiFhczrhN+gSWzKY5tqMCKJW0GRW96Gn/pm8OAHiyPqpvom
vGv63P+uuesWgZ252d3tzd0/4OXSQPfdzy9DNOAwTxPiQTXjrcAO6wJXjCe6qGA4Zak/SH63E850
j1a4D4wpYcAEKLGpxt5ozU0yd79jhcwh32Hqnucb1NWdafcOOHY5/iGKlqsB8Lk94kslHgvNgew3
0qVUUy4anMrVSk0TvBBtSsEGFbj0vEjjvr6j+6xkonbG68RbQwCE4SZdiuhWGwNjQEDDF7NyfYhz
PYSgoamK0inLVOmCM0jaxQVwMWeOqL/JTHJd5SiTmPBTTVVWEBWM9PWdXLgwVOvZAjWJjE2ibgzq
psdE3+aIQ3C1jDkDyPkqjjQ86gAh+GiQczcRFypPp/Yd8Muz9qxzOrEMIfNmI6ukbu/58LdJU/Gd
MwKd/MQFdlIVrWR2OMVFLLX84SCFyQL7/SvtZHtBxh0HnMdW6z2craiHToE95uy0Y3sMN6df7D1f
7v0yC7oV1jXytlnLffZuE1gKc2kV6UqdO+C3+iIdvp6RM5voJjh8BHLvnrvyy3OtWmMnxaLhPHMV
Q//mFDy6S7Z46EK0Hhf0rz7rOPp2fF9vWGbphQZ7GlsqatdqUPG0o43biBor6e6JqP1q6UdG1B78
B0bU+vo6MDgaH60PBuun7wm9WU24d8G1jAB9pkAk3Nnr3CRmTGbkViND2Jt+Gdm7WFlnOkecjJlA
juxfEkQg+M435ZZuencymXGHIlpfuujx9xcfXp9eEC2ml6dv/uP0e6pWwfRxx2Y9OOWQF4dM7UOv
LtZNP+gKg6HBW2wHLlfkwx0aQu99b3N2AMLwQZ6hBe0qMvf1vg69AxH9ToD43dPuQN2nsgch9/wz
XXzv1hV0ClgD/ZSrDc0vZ8vWPDI7FywO7c6Eed8mk7WM9nJt+xbOqfvrqxPtt+rr+PbkAce2+pRW
AHPIyF82hWyOEthEJTsq3RvyqWQWj2GZqyxACufSuVKNblNjULV/FX8Fyi7BfTB2GCf2Wltqx+ly
Ze9rxr2wuYwNQbxzUKP+/FxhX8hsDxWCgBWevjCMETH6T28w2e3YJ0pcHdKJy0NUNtf2F66ZdnL/
luKma20v3lFcucHbTtB42WTuRqrt0+tAzh9l54ulU+IPmu8I6NyKpwL2Rp+JFeJsJ0IIJPWGIVYN
Eh31rVkO8mg3HewNrZ6Jw33n8dzzaEI8399w0Tnypnu84B7qnh6qMaeeHAuM5Wv7DtqJ7wgyb+8I
umnHcz5wT1Ff8Apfb6+eH9tkK/I7vnYUCZXZjBzDfuWUqd15u5vTnZilmlAdE8ZszjFN3eLagco+
wb4Yp1ervycOMvu+DGnkvR8u8jE9vFurR11MLesdw5RE9ESNaVrO6QaNu30y7k+3VVt9IHxS4wFA
eioQYCGYnm50Kud2XP4aPdNR4ayhezHdjHvoSAVV0fgcwT2M79fi1+1OJywf1J1RNP25QZcD9ZKD
cLPvwK3GXkpkv0noTr3lgz0uAB9WHe7//AH9+/VdtvuLu/xq2+rl4AEp9mWxJBArJTokMo9jMDKg
NyPS1lhHbgQdL6Fo6egyVDs35At0/KjMEG+9pQCDnNmp9gCsUQj+D1/Qrqc=
""")
##file ez_setup.py
EZ_SETUP_PY = convert("""
eJzNWmtv49a1/a5fwSgwJGE0NN8PDzRFmkyBAYrcIo8CFx5XPk+LHYpUSWoctch/v+ucQ1KkZDrt
RT6UwcQ2ebjPfq6195G+/upwanZlMZvP538sy6ZuKnKwatEcD01Z5rWVFXVD8pw0GRbNPkrrVB6t
Z1I0VlNax1qM16qnlXUg7DN5EovaPLQPp7X192PdYAHLj1xYzS6rZzLLhXql2UEI2QuLZ5VgTVmd
rOes2VlZs7ZIwS3CuX5BbajWNuXBKqXZqZN/dzebWbhkVe4t8c+tvm9l+0NZNUrL7VlLvW58a7m6
sqwS/zhCHYtY9UGwTGbM+iKqGk5Qe59fXavfsYqXz0VeEj7bZ1VVVmurrLR3SGGRvBFVQRrRLzpb
utabMqzipVWXFj1Z9fFwyE9Z8TRTxpLDoSoPVaZeLw8qCNoPj4+XFjw+2rPZT8pN2q9Mb6wkCqs6
4vdamcKq7KDNa6OqtTw8VYQP42irZJi1zqtP9ey7D3/65uc//7T964cffvz4P99bG2vu2BFz3Xn/
6Ocf/qz8qh7tmuZwd3t7OB0y2ySXXVZPt21S1Lc39S3+63e7nVs3ahe79e/9nf8wm+15uOWkIRD4
Lx2xxfmNt9icum8PJ8/2bfH0tLizFknieYzI1HG90OFJkNA0jWgsvZBFImJksX5FStBJoXFKEhI4
vghCx5OUJqEQvnTTwI39kNEJKd5YlzAK4zhMeUIinkgWBE7skJQ7sRd7PE1fl9LrEsAAknA3SrlH
RRS5kvgeiUToiUAm3pRF/lgXSn2XOZLFfpqSyA/jNI1DRngqQ+JEbvKqlF4XPyEJw10eCcY9zwti
6capjDmJolQSNiElGOsSeU4QEi8QPBCuoCyOpXD8lJBARDIW4atSzn5h1CNuEkKPhBMmJfW4C30c
n/rUZcHLUthFvlBfejQM/ZRHiGss44DwOHU9CCKpk0xYxC7zBfZwweHJKOYe96QUbuA4qR8F0iPB
RKSZ64yVYXCHR2jIfeJ4YRSEEeLDXD9xHBI7qfO6mF6bMOZ4ETFKaeLEscfClIQ+SQLfJyHnk54x
YsJODBdBRFgCX6YxS9IwjD0RiiREOgqasPh1MVGvTSJQSURIJ4KDPCaiwA0gzYORcPhEtAEqY994
lAiCGnZ9jvdRRl4iYkpCGhJoxMXrYs6R4pGfypQ6EBawwAvS2PEDLpgnmMO8yUi5Y99EAUsD6VMZ
kxhZ6AuW+MKhHsIdByn1XhfT+4ZKknqu41COMHHUBCQJzn0EPgqcJJoQc4Ez0nGigMqIEI/G3IFa
8GyAxHYSN2beVKAucCZyIzf1hGB+KINYIGpuxHhEXA9SvXhKygXOSDcBQAF8uUSqEC9MWQop0uUx
jRM5gVbsAmeEI3gcRInH0jShksbwdOIgex3EPHangu2Pg0SokG4kOYdhYRi6QRK4LAZ+8TRJo3BK
ygVaUYemru8SRqjvOXAGcC6WQcBCAEXsylel9BYhSST2jHggqfRRUVSmQcQcuAqoJ6YSJhhblCi0
BvD7HuM0ZbFHmQwAX14kvYTIKbQKxxYJkUqeOFAHBYmMlb4ApocxAIMnbjQV6XBsEZHAKi7BKm7s
uELAuTHIKaQMhEeiKZQJL2KUcF9GAISAMUKS2A2QONyPKWPc5yGfkBKNLULBJGD5xHUjMFGSBLEH
EWDMMEhR2lPAGV2wGwsjIsOYwr/oHlANkQNDgsBHgYVkChuisUXUkwmJQw9kD9ilPkjaQai5CCVa
idCfkBJfwJ2DGMmUcOaTyA1F6LohyhAtRQIInMyX+IIJSCLTMAALcGC5I2kUM+lKD2HAI2+qAuKx
RQE4lgBvJVoGFGDgB67rSi4S38W/eEqX5KIbclQv5KXwSMrBHyoFAeCJ76jGynldSm8Ro8RPgA3o
OYLEZ47KWWQbnM3ALJM0kIwtcmPPjQFyCHTKmRs6YeqQMKG+QJ2n4VSk07FF0J0FDpoZV3mYBmkk
AiapcBLYypypSKcXyIAkQ2MHbvWThEdAJyKEEwG8WOQHU/1dK6W3SAqE1hchcWPqegxhYmHg0hjc
C+YXU0ySjvmIEZSNKxVqEk9wAJOb+mC2mIaphx4HUn6dDSYCjDf1rKlOd2bg2pF6l2e0m7fQu8/E
L0xg1Pio73xQI1G7Fg+H62ZcSGv7heQZun2xxa0ldNoWmAfXlhoAVnfagExa3X01M3bjgXmoLp5h
tmgwLigR+kV7J34xdzHfdcsgp1351aaXct+JfjjLUxfmLkyD79+r6aRuuKgw1y1HK9Q1Vya1FrTz
4Q2mMIIxjH9lWcu/lHWd0Xww/mGkw9/7P6zmV8JuejNHj1ajv5Q+4pesWXrmfoXgVoV2l3HoxXCo
F7Xj1eZimFv3am0pqcVmMNCtMSluMapuytpmxwq/mWTqX+AiJ6eNG87aIGFs/ObYlHv4gWG6PGEU
Lfhtb/bgpEDN9XvyGbHE8PwFriLKQXCeMu1Amp0Z5x9bpR+telcec66mWWJ8PZTWTebFcU9FZTU7
0lgYhHvBWpaagAvlXUti6u2VOhZcvyKsx5EjHi010i6fdxnbdbsLaK2OJow8a3G7WNlQ0njpUW2p
5AyOMXaiGh2QPGeYuek5EwRfIyNNgmuVixL+yCtB+OmsPvb4KAfqabfr7dqzCS2mabXU0qjQqrQO
0ScWrCx4bXzTqXEgSBTlVHhElVXWZAhd8TQ4zzARb+0vC6HPE8zZCDd6wallrnz44vmI0rI9bBCt
MH2WU5VH7CSMKqbOiLUXdU2ehDngOBfd46POl4pktbB+PNWN2H/4RfmrMIEoLNLgnjnZIFRBizJe
paAyxpx62F2G6p/PpN4aFIL9G2tx+Py0rURdHism6oVCGLX9vuTHXNTqlGQAoJePTU2g6jjyoHXb
cnVGEpVym3PRDOqy9dhFCXZlt74otDMGdEViw7OiapbOWm0yALkWqPud3g1Pd2h3zLdtA7PVwLxR
MkyAAOyXskYO0g9fQPj+pQ6Qhg5pH13vMBJtt8m1nJ81fr+Zv2ldtXrXyh6qMBbwV7Py27KQecaa
QRxgokFOBstluVzduw9DYhgmxX9KBPOfdufCmCiF5fvNTb3qy7wrb33K+akYc8GckWLRqGrrqwdw
ok72dPm0J3mqkI5FgSy3rb/kAsnTLb+Sp8pLVTmwScCWTkOZVXWzBmGoSllAwqnLCuvtzwPlF/aF
vE/Fp2L57bGqIA1IbwTcVBeUtgKhndNc2KR6qu+dh9fp7MWwfpchZzN6VBT7fdn8qQRwD3KI1PWs
LcR8/OZ6WKv3F5X+oF75Gk7RXFB+HtHpMHsNr75UxL83uapSR6aOWPW7FyhUFy05U4CVl8w0IBos
jQ1ZY86DdUPxX0qpBpDViX9Hqb/FqOqe2vWaTg3KP54ZcoIFS8N9HfUpCmHNkeRnI1pKGdNG94FC
BWahHjJrh3zMTdJ23enGGkDX25sanfZNrRrt+bAWLg68TeJD7pAplM+sN+OGsCZfBLTfoAE3FPD3
MiuWHWF0S424umJKnO6Kvwd3d420Qp/uddRd3dRLI3Z1p4rhmy9lphLoIIhix06dui+2EXqrS6ci
hyDljbrzUl4+jVap1lvFZfyuurDSfiZVsVR+fvv7XebzkBYrW3CuX8ryG50S6nOSpfgiCvUHzDlA
2dlO5AfV5X002TboNPpUQSui8l99krNUrpgB5dcWoGqmbu1RzoWAI/EK6lD1uQBd8awglmB4rWv9
9hDWNSjbs3ZLoHHb0Zx3hMq8y2Z7NlsCEcWd8rAWsydsp5orXgrDNTuEF0o0z2X1ud10bR0MYZS0
Ie2ncAopNErcAEwVisADTPfoegEknyuxrZxKtAQ0NMBe/Z5RRFKsr1JmALpX7ZPOsrWqpqvX0D/o
ZG0yNUe2bVIuxOGd+bG86LTG2dnBsKa6eq63uKAyXXItPtj4WR5Esbxa9rX1A1r82+cqawA+iDH8
q5trYPjntfog8FlFT3UArFJlCGhkZVUddXLk4kKYjvswPVTP3Qi9vsPE7mo/VJsauWGArcaP5Wqs
sUERbY3BivX8mc7hTjywtR1m6O5fwuinRsC7SwjABnd6F5aXtViuriCibu600OHzls060IKCufql
g63Zv3Mp/t4j05foQb6spxj7zLkfX/uIVHPsB3RL7aqOIF5qnS8+en6tbzajQo/VVxLPa14fJ/Rc
7lx3WeOhYTQz6Jip0hhMCqzc72GoPWoLu8Mb0o5f3dXGSLs4BxdoP6/eqLOVh5VO02exqHRaC0vR
+G+mirJU+fmCq5Ta1xyCRccC897nZW+WyGsxiMawF7e329Zb2621wQDo2I7tLv7jrv9/AfAaXNUU
TOsyF6jViUG46+NBJqZXv+rRK7Evv2i81ZEw33DQ8y6YowH05r+BuxfN92SX3RbVP8bNymDOGnY7
16PfvzG+4ecrzfzkjPZya/H/ScnXyqwX/JtSrrL5pbrryu1hPKFrZzsrJD6sUuyPwDGdKerJyxmq
dvmdHNCrrzU/+2W0pQ6gSvPl/Mertmi+7hBlDhB80kRUqcNeJCGapHNCz1cvCFwsf0A/Ne++jGMf
TuOJcm6+ZnP9TRR7tWjHreOhZ6huiKnPAP2zfmqpIqHHLG/emnNhyHxSs+JJYfIwj6t2AlLdVneO
3Is9u0R33ef+Wv2pVizPfbUW0rGhps1FRRfnZ/2xsnr3oT2Slh2tvngsLXu6M0OgIen7ufrjprrD
vzXQAgNE22ualqzbyAb97uvl6qF/2a5hcU+eBzVWzOdmVjA0PXQMQoAhsulmBv39oU13134SjSlb
dX85nKW3umfYbtu8713Sylhb2i3v2qaoc8C7S2P3pME8uIGedi1IxXbL+adi+P2fT8Xy/m+/PrxZ
/TrXDcpqOMjotwdo9AJmg8r1N7BySygc+Gp+XaYdJhpV8f/7Oy3Y1s330l09YBDTjnyjn5qHGF7x
6O7hZfMXz21OyLZB6lUfOGAGMzo/bjaL7VaV7Ha76D/1yJVEqKmr+L2nCbH7+959wDtv38JZplQG
BDaonX65d/fwEjNqlDjLVIvM9X+XVxF7
""")
##file distribute_setup.py
DISTRIBUTE_SETUP_PY = convert("""
eJztG2tz2zbyu34FTh4PqYSi7TT3GM+pM2nj9DzNJZnYaT8kHhoiIYk1X+XDsvrrb3cBkCAJyc61
dzM3c7qrIxGLxWLfuwCP/lTs6k2eTabT6Xd5Xld1yQsWxfBvvGxqweKsqnmS8DoGoMnliu3yhm15
VrM6Z00lWCXqpqjzPKkAFkdLVvDwjq+FU8lBv9h57JemqgEgTJpIsHoTV5NVnCB6+AFIeCpg1VKE
dV7u2DauNyyuPcaziPEoogm4IMLWecHylVxJ4z8/n0wYfFZlnhrUBzTO4rTIyxqpDTpqCb7/yJ2N
dliKXxsgi3FWFSKMV3HI7kVZATOQhm6qh98BKsq3WZLzaJLGZZmXHstL4hLPGE9qUWYceKqBuh17
tGgIUFHOqpwtd6xqiiLZxdl6gpvmRVHmRRnj9LxAYRA/bm+HO7i99SeTa2QX8TekhRGjYGUD3yvc
SljGBW1PSZeoLNYlj0x5+qgUE8W8vNLfql37tY5Tob+vspTX4aYdEmmBFLS/eUk/Wwk1dYwqI0eT
fD2Z1OXuvJNiFaP2yeFPVxcfg6vL64uJeAgFkH5Jzy+QxXJKC8EW7F2eCQObJrtZAgtDUVVSVSKx
YoFU/iBMI/cZL9fVTE7BD/4EZC5s1xcPImxqvkyEN2PPaaiFK4FfZWag90PgqEvY2GLBTid7iT4C
RQfmg2hAihFbgRQkQeyF/80fSuQR+7XJa1AmfNykIquB9StYPgNd7MDgEWIqwNyBmBTJdwDmmxdO
t6QmCxEK3OasP6bwOPA/MG4YHw8bbHOmx9XUYccIOIJTMMMhtenPHQXEOviiVqxuhtLJK78qOFid
C98+BD+/urz22IBp7Jkps9cXb159ensd/HTx8ery/TtYb3rq/8V/8XLaDn36+BYfb+q6OD85KXZF
7EtR+Xm5PlFOsDqpwFGF4iQ66fzSyXRydXH96cP1+/dvr4I3r368eD1YKDw7m05MoA8//hBcvnvz
Hsen0y+Tf4qaR7zm85+kOzpnZ/7p5B340XPDhCft6HE1uWrSlINVsAf4TP6Rp2JeAIX0e/KqAcpL
8/tcpDxO5JO3cSiySoG+FtKBEF58AASBBPftaDKZkBorX+OCJ1jCvzNtA+IBYk5IyknuXQ7TYJ0W
4CJhy9qb+OldhN/BU+M4uA1/y8vMdS46JKADx5XjqckSME+iYBsBIhD/WtThNlIYWi9BUGC7G5jj
mlMJihMR0oX5eSGydhctTKD2obbYm+yHSV4JDC+dQa5zRSxuug0ELQD4E7l1IKrg9cb/BeAVYR4+
TECbDFo/n97MxhuRWLqBjmHv8i3b5uWdyTENbVCphIZhaIzjsh1kr1vddmamO8nyuufAHB2xYTlH
IXcGHqRb4Ap0FEI/4N+Cy2LbMoevUVNqXTGTE99YeIBFCIIW6HlZCi4atJ7xZX4v9KRVnAEemypI
zZlpJV42MTwQ67UL/3laWeFLHiDr/q/T/wM6TTKkWJgxkKIF0XcthKHYCNsJQsq749Q+HZ//in+X
6PtRbejRHH/Bn9JA9EQ1lDuQUU1rVymqJqn7ygNLSWBlg5rj4gGWrmi4W6XkMaSol+8pNXGd7/Mm
iWgWcUraznqNtqKsIAKiVQ7rqnTYa7PaYMkroTdmPI5EwndqVWTlUA0UvNOFyflxNS92x5EP/0fe
WRMJ+ByzjgoM6uoHRJxVDjpkeXh2M3s6e5RZAMHtXoyMe8/+99E6+OzhUqdXjzgcAqScDckHfyjK
2j31WCd/lf326x4jyV/qqk8H6IDS7wWZhpT3oMZQO14MUqQBBxZGmmTlhtzBAlW8KS1MWJz92QPh
BCt+JxbXZSNa75pyMvGqgcJsS8kz6ShfVnmChoq8mHRLGJoGIPiva3Jvy6tAckmgN3WKu3UAJkVZ
W0VJLPI3zaMmERVWSl/a3TgdV4aAY0/c+2GIprdeH0Aq54ZXvK5LtwcIhhJERtC1JuE4W3HQnoXT
UL8CHoIo59DVLi3EvrKmnSlz79/jLfYzr8cMX5Xp7rRjybeL6XO12sxC1nAXfXwqbf4+z1ZJHNb9
pQVoiawdQvIm7gz8yVBwplaNeY/TIdRBRuJvSyh03RHE9Jo8O20rMnsORm/G/XZxDAUL1PooaH4P
6TpVMl+y6RgftlJCnjk11pvK1AHzdoNtAuqvqLYAfCubDKOLzz4kAsRjxadbB5yleYmkhpiiaUJX
cVnVHpgmoLFOdwDxTrscNv9k7MvxLfBfsi+Z+31TlrBKspOI2XE5A+Q9/y98rOIwcxirshRaXLsv
+mMiqSz2ARrIBiZn2PfngZ+4wSkYmamxk9/tK2a/xhqeFEP2WYxVr9tsBlZ9l9dv8iaLfrfRPkqm
jcRRqnPIXQVhKXgtht4qwM2RBbZZFIarA1H698Ys+lgCl4pXygtDPfy6a/G15kpxtW0kgu0leUil
C7U5FePjWnbuMqjkZVJ4q2i/ZdWGMrMltiPveRL3sGvLy5p0KUqwaE6m3HoFwoXtP0p6qWPS9iFB
C2iKYLc9ftwy7HG44CPCjV5dZJEMm9ij5cw5cWY+u5U8ucUVe7k/+BdRCp1Ctv0uvYqIfLlH4mA7
Xe2BOqxhnkXU6yw4BvqlWKG7wbZmWDc86TqutL8aK6na12L4jyQMvVhEQm1KqIKXFIUEtrlVv7lM
sKyaGNZojZUGihe2ufX6twDVAVs/veTYxzJs/Rs6QCV92dQue7kqCpI9b7HI/I/fC2DpnhRcg6rs
sgwRHexLtVYNax3kzRLt7Bx5/uo+j1GrC7TcqCWny3BGIb0tXlrrIR9fTT3cUt9lS6IUl9zR8BH7
KHh0QrGVYYCB5AxIZ0swuTsPO+xbVEKMhtK1gCaHeVmCuyDrGyCD3ZJWa3uJ8ayjFgSvVVh/sCmH
CUIZgj7waJBRSTYS0ZJZHptul9MRkEoLEFk3NvKZShKwliXFAAJ0iT6AB/yWcAeLmvBd55QkDHtJ
yBKUjFUlCO66Au+1zB/cVZOF6M2UE6Rhc5zaqx579uxuOzuQFcvmf1efqOnaMF5rz3Ilnx9KmIew
mDNDIW1LlpHa+ziXraRRm938FLyqRgPDlXxcBwQ9ft4u8gQcLSxg2j+vwGMXKl2wSHpCYtNNeMMB
4Mn5/HDefhkq3dEa0RP9o9qslhnTfZhBVhFYkzo7pKn0pt4qRSeqAvQNLpqBB+4CPEBWdyH/Z4pt
PLxrCvIWK5lYi0zuCCK7DkjkLcG3BQqH9giIeGZ6DeDGGHahl+44dAQ+DqftNPMsPa1XfQizXap2
3WlDN+sDQmMp4OsJkE1ibAjIGRDFMp8zNwGGtnVswVK5Nc07eya4svkh0u2JIQZYz/Quxoj2TXio
rNlmFZp2cUPeGzxWqEZ7lggysdWRGZ9ClHX8929f+8cVHmnh6aiPf0ad3Y+ITgY3DCS57ClKEjVO
1eTF2hZ/urZRtQH9sCU2ze8hWQbTCMwOuVskPBQbUHahO9WDMB5X2Gscg/Wp/5TdQSDsNd8h8VJ7
MObu168V1h09/4PpqL4QYDSC7aQA1eq02Vf/ujjXM/sxz7BjOMfiYOju9eIjb7kE6d+ZbFn1y6OO
A12HlFJ489DcXHfAgMlIC0BOqAUiEfJINm9qTHrRe2z5rrM5XecMEzaDPR6Tqq/IH0hUzTc40Tlz
ZTlAdtCDla6qF0FGk6Q/VDM8ZjmvVJ1txdGRb++4AabAhy7KY31qrMp0BJi3LBG1UzFU/Nb5DvnZ
KpriN+qaa7bwvEHzT7Xw8SYCfjW4pzEckoeC6R2HDfvMCmRQ7ZreZoRlHNNteglOVTbuga2aWMWJ
PW1056q7yBMZbQJnsJO+P97na4beeR+c9tV8Bel0e0SM6yumGAEMQdobK23burWRjvdYrgAGPBUD
/5+mQESQL39xuwNHX/e6CygJoe6Ske2xLkPPuUm6v2ZKz+Wa5IJKWoqpx9ywRdiaObqxMHZBxKnd
PfEITE5FKvfJpyayIuw2qiKxYUXq0Kbq/CAs8KWnc+6+qwKepO0rnN6AlJH/07wcO0Cr55HgB/zO
0Id/j/KXkXw0q0uJWgd5OC2yuk8C2J8iSVbVbU60n1WGjHyY4AyTksFW6o3B0W4r6vFjW+mRYXTK
hvJ6fH+PmdjQ0zwCPuvl823Q63K6IxVKIAKFd6hKMf6y5dd7FVRmwBc//DBHEWIIAXHK71+hoPEo
hT0YZ/fFhKfGVcO3d7F1T7IPxKd3Ld/6jw6yYvaIaT/Kuf+KTRms6JUdSlvslYca1Pol+5RtRBtF
s+9kH3NvOLOczCnM1KwNilKs4gdXe/ouuLRBjkKDOpSE+vveOO839oa/1YU6DfhZf4EoGYkHI2w+
Pzu/abMoGvT0tTuRNakoubyQZ/ZOEFTeWJX51nxewl7lPQi5iWGCDpsAHD6sWdYVtplRiRcYRiQe
S2OmzgslGZpZJHHtOrjOwpl9ng9O5wwWaPaZiylcwyMiSRWWhpIK64FrApopbxF+K/lj7yH1yK0+
E+RzC5VfS2lHIzC3qUTp0NFCdzlWHRViG9fasbGt0s62GIbUyJGqDpX9KuR0oGicO+rrkTbb3Xsw
fqhDdcS2wgGLCoEES5A3sltQSONWT5QLyZRKiBTPGczj0XGXhH5u0Vz6pYK6d4RsGG/IiEOYmMLk
beVj1tY/0/c/yvNeTLbBK5bgjHrliT1xH2gLxXzEsCA3rjyu4tz1rhAjvmGr0jhIevXh8g8mfNYV
gUOEoJB9ZTRvc5nvFpgliSzM7aI5YpGohbo1h8EbT+LbCIiaGg1z2PYYbjEkz9dDQ30233kwih65
NGi3bodYVlG8oEMF6QtRIckXxg9EbFHm93EkIvn6Q7xS8OaLFpXRfIjUhbvU6w41dMfRrDj6gcNG
mV0KChsw1BsSDIjkWYjtHuhYW+WNcKBlA/XH/hqll4aBVUo5VuZ1PbUlyyZ8kUUqaNCdsT2byuby
Nl8nvB4daN/7+2hWqerJijTAYfOwlqaKceFzP0n7MiYLKYcTKEWiuy//RJ3rdyO+Igfdm4QeaD4P
eNOfN24/m7rRHt2hWdP5snR/dNZr+PtMDEXbz/5/rzwH9NJpZyaMhnnCmyzcdClc92QYKT+qkd6e
MbSxDcfWFr6RJCGo4NdvtEioIi5Yyss7PMvPGacDWN5NWDat8bSp3vk3N5gufHbmoXkjm7IzvGKT
iLlqAczFA72/BDnzPOUZxO7IuTFCnMZ4etP2A7BpZiaYn/tvXNyw5+20icZB93OsL9O03DMuJVci
WcnG+WLqTz2WCrw4UC0wpnQnM+oiNR0EKwh5zEiXAErgtmQt/gzlFSN9j1jvr7vQgD4Z3/XKtxlW
1Wke4Vth0v9js58AClGmcVXRa1rdkZ1GEoMSUsMLZB5VPrvFDTjtxRB8RQuQrgQRMrpGDYQqDsBX
mKx25KAnlqkpT4iIFF+5o8siwE8imRqAGg/22JUWg8Yud2wtaoXLnfVvUKiELMyLnfkbCjHI+NWN
QMlQeZ1cAyjGd9cGTQ6APty0eYEWyygf0AMYm5PVpK0+YCXyhxBRFEivclbDqv898EtHmrAePepC
S8VXAqUqBsf6HaTPC6hAI1et0Xdlmq4FccvHPwcB8T4Z9m1evvwb5S5hnIL4qGgC+k7/enpqJGPJ
ylei1zil8rc5xUeB1ipYhdw3STYN3+zpsb8z94XHXhocQhvD+aJ0AcOZh3hezKzlQpgWBONjk0AC
+t3p1JBtiNSVmO0ApaTetR09jBDdid1CK6CPx/2gvkizgwQ4M48pbPLqsGYQZG500QNwtRbcWi2q
LokDU7kh8wZKZ4z3iKRzQGtbQwu8z6DR2TlJOdwAcZ2MFd7ZGLCh88UnAIYb2NkBQFUgmBb7b9x6
lSqKkxPgfgJV8Nm4AqYbxYPq2nZPgZAF0XLtghJOlWvBN9nwwpPQ4SDlMdXc9x7bc8mvCwSXh153
JRW44NVOQWnnd/j6v4rxw5fbgLiY7r9g8hRQRR4ESGoQqHcpie42ap6d38wm/wIwBuVg
""")
##file activate.sh
ACTIVATE_SH = convert("""
eJytVU1v4jAQPW9+xTT0ANVS1GsrDlRFAqmFqmG72m0rY5IJsRRslDiktNr/vuMQ8tFQpNU2B4I9
H36eeW/SglkgYvBFiLBKYg0LhCRGD1KhA7BjlUQuwkLIHne12HCNNpz5kVrBgsfBmdWCrUrA5VIq
DVEiQWjwRISuDreW5eE+CtodeLeAnhZEGKMGFXqAciMiJVcoNWx4JPgixDjzEj48QVeCfcqmtzfs
cfww+zG4ZfeD2ciGF7gCHaDMPM1jtvuHXAsPfF2rSGeOxV4iDY5GUGb3xVEYv2aj6WQ0vRseAlMY
G5DKsAawwnQUXt2LQOYlzZoYByqhonqoqfxZf4BLD97i4DukgXADCPgGgdOLTK5arYxZB1xnrc9T
EQFcHoZEAa1gSQioo/TPV5FZrDlxJA+NzwF+Ek1UonOzFnKZp6k5mgLBqSkuuAGXS4whJb5xz/xs
wXCHjiVerAk5eh9Kfz1wqOldtVv9dkbscfjgjKeTA8XPrtaNauX5rInOxaHuOReNtpFjo1/OxdFG
5eY9hJ3L3jqcPJbATggXAemDLZX0MNZRYjSDH7C1wMHQh73DyYfTu8a0F9v+6D8W6XNnF1GEIXW/
JrSKPOtnW1YFat9mrLJkzLbyIlTvYzV0RGXcaTBfVLx7jF2PJ2wyuBsydpm7VSVa4C4Zb6pFO2TR
huypCEPwuQjNftUrNl6GsYZzuFrrLdC9iJjQ3omAPBbcI2lsU77tUD43kw1NPZhTrnZWzuQKLomx
Rd4OXM1ByExVVkmoTwfBJ7Lt10Iq1Kgo23Bmd8Ib1KrGbsbO4Pp2yO4fpnf3s6MnZiwuiJuls1/L
Pu4yUCvhpA+vZaJvWWDTr0yFYYyVnHMqCEq+QniuYX225xmnzRENjbXACF3wkCYNVZ1mBwxoR9Iw
WAo3/36oSOTfgjwEEQKt15e9Xpqm52+oaXxszmnE9GLl65RH2OMmS6+u5acKxDmlPgj2eT5/gQOX
LLK0j1y0Uwbmn438VZkVpqlfNKa/YET/53j+99G8H8tUhr9ZSXs2
""")
##file activate.fish
ACTIVATE_FISH = convert("""
eJydVm1v4jgQ/s6vmA1wBxUE7X2stJVYlVWR2lK13d6d9laRk0yIr8HmbIe0++tvnIQQB9pbXT5A
Ys/LM55nZtyHx5RrSHiGsMm1gRAh1xhDwU0Kng8hFzMWGb5jBv2E69SDs0TJDdj3MxilxmzPZzP7
pVPMMl+q9bjXh1eZQ8SEkAZULoAbiLnCyGSvvV6SC7IoBcS4Nw0wjcFbvJDcjiuTswzFDpiIQaHJ
lQAjQUi1YRmUboC2uZJig8J4PaCnT5IaDcgsbm/CjinOwgx1KcUTMEhhTgV4g2B1fRk8Le8fv86v
g7v545UHpZB9rKnp+gXsMhxLunIIpwVQxP/l9c/Hq9Xt1epm4R27bva6AJqN92G4YhbMG2i+LB+u
grv71c3dY7B6WtzfLy9bePbp0taDTXSwJQJszUnnp0y57mvpPcrF7ZODyhswtd59+/jdgw+fwBNS
xLSscksUPIDqwwNmCez3PpxGeyBYg6HE0YdcWBxcKczYzuVJi5Wu915vn5oWePCCoPUZBN5B7IgV
MCi54ZDLG7TUZ0HweXkb3M5vFmSpFm/gthhBx0UrveoPpv9AJ9unIbQYdUoe21bKg2q48sPFGVwu
H+afrxd1qvclaNlRFyh1EQ2sSccEuNAGWQwysfVpz1tPajUqbqJUnEcIJkWo6OXDaodK8ZiLdbmM
L1wb+9H0D+pcyPSrX5u5kgWSygRYXCnJUi/KKcuU4cqsAyTKZBiissLc7NFwizvjxtieKBVCIdWz
fzilzPaYyljZN0cGN1v7NnaIPNCGmVy3GKuJaQ6iVjE1Qfm+36hglErwmnAD8hu0dDy4uICBA8ZV
pQr/q/+O0KFW2kjelu9Dgb9SDBsWV4F4x5CswgS0zBVlk5tDMP5bVtUGpslbm81Lu2sdKq7uNMGh
MVQ4fy9xhogC1lS5guhISa0DlBWv0O8odT6/LP+4WZzDV6FzIkEqC0uolGZSZoMnlpxplmD2euaT
O4hkTpPnbztDccey0bhjDaBIqaWQa0uwEtQEwtyU56i4fq54F9IE3ORR6mKriODM4XOYZwaVYLYz
7SPbKkz4i7VkB6/Ot1upDE3znNqYKpM8raa0Bx8vfvntJ32UENsM4aI6gJL+jJwhxhh3jVIDOcpi
m0r2hmEtS8XXXNBk71QCDXTBNhhPiHX2LtHkrVIlhoEshH/EZgdq53Eirqs5iFKMnkOmqZTtr3Xq
djvPTWZT4S3NT5aVLgurMPUWI07BRVYqkQrmtCKohNY8qu9EdACoT6ki0a66XxVF4f9AQ3W38yO5
mWmZmIIpnDFrbXakvKWeZhLwhvrbUH8fahhqD0YUcBDJjEBMQwiznE4y5QbHrbhHBOnUAYzb2tVN
jJa65e+eE2Ya30E2GurxUP8ssA6e/wOnvo3V78d3vTcvMB3n7l3iX1JXWqk=
""")
##file activate.csh
ACTIVATE_CSH = convert("""
eJx9U11vmzAUffevOCVRu+UB9pws29Kl0iq1aVWllaZlcgxciiViItsQdb9+xiQp+dh4QOB7Pu49
XHqY59IgkwVhVRmLmFAZSrGRNkdgykonhFiqSCRW1sJSmJg8wCDT5QrucRCyHn6WFRKhVGmhKwVp
kUpNiS3emup3TY6XIn7DVNQyJUwlrgthJD6n/iCNv72uhCzCpFx9CRkThRQGKe08cWXJ9db/yh/u
pvzl9mn+PLnjj5P5D1yM8QmXlzBkSdXwZ0H/BBc0mEo5FE5qI2jKhclHOOvy9HD/OO/6YO1mX9vx
sY0H/tPIV0dtqel0V7iZvWyNg8XFcBA0ToEqVeqOdNUEQFvN41SumAv32VtJrakQNSmLWmgp4oJM
yDoBHgoydtoEAs47r5wHHnUal5vbJ8oOI+9wI86vb2d8Nrm/4Xy4RZ8R85E4uTZPB5EZPnTaaAGu
E59J8BE2J8XgrkbLeXMlVoQxznEYFYY8uFFdxsKQRx90Giwx9vSueHP1YNaUSFG4vTaErNSYuBOF
lXiVyXa9Sy3JdClEyK1dD6Nos9mEf8iKlOpmqSNTZnYjNEWiUYn2pKNB3ttcLJ3HmYYXy6Un76f7
r8rRsC1TpTJj7f19m5sUf/V3Ir+x/yjtLu8KjLX/CmN/AcVGUUo=
""")
##file activate.bat
ACTIVATE_BAT = convert("""
eJyFUkEKgzAQvAfyhz0YaL9QEWpRqlSjWGspFPZQTevFHOr/adQaU1GaUzI7Mzu7ZF89XhKkEJS8
qxaKMMsvboQ+LxxE44VICSW1gEa2UFaibqoS0iyJ0xw2lIA6nX5AHCu1jpRsv5KRjknkac9VLVug
sX9mtzxIeJDE/mg4OGp47qoLo3NHX2jsMB3AiDht5hryAUOEifoTdCXbSh7V0My2NMq/Xbh5MEjU
ZT63gpgNT9lKOJ/CtHsvT99re3pX303kydn4HeyOeAg5cjf2EW1D6HOPkg9NGKhu
""")
##file deactivate.bat
DEACTIVATE_BAT = convert("""
eJxzSE3OyFfIT0vj4spMU0hJTcvMS01RiPf3cYkP8wwKCXX0iQ8I8vcNCFHQ4FIAguLUEgWIgK0q
FlWqXJpcICVYpGzx2BAZ4uHv5+Hv6wq1BWINXBTdKriEKkI1DhW2QAfhttcxxANiFZCBbglQSJUL
i2dASrm4rFz9XLgAwJNbyQ==
""")
##file distutils-init.py
DISTUTILS_INIT = convert("""
eJytV92L4zYQf/dfMU0ottuse7RvC6FQrg8Lxz2Ugz4si9HacqKuIxlJ2ST313dG8odkO9d7aGBB
luZLv/nNjFacOqUtKJMIvzK3cXlhWgp5MDBsqK5SNYftsBAGpLLA4F1oe2Ytl+9wUvW55TswCi4c
KibhbFDSglXQCFmDPXIwtm7FawLRbwtPzg2T9gf4gupKv4GS0N262w7V0NvpbCy8cvTo3eAus6C5
ETU3ICQZX1hFTw/dzR6V/AW1RCN4/XAtbsVXqIXmlVX6liS4lOzEYY9QFB2zx6LfoSNjz1a0pqT9
QOIfJWQ2E888NEVZNqLlZZnvIB0NpHkimlFdKn2iRRY7yGG/CCJb6Iz280d34SFXBS2yEYPNF0Q7
yM7oCjpWvbEDQmnhRwOs6zjThpKE8HogwRAgraqYFZgGZvzmzVh+mgz9vskT3hruwyjdFcqyENJw
bbMPO5jdzonxK68QKT7B57CMRRG5shRSWDTX3dI8LzRndZbnSWL1zfvriUmK4TcGWSnZiEPCrxXv
bM+sP7VW2is2WgWXCO3sAu3Rzysz3FiNCA8WPyM4gb1JAAmCiyTZbhFjWx3h9SzauuRXC9MFoVbc
yNTCm1QXOOIfIn/g1kGMhDUBN72hI5XCBQtIXQw8UEEdma6Jaz4vJIJ51Orc15hzzmu6TdFp3ogr
Aof0c98tsw1SiaiWotHffk3XYCkqdToxWRfTFXqgpg2khcLluOHMVC0zZhLKIomesfSreUNNgbXi
Ky9VRzwzkBneNoGQyyvGjbsFQqOZvpWIjqH281lJ/jireFgR3cPzSyTGWzQpDNIU+03Fs4XKLkhp
/n0uFnuF6VphB44b3uWRneSbBoMSioqE8oeF0JY+qTvYfEK+bPLYdoR4McfYQ7wMZj39q0kfP8q+
FfsymO0GzNlPh644Jje06ulqHpOEQqdJUfoidI2O4CWx4qOglLye6RrFQirpCRXvhoRqXH3sYdVJ
AItvc+VUsLO2v2hVAWrNIfVGtkG351cUMNncbh/WdowtSPtCdkzYFv6mwYc9o2Jt68ud6wectBr8
hYAulPSlgzH44YbV3ikjrulEaNJxt+/H3wZ7bXSXje/YY4tfVVrVmUstaDwwOBLMg6iduDB0lMVC
UyzYx7Ab4kjCqdViEJmDcdk/SKbgsjYXgfMznUWcrtS4z4fmJ/XOM1LPk/iIpqass5XwNbdnLb1Y
8h3ERXSWZI6rZJxKs1LBqVH65w0Oy4ra0CBYxEeuOMbDmV5GI6E0Ha/wgVTtkX0+OXvqsD02CKLf
XHbeft85D7tTCMYy2Njp4DJP7gWJr6paVWXZ1+/6YXLv/iE0M90FktiI7yFJD9e7SOLhEkkaMTUO
azq9i2woBNR0/0eoF1HFMf0H8ChxH/jgcB34GZIz3Qn4/vid+VEamQrOVqAPTrOfmD4MPdVh09tb
8dLLjvh/61lEP4yW5vJaH4vHcevG8agXvzPGoOhhXNncpTr99PTHx6e/UvffFLaxUSjuSeP286Dw
gtEMcW1xKr/he4/6IQ6FUXP+0gkioHY5iwC9Eyx3HKO7af0zPPe+XyLn7fAY78k4aiR387bCr5XT
5C4rFgwLGfMvJuAMew==
""")
##file distutils.cfg
DISTUTILS_CFG = convert("""
eJxNj00KwkAMhfc9xYNuxe4Ft57AjYiUtDO1wXSmNJnK3N5pdSEEAu8nH6lxHVlRhtDHMPATA4uH
xJ4EFmGbvfJiicSHFRzUSISMY6hq3GLCRLnIvSTnEefN0FIjw5tF0Hkk9Q5dRunBsVoyFi24aaLg
9FDOlL0FPGluf4QjcInLlxd6f6rqkgPu/5nHLg0cXCscXoozRrP51DRT3j9QNl99AP53T2Q=
""")
##file activate_this.py
ACTIVATE_THIS = convert("""
eJyNUlGL2zAMfvevEBlHEujSsXsL9GGDvW1jD3sZpQQ3Ua7aJXawnbT595Ocpe0dO5ghseVP+vRJ
VpIkn2cYPZknwAvWLXWYhRP5Sk4baKgOWRWNqtpdgTyH2Y5wpq5Tug406YAgKEzkwqg7NBPwR86a
Hk0olPopaK0NHJHzYQPnE5rI0o8+yBUwiBfyQcT8mMPJGiAT0A0O+b8BY4MKJ7zPcSSzHaKrSpJE
qeDmUgGvVbPCS41DgO+6xy/OWbfAThMn/OQ9ukDWRCSLiKzk1yrLjWapq6NnvHUoHXQ4bYPdrsVX
4lQMc/q6ZW975nmSK+oH6wL42a9H65U6aha342Mh0UVDzrD87C1bH73s16R5zsStkBZDp0NrXQ+7
HaRnMo8f06UBnljKoOtn/YT+LtdvSyaT/BtIv9KR60nF9f3qmuYKO4//T9ItJMsjPfgUHqKwCZ3n
xu/Lx8M/UvCLTxW7VULHxB1PRRbrYfvWNY5S8it008jOjcleaMqVBDnUXcWULV2YK9JEQ92OfC96
1Tv4ZicZZZ7GpuEpZbbeQ7DxquVx5hdqoyFSSmXwfC90f1Dc7hjFs/tK99I0fpkI8zSLy4tSy+sI
3vMWehjQNJmE5VePlZbL61nzX3S93ZcfDqznnkb9AZ3GWJU=
""")
if __name__ == '__main__':
main()
## TODO:
## Copy python.exe.manifest
## Monkeypatch distutils.sysconfig
|
mit
|
maurofaccenda/ansible
|
lib/ansible/utils/module_docs_fragments/postgres.py
|
143
|
2774
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Postgres documentation fragment
DOCUMENTATION = """
options:
login_user:
description:
- The username used to authenticate with
required: false
default: postgres
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- Host running the database
required: false
default: null
login_unix_socket:
description:
- Path to a Unix domain socket for local connections
required: false
default: null
port:
description:
- Database port to connect to.
required: false
default: 5432
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
- See https://www.postgresql.org/docs/current/static/libpq-ssl.html for more information on the modes.
- Default of C(prefer) matches libpq default.
required: false
default: prefer
choices: [disable, allow, prefer, require, verify-ca, verify-full]
version_added: '2.3'
ssl_rootcert:
description:
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
required: false
default: null
version_added: '2.3'
notes:
- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev),
and C(python-psycopg2) packages on the remote host before using this module.
- The ssl_rootcert parameter requires at least Postgres version 8.4 and I(psycopg2) version 2.4.3.
requirements: [ psycopg2 ]
"""
|
gpl-3.0
|
oikarinen/plugin.audio.spotlight
|
server_addon.py
|
1
|
1074
|
#
# Copyright (c) Dariusz Biskup
#
# This file is part of Spotlight
#
# Spotlight is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of
# the License, or (at your option) any later version.
#
# Spotlight is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import sys
import xbmc  # Kodi built-in module, used for logging below
from spotlight.service.util.LibLoader import LibLoader
from spotlight.model.GlobalSettings import GlobalSettings
settings = GlobalSettings()
loader = LibLoader(settings)
loader.load_all()
xbmc.log("LibLoader sys.path is now %s" % sys.path)
from spotlight.service.Server import Server
server = Server()
server.start()
server.start_rpc_server()
|
gpl-3.0
|
parag2489/Image-Quality
|
train_imageQuality_regressMOS_smallNetwork.py
|
1
|
14262
|
import pdb
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape
from keras.layers.convolutional import Convolution1D, Convolution2D, MaxPooling2D
# from keras.layers.normalization import BatchNormalization
# from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import SGD, RMSprop, Adam
from keras.layers.core import Merge
from keras.regularizers import l2, activity_l2
import numpy as np
import scipy
import theano
from keras.layers.convolutional import ZeroPadding2D
# from scipy import io
from keras import backend as K
import h5py
from keras.utils import np_utils
import time
import cv2
import logging
from keras import callbacks
from keras.callbacks import ModelCheckpoint, EarlyStopping
from decimal import Decimal
doWeightLoadSaveTest = True
patchHeight = 32
patchWidth = 32
channels = 3
learningRate = 0.0001
regularizer = 0.0005
initialization = "he_uniform"
# leak = 1./3. # for PReLU()
Numepochs = 200
batchSize = 50
validateAfterEpochs = 1
numSamplesPerfile = 286200
NumSamplesinValidation = 106000
nb_output = 1
TrainFilesPath = '/media/ASUAD\pchandak/Seagate Expansion Drive/imageQuality_HDF5Files_Apr20/hdf5Files_train/'
ValFilesPath = '/media/ASUAD\pchandak/Seagate Expansion Drive/imageQuality_HDF5Files_Apr20/hdf5Files_val/'
TestFilesPath = '/media/ASUAD\pchandak/Seagate Expansion Drive/imageQuality_HDF5Files_Apr20/hdf5Files_test/'
logger = '/media/AccessParag/Code/DNN_imageQuality_regression_Apr20_corrlnLoss.txt'
weightSavePath = '/media/AccessParag/Code/weights_MOSRegress/'
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename=logger,
filemode='w')
class LossHistory(callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
# pdb.set_trace()
# print ""
logging.info(" -- The loss of batch # " + str(batch) + "is " + str(logs.get('loss')))
# if np.isnan(logs.get("loss")):
# pdb.set_trace()
self.losses.append(logs.get('loss'))
class myCallback(callbacks.Callback):
def on_epoch_begin(self, epoch, logs={}):
logging.info("Epoch " + str(epoch) + ":")
# pdb.set_trace()
if epoch == 0:
self.best_mean_corr = -np.inf
self.metric = []
# if epoch % 5 == 0:
# model.optimizer.lr.set_value(round(Decimal(0.6*model.optimizer.lr.get_value()),8))
# model.optimizer.lr.set_value(0.9*learningRate)
# learningRate = model.optimizer.lr.get_value()
# printing("The current learning rate is: " + str(learningRate))
def on_epoch_end(self, epoch, logs={}):
model.save_weights(weightSavePath + "bestWeights_regressMOS_smallNetwork_latestModel.h5",overwrite=True)
logging.info(" -- Epoch "+str(epoch)+" done, loss : "+ str(logs.get('loss')))
predictedScoresVal = np.ravel(model.predict(valData,batch_size=batchSize))
predictedScoresTest = np.ravel(model.predict(testData,batch_size=batchSize))
sroccVal = scipy.stats.spearmanr(predictedScoresVal, valLabels)
plccVal = scipy.stats.pearsonr(predictedScoresVal, valLabels)
sroccTest = scipy.stats.spearmanr(predictedScoresTest, testLabels)
plccTest = scipy.stats.pearsonr(predictedScoresTest, testLabels)
t_str_val = '\nSpearman corr for validation set is ' + str(sroccVal[0]) + '\nPearson corr for validation set is '+ str(plccVal[0]) + '\nMean absolute error for validation set is ' + str(np.mean(np.abs(predictedScoresVal-valLabels)))
t_str_test = '\nSpearman corr for test set is ' + str(sroccTest[0]) + '\nPearson corr for test set is '+ str(plccTest[0]) + '\nMean absolute error for test set is ' + str(np.mean(np.abs(predictedScoresTest-testLabels)))
print t_str_val
print t_str_test
mean_corr = sroccVal[0] + plccVal[0]
if mean_corr > self.best_mean_corr:
self.best_mean_corr = mean_corr
model.save_weights(weightSavePath + "bestWeights_regressMOS_smallNetwork_bestCorr.h5",overwrite=True)
printing("Best correlation loss model saved at Epoch " + str(epoch) + "\n")
self.metric.append(logs.get("val_loss"))
if epoch % 5 == 0:
model.optimizer.lr.set_value(round(Decimal(0.8*model.optimizer.lr.get_value()),8))
learningRate = model.optimizer.lr.get_value()
printing("")
printing("The current learning rate is: " + str(learningRate))
# if epoch > 0:
# metric_history = self.metric[-2:]
# metric_history_diff = np.diff(metric_history)
# testIncrease = np.any(metric_history_diff>=0)
# if testIncrease:
# model.optimizer.lr.set_value(round(Decimal(0.7*model.optimizer.lr.get_value()),8))
# learningRate = model.optimizer.lr.get_value()
# printing("")
# printing("The current learning rate is: " + str(learningRate))
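# Editorial note on the loss defined below (comment added, behaviour unchanged):
# it scales a transformed mean absolute error by (2 - Pearson correlation), so the
# penalty grows both when predictions drift from the MOS targets and when their
# linear agreement with the targets drops.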
def linear_correlation_loss(y_true, y_pred):
mean_y_true = K.mean(y_true)
mean_y_pred = K.mean(y_pred)
std_y_true = K.std(y_true)+1e-6
std_y_pred = K.std(y_pred)+1e-6
nSamples = K.shape(y_true)[0]
firstTerm = (y_true - mean_y_true)/std_y_true
secondTerm = (y_pred - mean_y_pred)/std_y_pred
pearsonCorr = K.sum(firstTerm*secondTerm)/(nSamples-1)
pearsonCorr = K.clip(pearsonCorr,-1.,1.)
maeLoss = K.mean(K.abs(y_true-y_pred))
# loss = 1./(0.1+K.exp(-0.5*K.log(maeLoss+(1-pearsonCorr))))
loss = (1./(0.1+K.exp(-0.5*K.log(maeLoss))))*(2-pearsonCorr)
return loss
def printing(str):
#logIntoaFile = True
print str
logging.info(str)
def boolToStr(boolVal):
if boolVal:
return "Yes"
else:
return "No"
def emailSender(mystr):
import smtplib
fromaddr = '[email protected]'
toaddrs = '[email protected]'
SUBJECT = "From Python Program"
message = """\
From: %s
To: %s
Subject: %s
%s
""" % (fromaddr, ", ".join(toaddrs), SUBJECT, mystr)
username = '[email protected]'
password = 'Dreamsonfire!'
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(username,password)
server.sendmail(fromaddr, toaddrs, message)
server.quit()
printing('Parameters that will be used')
printing("---------------------------------------------------------------------------------")
printing("**Image Sizes**")
printing("Image Height : "+str(patchHeight))
printing("Image Width : "+str(patchWidth))
printing("Image Channels: "+str(channels))
printing("\n")
printing("**Network Parameters**")
printing("Learning Rate : "+str(learningRate))
printing("Regularizer : "+str(regularizer))
printing("Initialization : "+initialization)
printing("\n")
printing("**Run Variables**")
printing("Number of samples per file : "+ str(numSamplesPerfile))
printing("Total # of epochs : "+str(Numepochs))
printing("# samples per batch : "+str(batchSize))
printing("Validate After Epochs : "+str(validateAfterEpochs))
printing("Total number of validation samples : "+str(NumSamplesinValidation))
printing("\n")
printing("**Files Path**")
printing("Trainig Files Path : "+TrainFilesPath)
printing("Valid Files Path : "+ValFilesPath)
printing("Logger File Path : "+logger)
printing("Weights Save Path : "+weightSavePath)
printing("\n")
printing("---------------------------------------------------------------------------------")
model = Sequential()
model.add(Activation('linear',input_shape=(channels,patchHeight,patchWidth))) # 32
model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 30
model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 28
model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 26
model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1))) # 25
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 23
model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 21
model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 19
model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1))) # 18
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 16
model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 14
model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 12
model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1))) # 11
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 9
model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 7
model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 5
model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1))) # 4
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
model.add(Convolution2D(256, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 2
model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1))) # 1
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
model.add(Reshape((1 * 1 * 256,)))
# model.add(Dropout(0.25))
model.add(Dense(800, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(800, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(nb_output, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "linear"))
printing("Built the model")
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
if doWeightLoadSaveTest:
# pdb.set_trace()
model.save_weights(weightSavePath + 'weightsLoadSaveTest.h5', overwrite=True)
model.load_weights(weightSavePath + 'weightsLoadSaveTest.h5')
printing("Weight load/save test passed...")
# model.load_weights('/media/AccessParag/Code/weights/bestWeightsAtEpoch_000.h5')
# printing("Weights at Epoch 0 loaded")
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
sgd = SGD(lr=learningRate, decay=1e-6, momentum=0.9, nesterov=True)
# adam = Adam(lr=learningRate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(loss=linear_correlation_loss, optimizer=sgd)
printing("Compilation Finished")
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
checkpointer = ModelCheckpoint(filepath = weightSavePath + "bestWeights_smallNetwork_bestLoss.h5", monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
cb = myCallback()
history = LossHistory()
terminateTraining = EarlyStopping(monitor='val_loss', patience=20, verbose=1, mode='auto')
hdfFileTrain = h5py.File(TrainFilesPath + "QualityRegressMOS_data_March31.h5","r")
trainData = hdfFileTrain["data"][:]
trainLabels = hdfFileTrain["labels"][:]
# random selection to make the number of samples equal to numSamplesPerfile and/or NumSamplesinValidation
# randIndices = np.random.permutation(len(trainLabels))
# randIndices = randIndices[0:numSamplesPerfile]
# trainData = trainData[randIndices,...]
# trainLabels = trainLabels[randIndices,...]
hdfFileVal = h5py.File(ValFilesPath + "QualityRegressMOS_data_March31.h5","r")
valData = hdfFileVal["data"][:]
valLabels = hdfFileVal["labels"][:]
# random selection to make the number of samples equal to numSamplesPerfile and/or NumSamplesinValidation
# randIndices = np.random.permutation(len(valLabels))
# randIndices = randIndices[0:NumSamplesinValidation]
# valData = valData[randIndices,...]
# valLabels = valLabels[randIndices,...]
hdfFileTest = h5py.File(TestFilesPath + "QualityRegressMOS_data_March31.h5","r")
testData = hdfFileTest["data"][:]
testLabels = hdfFileTest["labels"][:]
model.fit(trainData,trainLabels,batch_size=batchSize,nb_epoch=Numepochs,verbose=1,callbacks=[cb,history,checkpointer],validation_data=(valData,valLabels),shuffle=True,show_accuracy=False)
pdb.set_trace()
|
mit
|
gasongjian/ttpy
|
tt/eigb/eigb.py
|
1
|
2316
|
import numpy as np
import tt_eigb
from tt import tensor
def eigb(A, y0, eps, rmax = 150, nswp = 20, max_full_size = 1000, verb = 1):
""" Approximate computation of minimal eigenvalues in tensor train format
    This function uses the alternating least-squares algorithm to compute several
    minimal eigenvalues. If you want maximal eigenvalues, just pass -A to the function.
:Reference:
S. V. Dolgov, B. N. Khoromskij, I. V. Oseledets, and D. V. Savostyanov.
Computation of extreme eigenvalues in higher dimensions using block tensor train format. Computer Phys. Comm.,
185(4):1207-1216, 2014. http://dx.doi.org/10.1016/j.cpc.2013.12.017
:param A: Matrix in the TT-format
:type A: matrix
:param y0: Initial guess in the block TT-format, r(d+1) is the number of eigenvalues sought
:type y0: tensor
:param eps: Accuracy required
:type eps: float
:param rmax: Maximal rank
:type rmax: int
    :param kickrank: Additional rank; the larger it is, the more robust the method.
:type kickrank: int
:rtype: A tuple (ev, tensor), where ev is a list of eigenvalues, tensor is an approximation to eigenvectors.
:Example:
>>> import tt
>>> import tt.eigb
>>> d = 8; f = 3
>>> r = [8] * (d * f + 1); r[d * f] = 8; r[0] = 1
>>> x = tt.rand(n, d * f, r)
>>> a = tt.qlaplace_dd([8, 8, 8])
>>> sol, ev = tt.eigb.eigb(a, x, 1e-6, verb=0)
Solving a block eigenvalue problem
Looking for 8 eigenvalues with accuracy 1E-06
swp: 1 er = 35.93 rmax:19
swp: 2 er = 4.51015E-04 rmax:18
swp: 3 er = 1.87584E-12 rmax:17
Total number of matvecs: 0
>>> print ev
[ 0.00044828 0.00089654 0.00089654 0.00089654 0.0013448 0.0013448
0.0013448 0.00164356]
"""
ry = y0.r.copy()
lam = tt_eigb.tt_block_eig.tt_eigb(y0.d, A.n, A.m, A.tt.r, A.tt.core, y0.core, ry, eps, \
rmax, ry[y0.d], 0, nswp, max_full_size, verb)
y = tensor()
y.d = y0.d
y.n = A.n.copy()
y.r = ry
y.core = tt_eigb.tt_block_eig.result_core.copy()
tt_eigb.tt_block_eig.deallocate_result()
y.get_ps()
return y,lam
|
mit
|
mbareta/edx-platform-ft
|
lms/djangoapps/teams/tests/test_views.py
|
16
|
63213
|
# -*- coding: utf-8 -*-
"""Tests for the teams API at the HTTP request level."""
import json
from datetime import datetime
import pytz
from dateutil import parser
import ddt
from elasticsearch.exceptions import ConnectionError
from mock import patch
from search.search_engine_base import SearchEngine
from django.core.urlresolvers import reverse
from django.conf import settings
from django.db.models.signals import post_save
from django.utils import translation
from nose.plugins.attrib import attr
import unittest
from rest_framework.test import APITestCase, APIClient
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from courseware.tests.factories import StaffFactory
from common.test.utils import skip_signal
from student.tests.factories import UserFactory, AdminFactory, CourseEnrollmentFactory
from student.models import CourseEnrollment
from util.testing import EventTestMixin
from .factories import CourseTeamFactory, LAST_ACTIVITY_AT
from ..models import CourseTeamMembership
from ..search_indexes import CourseTeamIndexer, CourseTeam, course_team_post_save_callback
from django_comment_common.models import Role, FORUM_ROLE_COMMUNITY_TA
from django_comment_common.utils import seed_permissions_roles
@attr('shard_1')
class TestDashboard(SharedModuleStoreTestCase):
"""Tests for the Teams dashboard."""
test_password = "test"
NUM_TOPICS = 10
@classmethod
def setUpClass(cls):
super(TestDashboard, cls).setUpClass()
cls.course = CourseFactory.create(
teams_configuration={
"max_team_size": 10,
"topics": [
{
"name": "Topic {}".format(topic_id),
"id": topic_id,
"description": "Description for topic {}".format(topic_id)
}
for topic_id in range(cls.NUM_TOPICS)
]
}
)
def setUp(self):
"""
Set up tests
"""
super(TestDashboard, self).setUp()
# will be assigned to self.client by default
self.user = UserFactory.create(password=self.test_password)
self.teams_url = reverse('teams_dashboard', args=[self.course.id])
def test_anonymous(self):
"""Verifies that an anonymous client cannot access the team
dashboard, and is redirected to the login page."""
anonymous_client = APIClient()
response = anonymous_client.get(self.teams_url)
redirect_url = '{0}?next={1}'.format(settings.LOGIN_URL, self.teams_url)
self.assertRedirects(response, redirect_url)
def test_not_enrolled_not_staff(self):
""" Verifies that a student who is not enrolled cannot access the team dashboard. """
self.client.login(username=self.user.username, password=self.test_password)
response = self.client.get(self.teams_url)
self.assertEqual(404, response.status_code)
def test_not_enrolled_staff(self):
"""
Verifies that a user with global access who is not enrolled in the course can access the team dashboard.
"""
staff_user = UserFactory(is_staff=True, password=self.test_password)
staff_client = APIClient()
staff_client.login(username=staff_user.username, password=self.test_password)
response = staff_client.get(self.teams_url)
self.assertContains(response, "TeamsTabFactory", status_code=200)
def test_enrolled_not_staff(self):
"""
Verifies that a user without global access who is enrolled in the course can access the team dashboard.
"""
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.client.login(username=self.user.username, password=self.test_password)
response = self.client.get(self.teams_url)
self.assertContains(response, "TeamsTabFactory", status_code=200)
def test_enrolled_teams_not_enabled(self):
"""
Verifies that a user without global access who is enrolled in the course cannot access the team dashboard
if the teams feature is not enabled.
"""
course = CourseFactory.create()
teams_url = reverse('teams_dashboard', args=[course.id])
CourseEnrollmentFactory.create(user=self.user, course_id=course.id)
self.client.login(username=self.user.username, password=self.test_password)
response = self.client.get(teams_url)
self.assertEqual(404, response.status_code)
@unittest.skip("Fix this - getting unreliable query counts")
def test_query_counts(self):
# Enroll in the course and log in
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.client.login(username=self.user.username, password=self.test_password)
# Check the query count on the dashboard with no teams
with self.assertNumQueries(18):
self.client.get(self.teams_url)
# Create some teams
for topic_id in range(self.NUM_TOPICS):
team = CourseTeamFactory.create(
name=u"Team for topic {}".format(topic_id),
course_id=self.course.id,
topic_id=topic_id,
)
# Add the user to the last team
team.add_user(self.user)
# Check the query count on the dashboard again
with self.assertNumQueries(24):
self.client.get(self.teams_url)
def test_bad_course_id(self):
"""
Verifies expected behavior when course_id does not reference an existing course or is invalid.
"""
bad_org = "badorgxxx"
bad_team_url = self.teams_url.replace(self.course.id.org, bad_org)
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.client.login(username=self.user.username, password=self.test_password)
response = self.client.get(bad_team_url)
self.assertEqual(404, response.status_code)
bad_team_url = bad_team_url.replace(bad_org, "invalid/course/id")
response = self.client.get(bad_team_url)
self.assertEqual(404, response.status_code)
def get_user_course_specific_teams_list(self):
"""Gets the list of user course specific teams."""
# Create a course two
course_two = CourseFactory.create(
teams_configuration={
"max_team_size": 1,
"topics": [
{
"name": "Test topic for course two",
"id": 1,
"description": "Description for test topic for course two."
}
]
}
)
# Login and enroll the user in both courses
self.client.login(username=self.user.username, password=self.test_password)
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
CourseEnrollmentFactory.create(user=self.user, course_id=course_two.id)
# Create teams in both courses
course_one_team = CourseTeamFactory.create(name="Course one team", course_id=self.course.id, topic_id=1)
course_two_team = CourseTeamFactory.create(name="Course two team", course_id=course_two.id, topic_id=1) # pylint: disable=unused-variable
# Check that initially list of user teams in course one is empty
course_one_teams_url = reverse('teams_dashboard', args=[self.course.id])
response = self.client.get(course_one_teams_url)
self.assertIn('"teams": {"count": 0', response.content)
# Add user to a course one team
course_one_team.add_user(self.user)
# Check that list of user teams in course one is not empty, it is one now
response = self.client.get(course_one_teams_url)
self.assertIn('"teams": {"count": 1', response.content)
# Check that list of user teams in course two is still empty
course_two_teams_url = reverse('teams_dashboard', args=[course_two.id])
response = self.client.get(course_two_teams_url)
self.assertIn('"teams": {"count": 0', response.content)
class TeamAPITestCase(APITestCase, SharedModuleStoreTestCase):
"""Base class for Team API test cases."""
test_password = 'password'
@classmethod
def setUpClass(cls):
with super(TeamAPITestCase, cls).setUpClassAndTestData():
teams_configuration_1 = {
'topics':
[
{
'id': 'topic_{}'.format(i),
'name': name,
'description': 'Description for topic {}.'.format(i)
} for i, name in enumerate([u'Sólar power', 'Wind Power', 'Nuclear Power', 'Coal Power'])
]
}
cls.test_course_1 = CourseFactory.create(
org='TestX',
course='TS101',
display_name='Test Course',
teams_configuration=teams_configuration_1
)
teams_configuration_2 = {
'topics':
[
{
'id': 'topic_5',
'name': 'Other Interests',
'description': 'Description for topic 5.'
},
{
'id': 'topic_6',
'name': 'Public Profiles',
'description': 'Description for topic 6.'
},
{
'id': 'Topic_6.5',
'name': 'Test Accessibility Topic',
'description': 'Description for Topic_6.5'
},
],
'max_team_size': 1
}
cls.test_course_2 = CourseFactory.create(
org='MIT',
course='6.002x',
display_name='Circuits',
teams_configuration=teams_configuration_2
)
@classmethod
def setUpTestData(cls):
super(TeamAPITestCase, cls).setUpTestData()
cls.topics_count = 4
cls.users = {
'staff': AdminFactory.create(password=cls.test_password),
'course_staff': StaffFactory.create(course_key=cls.test_course_1.id, password=cls.test_password)
}
cls.create_and_enroll_student(username='student_enrolled')
cls.create_and_enroll_student(username='student_enrolled_not_on_team')
cls.create_and_enroll_student(username='student_unenrolled', courses=[])
# Make this student a community TA.
cls.create_and_enroll_student(username='community_ta')
seed_permissions_roles(cls.test_course_1.id)
community_ta_role = Role.objects.get(name=FORUM_ROLE_COMMUNITY_TA, course_id=cls.test_course_1.id)
community_ta_role.users.add(cls.users['community_ta'])
# This student is enrolled in both test courses and is a member of a team in each course, but is not on the
# same team as student_enrolled.
cls.create_and_enroll_student(
courses=[cls.test_course_1, cls.test_course_2],
username='student_enrolled_both_courses_other_team'
)
# Make this student have a public profile
cls.create_and_enroll_student(
courses=[cls.test_course_2],
username='student_enrolled_public_profile'
)
profile = cls.users['student_enrolled_public_profile'].profile
profile.year_of_birth = 1970
profile.save()
# This student is enrolled in the other course, but not yet a member of a team. This is to allow
# course_2 to use a max_team_size of 1 without breaking other tests on course_1
cls.create_and_enroll_student(
courses=[cls.test_course_2],
username='student_enrolled_other_course_not_on_team'
)
with skip_signal(
post_save,
receiver=course_team_post_save_callback,
sender=CourseTeam,
dispatch_uid='teams.signals.course_team_post_save_callback'
):
cls.solar_team = CourseTeamFactory.create(
name=u'Sólar team',
course_id=cls.test_course_1.id,
topic_id='topic_0'
)
cls.wind_team = CourseTeamFactory.create(name='Wind Team', course_id=cls.test_course_1.id)
cls.nuclear_team = CourseTeamFactory.create(name='Nuclear Team', course_id=cls.test_course_1.id)
cls.another_team = CourseTeamFactory.create(name='Another Team', course_id=cls.test_course_2.id)
cls.public_profile_team = CourseTeamFactory.create(
name='Public Profile Team',
course_id=cls.test_course_2.id,
topic_id='topic_6'
)
cls.search_team = CourseTeamFactory.create(
name='Search',
description='queryable text',
country='GS',
language='to',
course_id=cls.test_course_2.id,
topic_id='topic_7'
)
cls.chinese_team = CourseTeamFactory.create(
name=u'著文企臺個',
description=u'共樣地面較,件展冷不護者這與民教過住意,國制銀產物助音是勢一友',
country='CN',
language='zh_HANS',
course_id=cls.test_course_2.id,
topic_id='topic_7'
)
cls.test_team_name_id_map = {team.name: team for team in (
cls.solar_team,
cls.wind_team,
cls.nuclear_team,
cls.another_team,
cls.public_profile_team,
cls.search_team,
cls.chinese_team,
)}
for user, course in [('staff', cls.test_course_1), ('course_staff', cls.test_course_1)]:
CourseEnrollment.enroll(
cls.users[user], course.id, check_access=True
)
# Django Rest Framework v3 requires us to pass a request to serializers
# that have URL fields. Since we're invoking this code outside the context
# of a request, we need to simulate that there's a request.
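# A minimal sketch of what that simulation could look like (an assumption for
# illustration only; the serializer name below is made up and not taken from
# this module):
#
#   from django.test.client import RequestFactory
#   dummy_request = RequestFactory().get('/')
#   serializer = SomeTeamSerializer(team, context={'request': dummy_request})
#
# where SomeTeamSerializer stands in for whichever serializer has URL fields.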
cls.solar_team.add_user(cls.users['student_enrolled'])
cls.nuclear_team.add_user(cls.users['student_enrolled_both_courses_other_team'])
cls.another_team.add_user(cls.users['student_enrolled_both_courses_other_team'])
cls.public_profile_team.add_user(cls.users['student_enrolled_public_profile'])
def build_membership_data_raw(self, username, team):
"""Assembles a membership creation payload based on the raw values provided."""
return {'username': username, 'team_id': team}
def build_membership_data(self, username, team):
"""Assembles a membership creation payload based on the username and team model provided."""
return self.build_membership_data_raw(self.users[username].username, team.team_id)
@classmethod
def create_and_enroll_student(cls, courses=None, username=None):
""" Creates a new student and enrolls that student in the course.
Adds the new user to the cls.users dictionary with the username as the key.
Returns the username once the user has been created.
"""
if username is not None:
user = UserFactory.create(password=cls.test_password, username=username)
else:
user = UserFactory.create(password=cls.test_password)
courses = courses if courses is not None else [cls.test_course_1]
for course in courses:
CourseEnrollment.enroll(user, course.id, check_access=True)
cls.users[user.username] = user
return user.username
def login(self, user):
"""Given a user string, logs the given user in.
Used for testing with ddt, which does not have access to self in
decorators. If user is 'student_inactive', then an inactive user will
be both created and logged in.
"""
if user == 'student_inactive':
student_inactive = UserFactory.create(password=self.test_password)
self.client.login(username=student_inactive.username, password=self.test_password)
student_inactive.is_active = False
student_inactive.save()
else:
self.client.login(username=self.users[user].username, password=self.test_password)
def make_call(self, url, expected_status=200, method='get', data=None, content_type=None, **kwargs):
"""Makes a call to the Team API at the given url with method and data.
If a user is specified in kwargs, that user is first logged in.
"""
user = kwargs.pop('user', 'student_enrolled_not_on_team')
if user:
self.login(user)
func = getattr(self.client, method)
if content_type:
response = func(url, data=data, content_type=content_type)
else:
response = func(url, data=data)
self.assertEqual(
expected_status,
response.status_code,
msg="Expected status {expected} but got {actual}: {content}".format(
expected=expected_status,
actual=response.status_code,
content=response.content,
)
)
if expected_status == 200:
return json.loads(response.content)
else:
return response
def get_teams_list(self, expected_status=200, data=None, no_course_id=False, **kwargs):
"""Gets the list of teams as the given user with data as query params. Verifies expected_status."""
data = data if data else {}
if 'course_id' not in data and not no_course_id:
data.update({'course_id': self.test_course_1.id})
return self.make_call(reverse('teams_list'), expected_status, 'get', data, **kwargs)
def get_user_course_specific_teams_list(self):
"""Gets the list of user course specific teams."""
# Create and enroll user in both courses
user = self.create_and_enroll_student(
courses=[self.test_course_1, self.test_course_2],
username='test_user_enrolled_both_courses'
)
course_one_data = {'course_id': self.test_course_1.id, 'username': user}
course_two_data = {'course_id': self.test_course_2.id, 'username': user}
# Check that initially list of user teams in course one is empty
team_list = self.get_teams_list(user=user, expected_status=200, data=course_one_data)
self.assertEqual(team_list['count'], 0)
# Add user to a course one team
self.solar_team.add_user(self.users[user])
# Check that list of user teams in course one is not empty now
team_list = self.get_teams_list(user=user, expected_status=200, data=course_one_data)
self.assertEqual(team_list['count'], 1)
# Check that list of user teams in course two is still empty
team_list = self.get_teams_list(user=user, expected_status=200, data=course_two_data)
self.assertEqual(team_list['count'], 0)
def build_team_data(self, name="Test team", course=None, description="Filler description", **kwargs):
"""Creates the payload for creating a team. kwargs can be used to specify additional fields."""
data = kwargs
course = course if course else self.test_course_1
data.update({
'name': name,
'course_id': str(course.id),
'description': description,
})
return data
def post_create_team(self, expected_status=200, data=None, **kwargs):
"""Posts data to the team creation endpoint. Verifies expected_status."""
return self.make_call(reverse('teams_list'), expected_status, 'post', data, **kwargs)
def get_team_detail(self, team_id, expected_status=200, data=None, **kwargs):
"""Gets detailed team information for team_id. Verifies expected_status."""
return self.make_call(reverse('teams_detail', args=[team_id]), expected_status, 'get', data, **kwargs)
def delete_team(self, team_id, expected_status, **kwargs):
"""Delete the given team. Verifies expected_status."""
return self.make_call(reverse('teams_detail', args=[team_id]), expected_status, 'delete', **kwargs)
def patch_team_detail(self, team_id, expected_status, data=None, **kwargs):
"""Patches the team with team_id using data. Verifies expected_status."""
return self.make_call(
reverse('teams_detail', args=[team_id]),
expected_status,
'patch',
json.dumps(data) if data else None,
'application/merge-patch+json',
**kwargs
)
def get_topics_list(self, expected_status=200, data=None, **kwargs):
"""Gets the list of topics, passing data as query params. Verifies expected_status."""
return self.make_call(reverse('topics_list'), expected_status, 'get', data, **kwargs)
def get_topic_detail(self, topic_id, course_id, expected_status=200, data=None, **kwargs):
"""Gets a single topic, passing data as query params. Verifies expected_status."""
return self.make_call(
reverse('topics_detail', kwargs={'topic_id': topic_id, 'course_id': str(course_id)}),
expected_status,
'get',
data,
**kwargs
)
def get_membership_list(self, expected_status=200, data=None, **kwargs):
"""Gets the membership list, passing data as query params. Verifies expected_status."""
return self.make_call(reverse('team_membership_list'), expected_status, 'get', data, **kwargs)
def post_create_membership(self, expected_status=200, data=None, **kwargs):
"""Posts data to the membership creation endpoint. Verifies expected_status."""
return self.make_call(reverse('team_membership_list'), expected_status, 'post', data, **kwargs)
def get_membership_detail(self, team_id, username, expected_status=200, data=None, **kwargs):
"""Gets an individual membership record, passing data as query params. Verifies expected_status."""
return self.make_call(
reverse('team_membership_detail', args=[team_id, username]),
expected_status,
'get',
data,
**kwargs
)
def delete_membership(self, team_id, username, expected_status=200, **kwargs):
"""Deletes an individual membership record. Verifies expected_status."""
url = reverse('team_membership_detail', args=[team_id, username]) + '?admin=true'
return self.make_call(url, expected_status, 'delete', **kwargs)
def verify_expanded_public_user(self, user):
"""Verifies that fields exist on the returned user json indicating that it is expanded."""
for field in ['username', 'url', 'bio', 'country', 'profile_image', 'time_zone', 'language_proficiencies']:
self.assertIn(field, user)
def verify_expanded_private_user(self, user):
"""Verifies that fields exist on the returned user json indicating that it is expanded."""
for field in ['username', 'url', 'profile_image']:
self.assertIn(field, user)
for field in ['bio', 'country', 'time_zone', 'language_proficiencies']:
self.assertNotIn(field, user)
def verify_expanded_team(self, team):
"""Verifies that fields exist on the returned team json indicating that it is expanded."""
for field in ['id', 'name', 'course_id', 'topic_id', 'date_created', 'description']:
self.assertIn(field, team)
@ddt.ddt
class TestListTeamsAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the team listing API endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestListTeamsAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
teams = self.get_teams_list(user=user, expected_status=status)
if status == 200:
self.assertEqual(3, teams['count'])
def test_missing_course_id(self):
self.get_teams_list(400, no_course_id=True)
def verify_names(self, data, status, names=None, **kwargs):
"""Gets a team listing with data as query params, verifies status, and then verifies team names if specified."""
teams = self.get_teams_list(data=data, expected_status=status, **kwargs)
if names is not None and 200 <= status < 300:
results = teams['results']
self.assertEqual(names, [team['name'] for team in results])
def test_filter_invalid_course_id(self):
self.verify_names({'course_id': 'no_such_course'}, 400)
def test_filter_course_id(self):
self.verify_names(
{'course_id': self.test_course_2.id},
200,
['Another Team', 'Public Profile Team', 'Search', u'著文企臺個'],
user='staff'
)
def test_filter_topic_id(self):
self.verify_names({'course_id': self.test_course_1.id, 'topic_id': 'topic_0'}, 200, [u'Sólar team'])
def test_filter_username(self):
self.verify_names({'course_id': self.test_course_1.id, 'username': 'student_enrolled'}, 200, [u'Sólar team'])
self.verify_names({'course_id': self.test_course_1.id, 'username': 'staff'}, 200, [])
@ddt.data(
(None, 200, ['Nuclear Team', u'Sólar team', 'Wind Team']),
('name', 200, ['Nuclear Team', u'Sólar team', 'Wind Team']),
# Note that "Nuclear Team" and "Solar team" have the same open_slots.
# "Solar team" comes first due to secondary sort by last_activity_at.
('open_slots', 200, ['Wind Team', u'Sólar team', 'Nuclear Team']),
# Note that "Wind Team" and "Nuclear Team" have the same last_activity_at.
# "Wind Team" comes first due to secondary sort by open_slots.
('last_activity_at', 200, [u'Sólar team', 'Wind Team', 'Nuclear Team']),
)
@ddt.unpack
def test_order_by(self, field, status, names):
# Make "Solar team" the most recently active team.
# The CourseTeamFactory sets the last_activity_at to a fixed time (in the past), so all of the
# other teams have the same last_activity_at.
with skip_signal(
post_save,
receiver=course_team_post_save_callback,
sender=CourseTeam,
dispatch_uid='teams.signals.course_team_post_save_callback'
):
solar_team = self.test_team_name_id_map[u'Sólar team']
solar_team.last_activity_at = datetime.utcnow().replace(tzinfo=pytz.utc)
solar_team.save()
data = {'order_by': field} if field else {}
self.verify_names(data, status, names)
def test_order_by_with_text_search(self):
data = {'order_by': 'name', 'text_search': 'search'}
self.verify_names(data, 400, [])
self.assert_no_events_were_emitted()
@ddt.data((404, {'course_id': 'no/such/course'}), (400, {'topic_id': 'no_such_topic'}))
@ddt.unpack
def test_no_results(self, status, data):
self.get_teams_list(status, data)
def test_page_size(self):
result = self.get_teams_list(200, {'page_size': 2})
self.assertEquals(2, result['num_pages'])
def test_page(self):
result = self.get_teams_list(200, {'page_size': 1, 'page': 3})
self.assertEquals(3, result['num_pages'])
self.assertIsNone(result['next'])
self.assertIsNotNone(result['previous'])
def test_expand_private_user(self):
# Use the default user, which is already private because no year_of_birth is set
result = self.get_teams_list(200, {'expand': 'user', 'topic_id': 'topic_0'})
self.verify_expanded_private_user(result['results'][0]['membership'][0]['user'])
def test_expand_public_user(self):
result = self.get_teams_list(
200,
{
'expand': 'user',
'topic_id': 'topic_6',
'course_id': self.test_course_2.id
},
user='student_enrolled_public_profile'
)
self.verify_expanded_public_user(result['results'][0]['membership'][0]['user'])
@ddt.data(
('search', ['Search']),
('queryable', ['Search']),
('Tonga', ['Search']),
('Island', ['Search']),
('not-a-query', []),
('team', ['Another Team', 'Public Profile Team']),
(u'著文企臺個', [u'著文企臺個']),
)
@ddt.unpack
def test_text_search(self, text_search, expected_team_names):
def reset_search_index():
"""Clear out the search index and reindex the teams."""
CourseTeamIndexer.engine().destroy()
for team in self.test_team_name_id_map.values():
CourseTeamIndexer.index(team)
reset_search_index()
self.verify_names(
{'course_id': self.test_course_2.id, 'text_search': text_search},
200,
expected_team_names,
user='student_enrolled_public_profile'
)
self.assert_event_emitted(
'edx.team.searched',
search_text=text_search,
topic_id=None,
number_of_results=len(expected_team_names)
)
# Verify that the searches still work for a user from a different locale
with translation.override('ar'):
reset_search_index()
self.verify_names(
{'course_id': self.test_course_2.id, 'text_search': text_search},
200,
expected_team_names,
user='student_enrolled_public_profile'
)
def test_delete_removed_from_search(self):
team = CourseTeamFactory.create(
name=u'zoinks',
course_id=self.test_course_1.id,
topic_id='topic_0'
)
self.verify_names(
{'course_id': self.test_course_1.id, 'text_search': 'zoinks'},
200,
[team.name],
user='staff'
)
team.delete()
self.verify_names(
{'course_id': self.test_course_1.id, 'text_search': 'zoinks'},
200,
[],
user='staff'
)
@ddt.ddt
class TestCreateTeamAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the team creation endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestCreateTeamAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled_not_on_team', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
team = self.post_create_team(status, self.build_team_data(name="New Team"), user=user)
if status == 200:
self.verify_expected_team_id(team, 'new-team')
teams = self.get_teams_list(user=user)
self.assertIn("New Team", [team['name'] for team in teams['results']])
def _expected_team_id(self, team, expected_prefix):
""" Return the team id that we'd expect given this team data and this prefix. """
return expected_prefix + '-' + team['discussion_topic_id']
def verify_expected_team_id(self, team, expected_prefix):
""" Verifies that the team id starts with the specified prefix and ends with the discussion_topic_id """
self.assertIn('id', team)
self.assertIn('discussion_topic_id', team)
self.assertEqual(team['id'], self._expected_team_id(team, expected_prefix))
def test_naming(self):
new_teams = [
self.post_create_team(data=self.build_team_data(name=name), user=self.create_and_enroll_student())
for name in ["The Best Team", "The Best Team", "A really long team name"]
]
# Check that teams with the same name have unique IDs.
self.verify_expected_team_id(new_teams[0], 'the-best-team')
self.verify_expected_team_id(new_teams[1], 'the-best-team')
self.assertNotEqual(new_teams[0]['id'], new_teams[1]['id'])
# Verify expected truncation behavior with names > 20 characters.
self.verify_expected_team_id(new_teams[2], 'a-really-long-team-n')
@ddt.data((400, {
'name': 'Bad Course ID',
'course_id': 'no_such_course',
'description': "Filler Description"
}), (404, {
'name': "Non-existent course ID",
'course_id': 'no/such/course',
'description': "Filler Description"
}))
@ddt.unpack
def test_bad_course_data(self, status, data):
self.post_create_team(status, data)
def test_student_in_team(self):
response = self.post_create_team(
400,
data=self.build_team_data(
name="Doomed team",
course=self.test_course_1,
description="Overly ambitious student"
),
user='student_enrolled'
)
self.assertEqual(
"You are already in a team in this course.",
json.loads(response.content)["user_message"]
)
@ddt.data('staff', 'course_staff', 'community_ta')
def test_privileged_create_multiple_teams(self, user):
""" Privileged users can create multiple teams, even if they are already in one. """
# First add the privileged user to a team.
self.post_create_membership(
200,
self.build_membership_data(user, self.solar_team),
user=user
)
self.post_create_team(
data=self.build_team_data(
name="Another team",
course=self.test_course_1,
description="Privileged users are the best"
),
user=user
)
@ddt.data({'description': ''}, {'name': 'x' * 1000}, {'name': ''})
def test_bad_fields(self, kwargs):
self.post_create_team(400, self.build_team_data(**kwargs))
def test_missing_name(self):
self.post_create_team(400, {
'course_id': str(self.test_course_1.id),
'description': "foobar"
})
def test_full_student_creator(self):
creator = self.create_and_enroll_student()
team = self.post_create_team(data=self.build_team_data(
name="Fully specified team",
course=self.test_course_1,
description="Another fantastic team",
topic_id='great-topic',
country='CA',
language='fr'
), user=creator)
# Verify the id (it ends with a unique hash, which is the same as the discussion_topic_id).
self.verify_expected_team_id(team, 'fully-specified-team')
del team['id']
self.assert_event_emitted(
'edx.team.created',
team_id=self._expected_team_id(team, 'fully-specified-team'),
)
self.assert_event_emitted(
'edx.team.learner_added',
team_id=self._expected_team_id(team, 'fully-specified-team'),
user_id=self.users[creator].id,
add_method='added_on_create'
)
# Remove date_created and discussion_topic_id because they change between test runs
del team['date_created']
del team['discussion_topic_id']
# Since membership is its own list, we want to examine this separately.
team_membership = team['membership']
del team['membership']
# verify that it's been set to a time today.
self.assertEqual(
parser.parse(team['last_activity_at']).date(),
datetime.utcnow().replace(tzinfo=pytz.utc).date()
)
del team['last_activity_at']
# Verify that the creating user gets added to the team.
self.assertEqual(len(team_membership), 1)
member = team_membership[0]['user']
self.assertEqual(member['username'], creator)
self.assertEqual(team, {
'name': 'Fully specified team',
'language': 'fr',
'country': 'CA',
'topic_id': 'great-topic',
'course_id': str(self.test_course_1.id),
'description': 'Another fantastic team'
})
@ddt.data('staff', 'course_staff', 'community_ta')
def test_membership_staff_creator(self, user):
# Verify that staff do not automatically get added to a team
# when they create one.
team = self.post_create_team(data=self.build_team_data(
name="New team",
course=self.test_course_1,
description="Another fantastic team",
), user=user)
self.assertEqual(team['membership'], [])
@ddt.ddt
class TestDetailTeamAPI(TeamAPITestCase):
"""Test cases for the team detail endpoint."""
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
team = self.get_team_detail(self.solar_team.team_id, status, user=user)
if status == 200:
self.assertEqual(team['description'], self.solar_team.description)
self.assertEqual(team['discussion_topic_id'], self.solar_team.discussion_topic_id)
self.assertEqual(parser.parse(team['last_activity_at']), LAST_ACTIVITY_AT)
def test_does_not_exist(self):
self.get_team_detail('no_such_team', 404)
def test_expand_private_user(self):
# Use the default user, which is already private because no year_of_birth is set
result = self.get_team_detail(self.solar_team.team_id, 200, {'expand': 'user'})
self.verify_expanded_private_user(result['membership'][0]['user'])
def test_expand_public_user(self):
result = self.get_team_detail(
self.public_profile_team.team_id,
200,
{'expand': 'user'},
user='student_enrolled_public_profile'
)
self.verify_expanded_public_user(result['membership'][0]['user'])
@ddt.ddt
class TestDeleteTeamAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the team delete endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestDeleteTeamAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled', 403),
('staff', 204),
('course_staff', 204),
('community_ta', 204)
)
@ddt.unpack
def test_access(self, user, status):
self.delete_team(self.solar_team.team_id, status, user=user)
if status == 204:
self.assert_event_emitted(
'edx.team.deleted',
team_id=self.solar_team.team_id,
)
self.assert_event_emitted(
'edx.team.learner_removed',
team_id=self.solar_team.team_id,
remove_method='team_deleted',
user_id=self.users['student_enrolled'].id
)
def test_does_not_exist(self):
self.delete_team('nonexistent', 404)
def test_memberships_deleted(self):
self.assertEqual(CourseTeamMembership.objects.filter(team=self.solar_team).count(), 1)
self.delete_team(self.solar_team.team_id, 204, user='staff')
self.assert_event_emitted(
'edx.team.deleted',
team_id=self.solar_team.team_id,
)
self.assert_event_emitted(
'edx.team.learner_removed',
team_id=self.solar_team.team_id,
remove_method='team_deleted',
user_id=self.users['student_enrolled'].id
)
self.assertEqual(CourseTeamMembership.objects.filter(team=self.solar_team).count(), 0)
@ddt.ddt
class TestUpdateTeamAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the team update endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestUpdateTeamAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled', 403),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
prev_name = self.solar_team.name
team = self.patch_team_detail(self.solar_team.team_id, status, {'name': 'foo'}, user=user)
if status == 200:
self.assertEquals(team['name'], 'foo')
self.assert_event_emitted(
'edx.team.changed',
team_id=self.solar_team.team_id,
truncated=[],
field='name',
old=prev_name,
new='foo'
)
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 404),
('student_enrolled', 404),
('staff', 404),
('course_staff', 404),
('community_ta', 404),
)
@ddt.unpack
def test_access_bad_id(self, user, status):
self.patch_team_detail("no_such_team", status, {'name': 'foo'}, user=user)
@ddt.data(
('id', 'foobar'),
('description', ''),
('country', 'no_such_country'),
('language', 'no_such_language')
)
@ddt.unpack
def test_bad_requests(self, key, value):
self.patch_team_detail(self.solar_team.team_id, 400, {key: value}, user='staff')
@ddt.data(('country', 'US'), ('language', 'en'), ('foo', 'bar'))
@ddt.unpack
def test_good_requests(self, key, value):
if hasattr(self.solar_team, key):
prev_value = getattr(self.solar_team, key)
self.patch_team_detail(self.solar_team.team_id, 200, {key: value}, user='staff')
if hasattr(self.solar_team, key):
self.assert_event_emitted(
'edx.team.changed',
team_id=self.solar_team.team_id,
truncated=[],
field=key,
old=prev_value,
new=value
)
def test_does_not_exist(self):
self.patch_team_detail('no_such_team', 404, user='staff')
@ddt.ddt
class TestListTopicsAPI(TeamAPITestCase):
"""Test cases for the topic listing endpoint."""
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
topics = self.get_topics_list(status, {'course_id': self.test_course_1.id}, user=user)
if status == 200:
self.assertEqual(topics['count'], self.topics_count)
@ddt.data('A+BOGUS+COURSE', 'A/BOGUS/COURSE')
def test_invalid_course_key(self, course_id):
self.get_topics_list(404, {'course_id': course_id})
def test_without_course_id(self):
self.get_topics_list(400)
@ddt.data(
(None, 200, ['Coal Power', 'Nuclear Power', u'Sólar power', 'Wind Power'], 'name'),
('name', 200, ['Coal Power', 'Nuclear Power', u'Sólar power', 'Wind Power'], 'name'),
# Note that "Nuclear Power" and "Solar power" both have 2 teams. "Coal Power" and "Window Power"
# both have 0 teams. The secondary sort is alphabetical by name.
('team_count', 200, ['Nuclear Power', u'Sólar power', 'Coal Power', 'Wind Power'], 'team_count'),
('no_such_field', 400, [], None),
)
@ddt.unpack
def test_order_by(self, field, status, names, expected_ordering):
with skip_signal(
post_save,
receiver=course_team_post_save_callback,
sender=CourseTeam,
dispatch_uid='teams.signals.course_team_post_save_callback'
):
# Add 2 teams to "Nuclear Power", which previously had no teams.
CourseTeamFactory.create(
name=u'Nuclear Team 1', course_id=self.test_course_1.id, topic_id='topic_2'
)
CourseTeamFactory.create(
name=u'Nuclear Team 2', course_id=self.test_course_1.id, topic_id='topic_2'
)
data = {'course_id': self.test_course_1.id}
if field:
data['order_by'] = field
topics = self.get_topics_list(status, data)
if status == 200:
self.assertEqual(names, [topic['name'] for topic in topics['results']])
self.assertEqual(topics['sort_order'], expected_ordering)
def test_order_by_team_count_secondary(self):
"""
Ensure that the secondary sort (alphabetical by name) works across pagination
boundaries when the primary sort is team_count.
"""
with skip_signal(
post_save,
receiver=course_team_post_save_callback,
sender=CourseTeam,
dispatch_uid='teams.signals.course_team_post_save_callback'
):
# Add 2 teams to "Wind Power", which previously had no teams.
CourseTeamFactory.create(
name=u'Wind Team 1', course_id=self.test_course_1.id, topic_id='topic_1'
)
CourseTeamFactory.create(
name=u'Wind Team 2', course_id=self.test_course_1.id, topic_id='topic_1'
)
topics = self.get_topics_list(data={
'course_id': self.test_course_1.id,
'page_size': 2,
'page': 1,
'order_by': 'team_count'
})
self.assertEqual(["Wind Power", u'Sólar power'], [topic['name'] for topic in topics['results']])
topics = self.get_topics_list(data={
'course_id': self.test_course_1.id,
'page_size': 2,
'page': 2,
'order_by': 'team_count'
})
self.assertEqual(["Coal Power", "Nuclear Power"], [topic['name'] for topic in topics['results']])
def test_pagination(self):
response = self.get_topics_list(data={
'course_id': self.test_course_1.id,
'page_size': 2,
})
self.assertEqual(2, len(response['results']))
self.assertIn('next', response)
self.assertIn('previous', response)
self.assertIsNone(response['previous'])
self.assertIsNotNone(response['next'])
def test_default_ordering(self):
response = self.get_topics_list(data={'course_id': self.test_course_1.id})
self.assertEqual(response['sort_order'], 'name')
def test_team_count(self):
"""Test that team_count is included for each topic"""
response = self.get_topics_list(data={'course_id': self.test_course_1.id})
for topic in response['results']:
self.assertIn('team_count', topic)
if topic['id'] == u'topic_0':
self.assertEqual(topic['team_count'], 1)
else:
self.assertEqual(topic['team_count'], 0)
@ddt.ddt
class TestDetailTopicAPI(TeamAPITestCase):
"""Test cases for the topic detail endpoint."""
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
topic = self.get_topic_detail('topic_0', self.test_course_1.id, status, user=user)
if status == 200:
for field in ('id', 'name', 'description'):
self.assertIn(field, topic)
@ddt.data('A+BOGUS+COURSE', 'A/BOGUS/COURSE')
def test_invalid_course_id(self, course_id):
self.get_topic_detail('topic_0', course_id, 404)
def test_invalid_topic_id(self):
self.get_topic_detail('no_such_topic', self.test_course_1.id, 404)
def test_topic_detail_with_caps_and_dot_in_id(self):
self.get_topic_detail('Topic_6.5', self.test_course_2.id, user='student_enrolled_public_profile')
def test_team_count(self):
"""Test that team_count is included with a topic"""
topic = self.get_topic_detail(topic_id='topic_0', course_id=self.test_course_1.id)
self.assertEqual(topic['team_count'], 1)
topic = self.get_topic_detail(topic_id='topic_1', course_id=self.test_course_1.id)
self.assertEqual(topic['team_count'], 0)
@ddt.ddt
class TestListMembershipAPI(TeamAPITestCase):
"""Test cases for the membership list endpoint."""
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 404),
('student_enrolled', 200),
('student_enrolled_both_courses_other_team', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
membership = self.get_membership_list(status, {'team_id': self.solar_team.team_id}, user=user)
if status == 200:
self.assertEqual(membership['count'], 1)
self.assertEqual(membership['results'][0]['user']['username'], self.users['student_enrolled'].username)
@ddt.data(
(None, 401, False),
('student_inactive', 401, False),
('student_unenrolled', 200, False),
('student_enrolled', 200, True),
('student_enrolled_both_courses_other_team', 200, True),
('staff', 200, True),
('course_staff', 200, True),
('community_ta', 200, True),
)
@ddt.unpack
def test_access_by_username(self, user, status, has_content):
membership = self.get_membership_list(status, {'username': self.users['student_enrolled'].username}, user=user)
if status == 200:
if has_content:
self.assertEqual(membership['count'], 1)
self.assertEqual(membership['results'][0]['team']['team_id'], self.solar_team.team_id)
else:
self.assertEqual(membership['count'], 0)
@ddt.data(
('student_enrolled_both_courses_other_team', 'TestX/TS101/Test_Course', 200, 'Nuclear Team'),
('student_enrolled_both_courses_other_team', 'MIT/6.002x/Circuits', 200, 'Another Team'),
('student_enrolled', 'TestX/TS101/Test_Course', 200, u'Sólar team'),
('student_enrolled', 'MIT/6.002x/Circuits', 400, ''),
)
@ddt.unpack
def test_course_filter_with_username(self, user, course_id, status, team_name):
membership = self.get_membership_list(
status,
{
'username': self.users[user],
'course_id': course_id
},
user=user
)
if status == 200:
self.assertEqual(membership['count'], 1)
self.assertEqual(membership['results'][0]['team']['team_id'], self.test_team_name_id_map[team_name].team_id)
@ddt.data(
('TestX/TS101/Test_Course', 200),
('MIT/6.002x/Circuits', 400),
)
@ddt.unpack
def test_course_filter_with_team_id(self, course_id, status):
membership = self.get_membership_list(status, {'team_id': self.solar_team.team_id, 'course_id': course_id})
if status == 200:
self.assertEqual(membership['count'], 1)
self.assertEqual(membership['results'][0]['team']['team_id'], self.solar_team.team_id)
def test_bad_course_id(self):
self.get_membership_list(404, {'course_id': 'no_such_course'})
def test_no_username_or_team_id(self):
self.get_membership_list(400, {})
def test_bad_team_id(self):
self.get_membership_list(404, {'team_id': 'no_such_team'})
def test_expand_private_user(self):
# Use the default user, which is already private because no year_of_birth is set
result = self.get_membership_list(200, {'team_id': self.solar_team.team_id, 'expand': 'user'})
self.verify_expanded_private_user(result['results'][0]['user'])
def test_expand_public_user(self):
result = self.get_membership_list(
200,
{'team_id': self.public_profile_team.team_id, 'expand': 'user'},
user='student_enrolled_public_profile'
)
self.verify_expanded_public_user(result['results'][0]['user'])
def test_expand_team(self):
result = self.get_membership_list(200, {'team_id': self.solar_team.team_id, 'expand': 'team'})
self.verify_expanded_team(result['results'][0]['team'])
@ddt.ddt
class TestCreateMembershipAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the membership creation endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestCreateMembershipAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 404),
('student_enrolled_not_on_team', 200),
('student_enrolled', 404),
('student_enrolled_both_courses_other_team', 404),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
membership = self.post_create_membership(
status,
self.build_membership_data('student_enrolled_not_on_team', self.solar_team),
user=user
)
if status == 200:
self.assertEqual(membership['user']['username'], self.users['student_enrolled_not_on_team'].username)
self.assertEqual(membership['team']['team_id'], self.solar_team.team_id)
memberships = self.get_membership_list(200, {'team_id': self.solar_team.team_id})
self.assertEqual(memberships['count'], 2)
add_method = 'joined_from_team_view' if user == 'student_enrolled_not_on_team' else 'added_by_another_user'
self.assert_event_emitted(
'edx.team.learner_added',
team_id=self.solar_team.team_id,
user_id=self.users['student_enrolled_not_on_team'].id,
add_method=add_method
)
else:
self.assert_no_events_were_emitted()
def test_no_username(self):
response = self.post_create_membership(400, {'team_id': self.solar_team.team_id})
self.assertIn('username', json.loads(response.content)['field_errors'])
def test_no_team(self):
response = self.post_create_membership(400, {'username': self.users['student_enrolled_not_on_team'].username})
self.assertIn('team_id', json.loads(response.content)['field_errors'])
def test_bad_team(self):
self.post_create_membership(
404,
self.build_membership_data_raw(self.users['student_enrolled'].username, 'no_such_team')
)
def test_bad_username(self):
self.post_create_membership(
404,
self.build_membership_data_raw('no_such_user', self.solar_team.team_id),
user='staff'
)
@ddt.data('student_enrolled', 'staff', 'course_staff')
def test_join_twice(self, user):
response = self.post_create_membership(
400,
self.build_membership_data('student_enrolled', self.solar_team),
user=user
)
self.assertIn('already a member', json.loads(response.content)['developer_message'])
def test_join_second_team_in_course(self):
response = self.post_create_membership(
400,
self.build_membership_data('student_enrolled_both_courses_other_team', self.solar_team),
user='student_enrolled_both_courses_other_team'
)
self.assertIn('already a member', json.loads(response.content)['developer_message'])
@ddt.data('staff', 'course_staff')
def test_not_enrolled_in_team_course(self, user):
response = self.post_create_membership(
400,
self.build_membership_data('student_unenrolled', self.solar_team),
user=user
)
self.assertIn('not enrolled', json.loads(response.content)['developer_message'])
def test_over_max_team_size_in_course_2(self):
response = self.post_create_membership(
400,
self.build_membership_data('student_enrolled_other_course_not_on_team', self.another_team),
user='student_enrolled_other_course_not_on_team'
)
self.assertIn('full', json.loads(response.content)['developer_message'])
@ddt.ddt
class TestDetailMembershipAPI(TeamAPITestCase):
"""Test cases for the membership detail endpoint."""
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 404),
('student_enrolled_not_on_team', 200),
('student_enrolled', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
self.get_membership_detail(
self.solar_team.team_id,
self.users['student_enrolled'].username,
status,
user=user
)
def test_bad_team(self):
self.get_membership_detail('no_such_team', self.users['student_enrolled'].username, 404)
def test_bad_username(self):
self.get_membership_detail(self.solar_team.team_id, 'no_such_user', 404)
def test_no_membership(self):
self.get_membership_detail(
self.solar_team.team_id,
self.users['student_enrolled_not_on_team'].username,
404
)
def test_expand_private_user(self):
# Use the default user, which is already private because no year_of_birth is set
result = self.get_membership_detail(
self.solar_team.team_id,
self.users['student_enrolled'].username,
200,
{'expand': 'user'}
)
self.verify_expanded_private_user(result['user'])
def test_expand_public_user(self):
result = self.get_membership_detail(
self.public_profile_team.team_id,
self.users['student_enrolled_public_profile'].username,
200,
{'expand': 'user'},
user='student_enrolled_public_profile'
)
self.verify_expanded_public_user(result['user'])
def test_expand_team(self):
result = self.get_membership_detail(
self.solar_team.team_id,
self.users['student_enrolled'].username,
200,
{'expand': 'team'}
)
self.verify_expanded_team(result['team'])
@ddt.ddt
class TestDeleteMembershipAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the membership deletion endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestDeleteMembershipAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 404),
('student_enrolled_not_on_team', 404),
('student_enrolled', 204),
('staff', 204),
('course_staff', 204),
('community_ta', 204),
)
@ddt.unpack
def test_access(self, user, status):
self.delete_membership(
self.solar_team.team_id,
self.users['student_enrolled'].username,
status,
user=user
)
if status == 204:
self.assert_event_emitted(
'edx.team.learner_removed',
team_id=self.solar_team.team_id,
user_id=self.users['student_enrolled'].id,
remove_method='removed_by_admin'
)
else:
self.assert_no_events_were_emitted()
def test_leave_team(self):
"""
The key difference between this test and test_access above is that
removal via "Edit Membership" and "Leave Team" emit different events
despite hitting the same API endpoint, due to the 'admin' query string.
"""
url = reverse('team_membership_detail', args=[self.solar_team.team_id, self.users['student_enrolled'].username])
self.make_call(url, 204, 'delete', user='student_enrolled')
self.assert_event_emitted(
'edx.team.learner_removed',
team_id=self.solar_team.team_id,
user_id=self.users['student_enrolled'].id,
remove_method='self_removal'
)
def test_bad_team(self):
self.delete_membership('no_such_team', self.users['student_enrolled'].username, 404)
def test_bad_username(self):
self.delete_membership(self.solar_team.team_id, 'no_such_user', 404)
def test_missing_membership(self):
self.delete_membership(self.wind_team.team_id, self.users['student_enrolled'].username, 404)
class TestElasticSearchErrors(TeamAPITestCase):
"""Test that the Team API is robust to Elasticsearch connection errors."""
ES_ERROR = ConnectionError('N/A', 'connection error', {})
@patch.object(SearchEngine, 'get_search_engine', side_effect=ES_ERROR)
def test_list_teams(self, __):
"""Test that text searches return a 503 when Elasticsearch is down.
The endpoint should still return 200 when a search is not supplied."""
self.get_teams_list(
expected_status=503,
data={'course_id': self.test_course_1.id, 'text_search': 'zoinks'},
user='staff'
)
self.get_teams_list(
expected_status=200,
data={'course_id': self.test_course_1.id},
user='staff'
)
@patch.object(SearchEngine, 'get_search_engine', side_effect=ES_ERROR)
def test_create_team(self, __):
"""Test that team creation is robust to Elasticsearch errors."""
self.post_create_team(
expected_status=200,
data=self.build_team_data(name='zoinks'),
user='staff'
)
@patch.object(SearchEngine, 'get_search_engine', side_effect=ES_ERROR)
def test_delete_team(self, __):
"""Test that team deletion is robust to Elasticsearch errors."""
self.delete_team(self.wind_team.team_id, 204, user='staff')
@patch.object(SearchEngine, 'get_search_engine', side_effect=ES_ERROR)
def test_patch_team(self, __):
"""Test that team updates are robust to Elasticsearch errors."""
self.patch_team_detail(
self.wind_team.team_id,
200,
data={'description': 'new description'},
user='staff'
)
|
agpl-3.0
|
pu239ppy/graphite-web
|
webapp/graphite/render/datalib.py
|
3
|
5118
|
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from graphite.logger import log
from graphite.storage import STORE
from graphite.readers import FetchInProgress
from django.conf import settings
from graphite.util import epoch
from traceback import format_exc
class TimeSeries(list):
def __init__(self, name, start, end, step, values, consolidate='average'):
list.__init__(self, values)
self.name = name
self.start = start
self.end = end
self.step = step
self.consolidationFunc = consolidate
self.valuesPerPoint = 1
self.options = {}
def __iter__(self):
if self.valuesPerPoint > 1:
return self.__consolidatingGenerator( list.__iter__(self) )
else:
return list.__iter__(self)
def consolidate(self, valuesPerPoint):
self.valuesPerPoint = int(valuesPerPoint)
def __consolidatingGenerator(self, gen):
buf = []
for x in gen:
buf.append(x)
if len(buf) == self.valuesPerPoint:
while None in buf: buf.remove(None)
if buf:
yield self.__consolidate(buf)
buf = []
else:
yield None
while None in buf: buf.remove(None)
if buf: yield self.__consolidate(buf)
else: yield None
raise StopIteration
def __consolidate(self, values):
usable = [v for v in values if v is not None]
if not usable: return None
if self.consolidationFunc == 'sum':
return sum(usable)
if self.consolidationFunc == 'average':
return float(sum(usable)) / len(usable)
if self.consolidationFunc == 'max':
return max(usable)
if self.consolidationFunc == 'min':
return min(usable)
raise Exception("Invalid consolidation function: '%s'" % self.consolidationFunc)
def __repr__(self):
return 'TimeSeries(name=%s, start=%s, end=%s, step=%s)' % (self.name, self.start, self.end, self.step)
def getInfo(self):
"""Pickle-friendly representation of the series"""
return {
'name' : self.name,
'start' : self.start,
'end' : self.end,
'step' : self.step,
'values' : list(self),
}
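# Brief usage sketch for consolidation (illustrative only, not part of the
# original module): setting valuesPerPoint > 1 via consolidate() makes iteration
# merge that many raw points into one consolidated point using consolidationFunc.
#
#   series = TimeSeries('example.metric', 0, 5, 1, [1, 2, 3, 4, 5])
#   series.consolidate(2)
#   list(series)   # -> [1.5, 3.5, 5.0] with the default 'average' function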
# Data retrieval API
def fetchData(requestContext, pathExpr):
seriesList = []
startTime = int( epoch( requestContext['startTime'] ) )
endTime = int( epoch( requestContext['endTime'] ) )
def _fetchData(pathExpr,startTime, endTime, requestContext, seriesList):
matching_nodes = STORE.find(pathExpr, startTime, endTime, local=requestContext['localOnly'])
fetches = [(node, node.fetch(startTime, endTime)) for node in matching_nodes if node.is_leaf]
for node, results in fetches:
if isinstance(results, FetchInProgress):
results = results.waitForResults()
if not results:
log.info("render.datalib.fetchData :: no results for %s.fetch(%s, %s)" % (node, startTime, endTime))
continue
try:
(timeInfo, values) = results
except ValueError as e:
raise Exception("could not parse timeInfo/values from metric '%s': %s" % (node.path, e))
(start, end, step) = timeInfo
series = TimeSeries(node.path, start, end, step, values)
series.pathExpression = pathExpr #hack to pass expressions through to render functions
seriesList.append(series)
# Prune empty series with duplicate metric paths to avoid showing empty graph elements for old whisper data
names = set([ s.name for s in seriesList ])
for name in names:
series_with_duplicate_names = [ s for s in seriesList if s.name == name ]
empty_duplicates = [ s for s in series_with_duplicate_names if not nonempty(s) ]
if series_with_duplicate_names == empty_duplicates and len(empty_duplicates) > 0: # if they're all empty
empty_duplicates.pop() # make sure we leave one in seriesList
for series in empty_duplicates:
seriesList.remove(series)
return seriesList
retries = 1 # start counting at one to make log output and settings more readable
while True:
try:
seriesList = _fetchData(pathExpr,startTime, endTime, requestContext, seriesList)
return seriesList
except Exception, e:
if retries >= settings.MAX_FETCH_RETRIES:
log.exception("Failed after %s retry! Root cause:\n%s" %
(settings.MAX_FETCH_RETRIES, format_exc()))
raise e
else:
log.exception("Got an exception when fetching data! Try: %i of %i. Root cause:\n%s" %
(retries, settings.MAX_FETCH_RETRIES, format_exc()))
retries += 1
def nonempty(series):
for value in series:
if value is not None:
return True
return False
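# Usage sketch (an assumption about the calling convention, for illustration
# only): the render views normally assemble requestContext, but fetchData only
# reads the keys referenced above ('startTime', 'endTime', 'localOnly').
#
#   from datetime import datetime, timedelta
#   context = {
#       'startTime': datetime.utcnow() - timedelta(hours=1),
#       'endTime': datetime.utcnow(),
#       'localOnly': False,
#   }
#   series_list = fetchData(context, 'carbon.agents.*.metricsReceived')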
|
apache-2.0
|
scrollback/kuma
|
vendor/packages/ipython/IPython/testing/parametric.py
|
7
|
1796
|
"""Parametric testing on top of twisted.trial.unittest.
"""
__all__ = ['parametric','Parametric']
from twisted.trial.unittest import TestCase
def partial(f, *partial_args, **partial_kwargs):
"""Generate a partial class method.
"""
def partial_func(self, *args, **kwargs):
dikt = dict(kwargs)
dikt.update(partial_kwargs)
return f(self, *(partial_args+args), **dikt)
return partial_func
def parametric(f):
"""Mark f as a parametric test.
"""
f._parametric = True
return classmethod(f)
def Parametric(cls):
"""Register parametric tests with a class.
"""
# Walk over all tests marked with @parametric
test_generators = [getattr(cls,f) for f in dir(cls)
if f.startswith('test')]
test_generators = [m for m in test_generators if hasattr(m,'_parametric')]
for test_gen in test_generators:
test_name = test_gen.func_name
# Insert a new test for each parameter
for n,test_and_params in enumerate(test_gen()):
test_method = test_and_params[0]
test_params = test_and_params[1:]
# Here we use partial (defined above), which returns a
# class method of type ``types.FunctionType``, unlike
# functools.partial which returns a function of type
# ``functools.partial``.
partial_func = partial(test_method,*test_params)
# rename the test to look like a testcase
partial_func.__name__ = 'test_' + partial_func.__name__
# insert the new function into the class as a test
setattr(cls, test_name + '_%s' % n, partial_func)
# rename test generator so it isn't called again by nose
test_gen.im_func.func_name = '__done_' + test_name
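# Illustrative usage (a sketch based on the helpers above; the test class and
# method names are made up for the example):
#
#   class ArithmeticTest(TestCase):
#       @parametric
#       def test_addition(cls):
#           # each yielded tuple is (method_to_run, *parameters)
#           yield (cls.check_addition, 1, 1, 2)
#           yield (cls.check_addition, 2, 3, 5)
#
#       def check_addition(self, a, b, expected):
#           self.assertEqual(a + b, expected)
#
#   Parametric(ArithmeticTest)   # adds test_addition_0 and test_addition_1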
|
mpl-2.0
|
BenevolentAI/guacamol
|
tests/utils/test_chemistry.py
|
1
|
2868
|
from guacamol.utils.chemistry import canonicalize, canonicalize_list, is_valid, \
calculate_internal_pairwise_similarities, calculate_pairwise_similarities, parse_molecular_formula
def test_validity_empty_molecule():
smiles = ''
assert not is_valid(smiles)
def test_validity_incorrect_syntax():
smiles = 'CCCincorrectsyntaxCCC'
assert not is_valid(smiles)
def test_validity_incorrect_valence():
smiles = 'CCC(CC)(CC)(=O)CCC'
assert not is_valid(smiles)
def test_validity_correct_molecules():
smiles_1 = 'O'
smiles_2 = 'C'
smiles_3 = 'CC(ONONOC)CCCc1ccccc1'
assert is_valid(smiles_1)
assert is_valid(smiles_2)
assert is_valid(smiles_3)
def test_isomeric_canonicalisation():
endiandric_acid = r'OC(=O)[C@H]5C2\C=C/C3[C@@H]5CC4[C@H](C\C=C\C=C\c1ccccc1)[C@@H]2[C@@H]34'
with_stereocenters = canonicalize(endiandric_acid, include_stereocenters=True)
without_stereocenters = canonicalize(endiandric_acid, include_stereocenters=False)
expected_with_stereocenters = 'O=C(O)[C@H]1C2C=CC3[C@@H]1CC1[C@H](C/C=C/C=C/c4ccccc4)[C@@H]2[C@@H]31'
expected_without_stereocenters = 'O=C(O)C1C2C=CC3C1CC1C(CC=CC=Cc4ccccc4)C2C31'
assert with_stereocenters == expected_with_stereocenters
assert without_stereocenters == expected_without_stereocenters
def test_list_canonicalization_removes_none():
m1 = 'CCC(OCOCO)CC(=O)NCC'
m2 = 'this.is.not.a.molecule'
m3 = 'c1ccccc1'
m4 = 'CC(OCON=N)CC'
molecules = [m1, m2, m3, m4]
canonicalized_molecules = canonicalize_list(molecules)
valid_molecules = [m1, m3, m4]
expected = [canonicalize(smiles) for smiles in valid_molecules]
assert canonicalized_molecules == expected
def test_internal_sim():
molz = ['OCCCF', 'c1cc(F)ccc1', 'c1cnc(CO)cc1', 'FOOF']
sim = calculate_internal_pairwise_similarities(molz)
assert sim.shape[0] == 4
assert sim.shape[1] == 4
# check elements
for i in range(sim.shape[0]):
for j in range(sim.shape[1]):
assert sim[i, j] == sim[j, i]
if i != j:
assert sim[i, j] < 1.0
else:
assert sim[i, j] == 0
def test_external_sim():
molz1 = ['OCCCF', 'c1cc(F)ccc1', 'c1cnc(CO)cc1', 'FOOF']
molz2 = ['c1cc(Cl)ccc1', '[Cr][Ac][K]', '[Ca](F)[Fe]']
sim = calculate_pairwise_similarities(molz1, molz2)
assert sim.shape[0] == 4
assert sim.shape[1] == 3
# check elements
for i in range(sim.shape[0]):
for j in range(sim.shape[1]):
assert sim[i, j] < 1.0
def test_parse_molecular_formula():
formula = 'C6H9NOF2Cl2Br'
parsed = parse_molecular_formula(formula)
expected = [
('C', 6),
('H', 9),
('N', 1),
('O', 1),
('F', 2),
('Cl', 2),
('Br', 1)
]
assert parsed == expected
|
mit
|
pniedzielski/fb-hackathon-2013-11-21
|
src/repl.it/jsrepl/extern/python/unclosured/lib/python2.7/wsgiref/simple_server.py
|
177
|
4743
|
"""BaseHTTPServer that implements the Python WSGI protocol (PEP 333, rev 1.21)
This is both an example of how WSGI can be implemented, and a basis for running
simple web applications on a local machine, such as might be done when testing
or debugging an application. It has not been reviewed for security issues,
however, and we strongly recommend that you use a "real" web server for
production use.
For example usage, see the 'if __name__=="__main__"' block at the end of the
module. See also the BaseHTTPServer module docs for other API information.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import urllib, sys
from wsgiref.handlers import SimpleHandler
__version__ = "0.1"
__all__ = ['WSGIServer', 'WSGIRequestHandler', 'demo_app', 'make_server']
server_version = "WSGIServer/" + __version__
sys_version = "Python/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class ServerHandler(SimpleHandler):
server_software = software_version
def close(self):
try:
self.request_handler.log_request(
self.status.split(' ',1)[0], self.bytes_sent
)
finally:
SimpleHandler.close(self)
class WSGIServer(HTTPServer):
"""BaseHTTPServer that implements the Python WSGI protocol"""
application = None
def server_bind(self):
"""Override server_bind to store the server name."""
HTTPServer.server_bind(self)
self.setup_environ()
def setup_environ(self):
# Set up base environment
env = self.base_environ = {}
env['SERVER_NAME'] = self.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PORT'] = str(self.server_port)
env['REMOTE_HOST']=''
env['CONTENT_LENGTH']=''
env['SCRIPT_NAME'] = ''
def get_app(self):
return self.application
def set_app(self,application):
self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
server_version = "WSGIServer/" + __version__
def get_environ(self):
env = self.server.base_environ.copy()
env['SERVER_PROTOCOL'] = self.request_version
env['REQUEST_METHOD'] = self.command
if '?' in self.path:
path,query = self.path.split('?',1)
else:
path,query = self.path,''
env['PATH_INFO'] = urllib.unquote(path)
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
for h in self.headers.headers:
k,v = h.split(':',1)
k=k.replace('-','_').upper(); v=v.strip()
if k in env:
continue # skip content length, type,etc.
if 'HTTP_'+k in env:
env['HTTP_'+k] += ','+v # comma-separate multiple headers
else:
env['HTTP_'+k] = v
return env
def get_stderr(self):
return sys.stderr
def handle(self):
"""Handle a single HTTP request"""
self.raw_requestline = self.rfile.readline()
if not self.parse_request(): # An error code has been sent, just exit
return
handler = ServerHandler(
self.rfile, self.wfile, self.get_stderr(), self.get_environ()
)
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
def demo_app(environ,start_response):
from StringIO import StringIO
stdout = StringIO()
print >>stdout, "Hello world!"
print >>stdout
h = environ.items(); h.sort()
for k,v in h:
print >>stdout, k,'=', repr(v)
start_response("200 OK", [('Content-Type','text/plain')])
return [stdout.getvalue()]
def make_server(
host, port, app, server_class=WSGIServer, handler_class=WSGIRequestHandler
):
"""Create a new WSGI server listening on `host` and `port` for `app`"""
server = server_class((host, port), handler_class)
server.set_app(app)
return server
if __name__ == '__main__':
httpd = make_server('', 8000, demo_app)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
import webbrowser
webbrowser.open('http://localhost:8000/xyz?abc')
httpd.handle_request() # serve one request, then exit
|
agpl-3.0
|
pinterb/st2
|
st2actions/setup.py
|
3
|
1301
|
# -*- coding: utf-8 -*-
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='st2actions',
version='0.4.0',
description='',
author='StackStorm',
author_email='[email protected]',
install_requires=[
"pecan",
],
test_suite='st2actions',
zip_safe=False,
include_package_data=True,
packages=find_packages(exclude=['ez_setup'])
)
|
apache-2.0
|
Karosuo/Linux_tools
|
xls_handlers/xls_sum_venv/lib/python3.6/site-packages/pip/_vendor/chardet/gb2312prober.py
|
289
|
1754
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312_SM_MODEL
class GB2312Prober(MultiByteCharSetProber):
def __init__(self):
super(GB2312Prober, self).__init__()
self.coding_sm = CodingStateMachine(GB2312_SM_MODEL)
self.distribution_analyzer = GB2312DistributionAnalysis()
self.reset()
@property
def charset_name(self):
return "GB2312"
@property
def language(self):
return "Chinese"
|
gpl-3.0
|
Blake-R/pylijm
|
setup.py
|
1
|
1112
|
from __future__ import absolute_import, division, print_function, unicode_literals
from os import path
from setuptools import setup, find_packages
with open(path.join(path.dirname(__file__), 'README.md'), 'r') as fp:
long_description = fp.read()
setup(
name='pylijm',
version='1.1b2',
description='Python Lightweight JSON Model',
long_description=long_description,
url='https://github.com/blake-r/pylijm',
author='Oleg Blednov',
author_email='[email protected]',
license='GNUv3',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3'
],
keywords='dict json model document nosql',
packages=find_packages(exclude=['tests']),
install_requires=['six'],
extras_require={
'tests': ['unittest2', 'ujson']
}
)
|
gpl-3.0
|
david-zwicker/py-utils
|
utils/data_structures/parameter_mixin.py
|
1
|
1714
|
'''
Created on Oct 31, 2016
@author: David Zwicker <[email protected]>
'''
import copy
class ParameterMixin(object):
""" a mixin which manages a dictionary of parameters assigned to classes
"""
parameters_default = {}
def __init__(self, parameters=None, check_validity=True):
""" initialize the object with optional parameters that overwrite the
default behavior
`parameters` a dictionary of parameters overwriting the defaults
`check_validity` determines whether an error is raised if there are
keys in parameters that are not in the defaults
"""
# initialize parameters with default ones from all parent classes
self.parameters = {}
for cls in reversed(self.__class__.__mro__):
if hasattr(cls, 'parameters_default'):
# we need to make a deep copy to copy nested dictionaries
self.parameters.update(copy.deepcopy(cls.parameters_default))
# update parameters with the supplied ones
if parameters is not None:
if check_validity and any(key not in self.parameters
for key in parameters):
for key in parameters:
if key not in self.parameters:
raise ValueError('Parameter `{}` was provided in '
'instance specific parameters but is '
'not defined for the class `{}`'
.format(key, self.__class__.__name__))
self.parameters.update(parameters)
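# Illustrative usage sketch (added for clarity, not part of the original
# module): shows how the parameters_default dictionaries of all parent
# classes are merged along the MRO and how check_validity rejects unknown
# keys. The class names Base and Simulation are hypothetical.
if __name__ == '__main__':
    class Base(ParameterMixin):
        parameters_default = {'tolerance': 1e-3}
    class Simulation(Base):
        parameters_default = {'max_steps': 100}
    sim = Simulation(parameters={'tolerance': 1e-6})
    # defaults from both classes are present; the supplied value wins
    assert sim.parameters == {'tolerance': 1e-6, 'max_steps': 100}
    try:
        Simulation(parameters={'unknown_key': 1})
    except ValueError as err:
        print(err)  # names the key that is not defined for the class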
|
mit
|
witgo/spark
|
external/kinesis-asl/src/main/python/examples/streaming/kinesis_wordcount_asl.py
|
23
|
3611
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Consumes messages from an Amazon Kinesis stream and does wordcount.
This example spins up 1 Kinesis Receiver per shard for the given stream.
It then starts pulling from the last checkpointed sequence number of the given stream.
Usage: kinesis_wordcount_asl.py <app-name> <stream-name> <endpoint-url> <region-name>
<app-name> is the name of the consumer app, used to track the read data in DynamoDB
<stream-name> name of the Kinesis stream (i.e. mySparkStream)
<endpoint-url> endpoint of the Kinesis service
(e.g. https://kinesis.us-east-1.amazonaws.com)
<region-name> region name of the Kinesis endpoint (e.g. us-east-1)
Example:
# export AWS keys if necessary
$ export AWS_ACCESS_KEY_ID=<your-access-key>
$ export AWS_SECRET_ACCESS_KEY=<your-secret-key>
# run the example
$ bin/spark-submit --jars \
'external/kinesis-asl-assembly/target/spark-streaming-kinesis-asl-assembly_*.jar' \
external/kinesis-asl/src/main/python/examples/streaming/kinesis_wordcount_asl.py \
myAppName mySparkStream https://kinesis.us-east-1.amazonaws.com us-east-1
There is a companion helper class called KinesisWordProducerASL which puts dummy data
onto the Kinesis stream.
This code uses the DefaultAWSCredentialsProviderChain to find credentials
in the following order:
Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
Java System Properties - aws.accessKeyId and aws.secretKey
Credential profiles file - default location (~/.aws/credentials) shared by all AWS SDKs
Instance profile credentials - delivered through the Amazon EC2 metadata service
For more information, see
http://docs.aws.amazon.com/AWSSdkDocsJava/latest/DeveloperGuide/credentials.html
See http://spark.apache.org/docs/latest/streaming-kinesis-integration.html for more details on
the Kinesis Spark Streaming integration.
"""
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kinesis import KinesisUtils, InitialPositionInStream
if __name__ == "__main__":
if len(sys.argv) != 5:
print(
"Usage: kinesis_wordcount_asl.py <app-name> <stream-name> <endpoint-url> <region-name>",
file=sys.stderr)
sys.exit(-1)
sc = SparkContext(appName="PythonStreamingKinesisWordCountAsl")
ssc = StreamingContext(sc, 1)
appName, streamName, endpointUrl, regionName = sys.argv[1:]
lines = KinesisUtils.createStream(
ssc, appName, streamName, endpointUrl, regionName, InitialPositionInStream.LATEST, 2)
counts = lines.flatMap(lambda line: line.split(" ")) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a+b)
counts.pprint()
ssc.start()
ssc.awaitTermination()
|
apache-2.0
|
chrsrds/scikit-learn
|
examples/neighbors/plot_nca_illustration.py
|
1
|
2974
|
"""
=============================================
Neighborhood Components Analysis Illustration
=============================================
An example illustrating the goal of learning a distance metric that maximizes
the nearest neighbors classification accuracy. The example is solely for
illustration purposes. Please refer to the :ref:`User Guide <nca>` for
more information.
"""
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.neighbors import NeighborhoodComponentsAnalysis
from matplotlib import cm
from sklearn.utils.fixes import logsumexp
print(__doc__)
n_neighbors = 1
random_state = 0
# Create a tiny data set of 9 samples from 3 classes
X, y = make_classification(n_samples=9, n_features=2, n_informative=2,
n_redundant=0, n_classes=3, n_clusters_per_class=1,
class_sep=1.0, random_state=random_state)
# Plot the points in the original space
plt.figure()
ax = plt.gca()
# Draw the graph nodes
for i in range(X.shape[0]):
ax.text(X[i, 0], X[i, 1], str(i), va='center', ha='center')
ax.scatter(X[i, 0], X[i, 1], s=300, c=cm.Set1(y[[i]]), alpha=0.4)
def p_i(X, i):
diff_embedded = X[i] - X
dist_embedded = np.einsum('ij,ij->i', diff_embedded,
diff_embedded)
dist_embedded[i] = np.inf
# compute exponentiated distances (use the log-sum-exp trick to
    # avoid numerical instabilities)
exp_dist_embedded = np.exp(-dist_embedded -
logsumexp(-dist_embedded))
return exp_dist_embedded
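# Added illustration (not in the original example): p_i returns a proper
# probability distribution over the other samples, thanks to the
# log-sum-exp normalization above.
assert np.isclose(p_i(X, 0).sum(), 1.0)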
def relate_point(X, i, ax):
pt_i = X[i]
for j, pt_j in enumerate(X):
thickness = p_i(X, i)
if i != j:
line = ([pt_i[0], pt_j[0]], [pt_i[1], pt_j[1]])
ax.plot(*line, c=cm.Set1(y[j]),
linewidth=5*thickness[j])
# we consider only point 3
i = 3
# Plot bonds linked to sample i in the original space
relate_point(X, i, ax)
ax.set_title("Original points")
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.axis('equal')
# Learn an embedding with NeighborhoodComponentsAnalysis
nca = NeighborhoodComponentsAnalysis(max_iter=30, random_state=random_state)
nca = nca.fit(X, y)
# Plot the points after transformation with NeighborhoodComponentsAnalysis
plt.figure()
ax2 = plt.gca()
# Get the embedding and find the new nearest neighbors
X_embedded = nca.transform(X)
relate_point(X_embedded, i, ax2)
for i in range(len(X)):
ax2.text(X_embedded[i, 0], X_embedded[i, 1], str(i),
va='center', ha='center')
ax2.scatter(X_embedded[i, 0], X_embedded[i, 1], s=300, c=cm.Set1(y[[i]]),
alpha=0.4)
# Make axes equal so that boundaries are displayed correctly as circles
ax2.set_title("NCA embedding")
ax2.axes.get_xaxis().set_visible(False)
ax2.axes.get_yaxis().set_visible(False)
ax2.axis('equal')
plt.show()
|
bsd-3-clause
|
gannetson/django
|
tests/view_tests/urls.py
|
82
|
3317
|
# -*- coding: utf-8 -*-
from functools import partial
from os import path
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.utils._os import upath
from django.utils.translation import ugettext_lazy as _
from django.views import defaults, i18n, static
from . import views
base_dir = path.dirname(path.abspath(upath(__file__)))
media_dir = path.join(base_dir, 'media')
locale_dir = path.join(base_dir, 'locale')
js_info_dict = {
'domain': 'djangojs',
'packages': ('view_tests',),
}
js_info_dict_english_translation = {
'domain': 'djangojs',
'packages': ('view_tests.app0',),
}
js_info_dict_multi_packages1 = {
'domain': 'djangojs',
'packages': ('view_tests.app1', 'view_tests.app2'),
}
js_info_dict_multi_packages2 = {
'domain': 'djangojs',
'packages': ('view_tests.app3', 'view_tests.app4'),
}
js_info_dict_admin = {
'domain': 'djangojs',
'packages': ('django.contrib.admin', 'view_tests'),
}
js_info_dict_app1 = {
'domain': 'djangojs',
'packages': ('view_tests.app1',),
}
js_info_dict_app2 = {
'domain': 'djangojs',
'packages': ('view_tests.app2',),
}
js_info_dict_app5 = {
'domain': 'djangojs',
'packages': ('view_tests.app5',),
}
urlpatterns = [
url(r'^$', views.index_page),
# Default views
url(r'^non_existing_url/', partial(defaults.page_not_found, exception=None)),
url(r'^server_error/', defaults.server_error),
# a view that raises an exception for the debug view
url(r'raises/$', views.raises),
url(r'raises400/$', views.raises400),
url(r'raises403/$', views.raises403),
url(r'raises404/$', views.raises404),
url(r'raises500/$', views.raises500),
url(r'technical404/$', views.technical404, name="my404"),
url(r'classbased404/$', views.Http404View.as_view()),
# i18n views
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^jsi18n/$', i18n.javascript_catalog, js_info_dict),
url(r'^jsi18n/app1/$', i18n.javascript_catalog, js_info_dict_app1),
url(r'^jsi18n/app2/$', i18n.javascript_catalog, js_info_dict_app2),
url(r'^jsi18n/app5/$', i18n.javascript_catalog, js_info_dict_app5),
url(r'^jsi18n_english_translation/$', i18n.javascript_catalog, js_info_dict_english_translation),
url(r'^jsi18n_multi_packages1/$', i18n.javascript_catalog, js_info_dict_multi_packages1),
url(r'^jsi18n_multi_packages2/$', i18n.javascript_catalog, js_info_dict_multi_packages2),
url(r'^jsi18n_admin/$', i18n.javascript_catalog, js_info_dict_admin),
url(r'^jsi18n_template/$', views.jsi18n),
url(r'^jsi18n_multi_catalogs/$', views.jsi18n_multi_catalogs),
# Static views
url(r'^site_media/(?P<path>.*)$', static.serve, {'document_root': media_dir}),
]
urlpatterns += i18n_patterns(
url(_(r'^translated/$'), views.index_page, name='i18n_prefixed'),
)
urlpatterns += [
url(r'view_exception/(?P<n>[0-9]+)/$', views.view_exception, name='view_exception'),
url(r'template_exception/(?P<n>[0-9]+)/$', views.template_exception, name='template_exception'),
url(r'^raises_template_does_not_exist/(?P<path>.+)$', views.raises_template_does_not_exist, name='raises_template_does_not_exist'),
url(r'^render_no_template/$', views.render_no_template, name='render_no_template'),
]
|
bsd-3-clause
|
shivaenigma/electrum
|
gui/qt/__init__.py
|
2
|
8471
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import signal
import traceback
try:
import PyQt4
except Exception:
sys.exit("Error: Could not import PyQt4 on Linux systems, you may try 'sudo apt-get install python-qt4'")
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
from electrum.i18n import _, set_language
from electrum.plugins import run_hook
from electrum import SimpleConfig, Wallet, WalletStorage
from electrum.paymentrequest import InvoiceStore
from electrum.contacts import Contacts
from installwizard import InstallWizard
try:
import icons_rc
except Exception:
sys.exit("Error: Could not import icons_rc.py, please generate it with: 'pyrcc4 icons.qrc -o gui/qt/icons_rc.py'")
from util import * # * needed for plugins
from main_window import ElectrumWindow
class OpenFileEventFilter(QObject):
def __init__(self, windows):
self.windows = windows
super(OpenFileEventFilter, self).__init__()
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.FileOpen:
if len(self.windows) >= 1:
self.windows[0].pay_to_URI(event.url().toEncoded())
return True
return False
class ElectrumGui:
def __init__(self, config, network, plugins):
set_language(config.get('language'))
self.network = network
self.config = config
self.plugins = plugins
self.windows = []
self.efilter = OpenFileEventFilter(self.windows)
self.app = QApplication(sys.argv)
self.app.installEventFilter(self.efilter)
self.timer = Timer()
# shared objects
self.invoices = InvoiceStore(self.config)
self.contacts = Contacts(self.config)
# init tray
self.dark_icon = self.config.get("dark_icon", False)
self.tray = QSystemTrayIcon(self.tray_icon(), None)
self.tray.setToolTip('Electrum')
self.tray.activated.connect(self.tray_activated)
self.build_tray_menu()
self.tray.show()
self.app.connect(self.app, QtCore.SIGNAL('new_window'), self.start_new_window)
def build_tray_menu(self):
# Avoid immediate GC of old menu when window closed via its action
self.old_menu = self.tray.contextMenu()
m = QMenu()
for window in self.windows:
submenu = m.addMenu(window.wallet.basename())
submenu.addAction(_("Show/Hide"), window.show_or_hide)
submenu.addAction(_("Close"), window.close)
m.addAction(_("Dark/Light"), self.toggle_tray_icon)
m.addSeparator()
m.addAction(_("Exit Electrum"), self.close)
self.tray.setContextMenu(m)
def tray_icon(self):
if self.dark_icon:
return QIcon(':icons/electrum_dark_icon.png')
else:
return QIcon(':icons/electrum_light_icon.png')
def toggle_tray_icon(self):
self.dark_icon = not self.dark_icon
self.config.set_key("dark_icon", self.dark_icon, True)
self.tray.setIcon(self.tray_icon())
def tray_activated(self, reason):
if reason == QSystemTrayIcon.DoubleClick:
if all([w.is_hidden() for w in self.windows]):
for w in self.windows:
w.bring_to_top()
else:
for w in self.windows:
w.hide()
def close(self):
for window in self.windows:
window.close()
def load_wallet_file(self, filename):
try:
storage = WalletStorage(filename)
except Exception as e:
QMessageBox.information(None, _('Error'), str(e), _('OK'))
return
if not storage.file_exists:
recent = self.config.get('recently_open', [])
if filename in recent:
recent.remove(filename)
self.config.set_key('recently_open', recent)
action = 'new'
else:
try:
wallet = Wallet(storage)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.warning(None, _('Warning'), str(e), _('OK'))
return
action = wallet.get_action()
# run wizard
if action is not None:
wizard = InstallWizard(self.config, self.network, storage)
wallet = wizard.run(action)
# keep current wallet
if not wallet:
return
else:
wallet.start_threads(self.network)
return wallet
def get_wallet_folder(self):
#return os.path.dirname(os.path.abspath(self.wallet.storage.path if self.wallet else self.wallet.storage.path))
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
wallet_folder = self.get_wallet_folder()
i = 1
while True:
filename = "wallet_%d"%i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
filename = line_dialog(None, _('New Wallet'), _('Enter file name') + ':', _('OK'), filename)
if not filename:
return
full_path = os.path.join(wallet_folder, filename)
storage = WalletStorage(full_path)
if storage.file_exists:
QMessageBox.critical(None, "Error", _("File exists"))
return
wizard = InstallWizard(self.config, self.network, storage)
wallet = wizard.run('new')
if wallet:
self.new_window(full_path)
def new_window(self, path, uri=None):
# Use a signal as can be called from daemon thread
self.app.emit(SIGNAL('new_window'), path, uri)
def start_new_window(self, path, uri):
for w in self.windows:
if w.wallet.storage.path == path:
w.bring_to_top()
break
else:
wallet = self.load_wallet_file(path)
if not wallet:
return
w = ElectrumWindow(self.config, self.network, self)
w.connect_slots(self.timer)
# load new wallet in gui
w.load_wallet(wallet)
# save path
if self.config.get('wallet_path') is None:
self.config.set_key('gui_last_wallet', path)
# add to recently visited
w.update_recently_visited(path)
# initial configuration
if self.config.get('hide_gui') is True and self.tray.isVisible():
w.hide()
else:
w.show()
self.windows.append(w)
self.build_tray_menu()
self.plugins.on_new_window(w)
if uri:
w.pay_to_URI(uri)
return w
def close_window(self, window):
self.windows.remove(window)
self.build_tray_menu()
self.plugins.on_close_window(window)
def main(self):
self.timer.start()
last_wallet = self.config.get('gui_last_wallet')
if last_wallet is not None and self.config.get('wallet_path') is None:
if os.path.exists(last_wallet):
self.config.cmdline_options['default_wallet_path'] = last_wallet
if not self.start_new_window(self.config.get_wallet_path(),
self.config.get('url')):
return
signal.signal(signal.SIGINT, lambda *args: self.app.quit())
# main loop
self.app.exec_()
# clipboard persistence. see http://www.mail-archive.com/[email protected]/msg17328.html
event = QtCore.QEvent(QtCore.QEvent.Clipboard)
self.app.sendEvent(self.app.clipboard(), event)
self.tray.hide()
|
gpl-3.0
|
Eficent/odoomrp-wip
|
procurement_orderpoint_no_confirm/wizard/orderpoint_procurement.py
|
26
|
1434
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api
class ProcurementCompute(models.TransientModel):
_inherit = 'procurement.orderpoint.compute'
@api.multi
def procure_calculation(self):
config_param_obj = self.env['ir.config_parameter']
config_param = config_param_obj.search(
[('key', '=', 'procurement.order')])
if not config_param:
config_param_obj.create({'key': 'procurement.order',
'value': 'no_confirm'})
return super(ProcurementCompute, self).procure_calculation()
|
agpl-3.0
|
jonasschnelli/bitcoin
|
test/functional/test_framework/util.py
|
3
|
19892
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import unhexlify
from decimal import Decimal, ROUND_DOWN
from subprocess import CalledProcessError
import inspect
import json
import logging
import os
import re
import time
import unittest
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
from io import BytesIO
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_approx(v, vexp, vspan=0.00001):
"""Assert that `v` is within `vspan` of `vexp`"""
if v < vexp - vspan:
raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
if v > vexp + vspan:
raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError(
"Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
message, e.error['message']))
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError(
"Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
message, e.error['message']))
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
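# Hedged usage sketch (added; not part of the original file). `node` stands
# for a hypothetical TestNode RPC proxy, so this is illustrative only:
#     if try_rpc(-8, "Block height out of range", node.getblockhash, -1):
#         pass  # the RPC failed with the expected code and message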
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError("Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError("String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError("String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def EncodeDecimal(o):
if isinstance(o, Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until_helper(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None, timeout_factor=1.0):
"""Sleep until the predicate resolves to be True.
Warning: Note that this method is not recommended to be used in tests as it is
not aware of the context of the test framework. Using the `wait_until()` members
from `BitcoinTestFramework` or `P2PInterface` class ensures the timeout is
properly scaled. Furthermore, `wait_until()` from `P2PInterface` class in
`p2p.py` has a preset lock.
"""
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
timeout = timeout * timeout_factor
attempt = 0
time_end = time.time() + timeout
while attempt < attempts and time.time() < time_end:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
    predicate_source = "'''\n" + inspect.getsource(predicate) + "'''"
logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
if attempt >= attempts:
raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
elif time.time() >= time_end:
raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
raise RuntimeError('Unreachable')
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 12
# Don't assign rpc or p2p ports lower than this
PORT_MIN = int(os.getenv('TEST_RUNNER_PORT_MIN', default=11000))
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, *, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
coveragedir (str): Directory
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = int(timeout)
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert n <= MAX_NODES
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, chain, rpchost):
rpc_u, rpc_p = get_auth_cookie(datadir, chain)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n, chain):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
# Translate chain name to config name
if chain == 'testnet3':
chain_name_conf_arg = 'testnet'
chain_name_conf_section = 'test'
else:
chain_name_conf_arg = chain
chain_name_conf_section = chain
with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f:
f.write("{}=1\n".format(chain_name_conf_arg))
f.write("[{}]\n".format(chain_name_conf_section))
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("fallbackfee=0.0002\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("dnsseed=0\n")
f.write("listenonion=0\n")
f.write("printtoconsole=0\n")
f.write("upnp=0\n")
f.write("shrinkdebugfile=0\n")
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def append_config(datadir, options):
with open(os.path.join(datadir, "bitcoin.conf"), 'a', encoding='utf8') as f:
for option in options:
f.write(option + "\n")
def get_auth_cookie(datadir, chain):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "bitcoin.conf")):
with open(os.path.join(datadir, "bitcoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
try:
with open(os.path.join(datadir, chain, ".cookie"), 'r', encoding="ascii") as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
except OSError:
pass
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir, chain):
if os.path.isfile(os.path.join(datadir, chain, ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, chain, ".cookie"))
def softfork_active(node, key):
"""Return whether a softfork is active."""
return node.getblockchaininfo()['softforks'][key]['active']
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
# Transaction/Block functions
#############################
def find_output(node, txid, amount, *, blockhash=None):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1, blockhash)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for _ in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert len(utxos) >= count
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for _ in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = []
from .messages import CTxOut
txout = CTxOut()
txout.nValue = 0
txout.scriptPubKey = hex_str_to_bytes(script_pubkey)
for _ in range(128):
txouts.append(txout)
return txouts
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
from .messages import CTransaction
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(rawtx)))
for txout in txouts:
tx.vout.append(txout)
newtx = tx.serialize().hex()
signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], 0)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# generate a 66k transaction,
    # and 14 of them come close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def find_vout_for_address(node, txid, addr):
"""
Locate the vout index of the given transaction sending to the
given address. Raises runtime error exception if not found.
"""
tx = node.getrawtransaction(txid, True)
for i in range(len(tx["vout"])):
if any([addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]]):
return i
raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
def modinv(a, n):
"""Compute the modular inverse of a modulo n using the extended Euclidean
Algorithm. See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Modular_integers.
"""
# TODO: Change to pow(a, -1, n) available in Python 3.8
t1, t2 = 0, 1
r1, r2 = n, a
while r2 != 0:
q = r1 // r2
t1, t2 = t2, t1 - q * t2
r1, r2 = r2, r1 - q * r2
if r1 > 1:
return None
if t1 < 0:
t1 += n
return t1
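# Worked example (added for clarity): modinv(7, 11) == 8, because
# 7 * 8 = 56 and 56 % 11 == 1; modinv returns None when no inverse exists,
# e.g. modinv(6, 9) is None since gcd(6, 9) != 1.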
class TestFrameworkUtil(unittest.TestCase):
def test_modinv(self):
test_vectors = [
[7, 11],
[11, 29],
[90, 13],
[1891, 3797],
[6003722857, 77695236973],
]
for a, n in test_vectors:
self.assertEqual(modinv(a, n), pow(a, n-2, n))
|
mit
|
maelnor/nova
|
nova/api/openstack/__init__.py
|
14
|
16979
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
WSGI middleware for OpenStack API controllers.
"""
from oslo.config import cfg
import routes
import stevedore
import webob.dec
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import exception
from nova.i18n import _
from nova.i18n import _LC
from nova.i18n import _LI
from nova.i18n import _LW
from nova.i18n import translate
from nova import notifications
from nova.openstack.common import log as logging
from nova import utils
from nova import wsgi as base_wsgi
api_opts = [
cfg.BoolOpt('enabled',
default=False,
help='Whether the V3 API is enabled or not'),
cfg.ListOpt('extensions_blacklist',
default=[],
help='A list of v3 API extensions to never load. '
'Specify the extension aliases here.'),
cfg.ListOpt('extensions_whitelist',
default=[],
help='If the list is not empty then a v3 API extension '
'will only be loaded if it exists in this list. Specify '
'the extension aliases here.')
]
api_opts_group = cfg.OptGroup(name='osapi_v3', title='API v3 Options')
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_group(api_opts_group)
CONF.register_opts(api_opts, api_opts_group)
# List of v3 API extensions which are considered to form
# the core API and so must be present
# TODO(cyeoh): Expand this list as the core APIs are ported to V3
API_V3_CORE_EXTENSIONS = set(['consoles',
'extensions',
'flavor-extra-specs',
'flavor-manage',
'flavors',
'ips',
'os-keypairs',
'os-flavor-access',
'server-metadata',
'servers',
'versions'])
class FaultWrapper(base_wsgi.Middleware):
"""Calls down the middleware stack, making exceptions into faults."""
_status_to_type = {}
@staticmethod
def status_to_type(status):
if not FaultWrapper._status_to_type:
for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError):
FaultWrapper._status_to_type[clazz.code] = clazz
return FaultWrapper._status_to_type.get(
status, webob.exc.HTTPInternalServerError)()
def _error(self, inner, req):
LOG.exception(_("Caught error: %s"), unicode(inner))
safe = getattr(inner, 'safe', False)
headers = getattr(inner, 'headers', None)
status = getattr(inner, 'code', 500)
if status is None:
status = 500
msg_dict = dict(url=req.url, status=status)
LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
outer = self.status_to_type(status)
if headers:
outer.headers = headers
# NOTE(johannes): We leave the explanation empty here on
# purpose. It could possibly have sensitive information
# that should not be returned back to the user. See
# bugs 868360 and 874472
# NOTE(eglynn): However, it would be over-conservative and
# inconsistent with the EC2 API to hide every exception,
# including those that are safe to expose, see bug 1021373
if safe:
user_locale = req.best_match_language()
inner_msg = translate(inner.message, user_locale)
outer.explanation = '%s: %s' % (inner.__class__.__name__,
inner_msg)
notifications.send_api_fault(req.url, status, inner)
return wsgi.Fault(outer)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
try:
return req.get_response(self.application)
except Exception as ex:
return self._error(ex, req)
class APIMapper(routes.Mapper):
def routematch(self, url=None, environ=None):
if url == "":
result = self._match("", environ)
return result[0], result[1]
return routes.Mapper.routematch(self, url, environ)
def connect(self, *args, **kargs):
# NOTE(vish): Default the format part of a route to only accept json
# and xml so it doesn't eat all characters after a '.'
# in the url.
kargs.setdefault('requirements', {})
if not kargs['requirements'].get('format'):
kargs['requirements']['format'] = 'json|xml'
return routes.Mapper.connect(self, *args, **kargs)
class ProjectMapper(APIMapper):
def resource(self, member_name, collection_name, **kwargs):
if 'parent_resource' not in kwargs:
kwargs['path_prefix'] = '{project_id}/'
else:
parent_resource = kwargs['parent_resource']
p_collection = parent_resource['collection_name']
p_member = parent_resource['member_name']
kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection,
p_member)
routes.Mapper.resource(self, member_name,
collection_name,
**kwargs)
class PlainMapper(APIMapper):
def resource(self, member_name, collection_name, **kwargs):
if 'parent_resource' in kwargs:
parent_resource = kwargs['parent_resource']
p_collection = parent_resource['collection_name']
p_member = parent_resource['member_name']
kwargs['path_prefix'] = '%s/:%s_id' % (p_collection, p_member)
routes.Mapper.resource(self, member_name,
collection_name,
**kwargs)
class APIRouter(base_wsgi.Router):
"""Routes requests on the OpenStack API to the appropriate controller
and method.
"""
ExtensionManager = None # override in subclasses
@classmethod
def factory(cls, global_config, **local_config):
"""Simple paste factory, :class:`nova.wsgi.Router` doesn't have one."""
return cls()
def __init__(self, ext_mgr=None, init_only=None):
if ext_mgr is None:
if self.ExtensionManager:
ext_mgr = self.ExtensionManager()
else:
raise Exception(_("Must specify an ExtensionManager class"))
mapper = ProjectMapper()
self.resources = {}
self._setup_routes(mapper, ext_mgr, init_only)
self._setup_ext_routes(mapper, ext_mgr, init_only)
self._setup_extensions(ext_mgr)
super(APIRouter, self).__init__(mapper)
def _setup_ext_routes(self, mapper, ext_mgr, init_only):
for resource in ext_mgr.get_resources():
LOG.debug('Extending resource: %s',
resource.collection)
if init_only is not None and resource.collection not in init_only:
continue
inherits = None
if resource.inherits:
inherits = self.resources.get(resource.inherits)
if not resource.controller:
resource.controller = inherits.controller
wsgi_resource = wsgi.Resource(resource.controller,
inherits=inherits)
self.resources[resource.collection] = wsgi_resource
kargs = dict(
controller=wsgi_resource,
collection=resource.collection_actions,
member=resource.member_actions)
if resource.parent:
kargs['parent_resource'] = resource.parent
mapper.resource(resource.collection, resource.collection, **kargs)
if resource.custom_routes_fn:
resource.custom_routes_fn(mapper, wsgi_resource)
def _setup_extensions(self, ext_mgr):
for extension in ext_mgr.get_controller_extensions():
collection = extension.collection
controller = extension.controller
msg_format_dict = {'collection': collection,
'ext_name': extension.extension.name}
if collection not in self.resources:
LOG.warn(_LW('Extension %(ext_name)s: Cannot extend '
'resource %(collection)s: No such resource'),
msg_format_dict)
continue
LOG.debug('Extension %(ext_name)s extended resource: '
'%(collection)s',
msg_format_dict)
resource = self.resources[collection]
resource.register_actions(controller)
resource.register_extensions(controller)
def _setup_routes(self, mapper, ext_mgr, init_only):
raise NotImplementedError()
class APIRouterV21(base_wsgi.Router):
"""Routes requests on the OpenStack v2.1 API to the appropriate controller
and method.
"""
# TODO(oomichi): This namespace will be changed after moving all v3 APIs
# to v2.1.
API_EXTENSION_NAMESPACE = 'nova.api.v3.extensions'
@classmethod
def factory(cls, global_config, **local_config):
"""Simple paste factory, :class:`nova.wsgi.Router` doesn't have one."""
return cls()
def __init__(self, init_only=None, v3mode=False):
# TODO(cyeoh): bp v3-api-extension-framework. Currently load
# all extensions but eventually should be able to exclude
# based on a config file
# TODO(oomichi): We can remove v3mode argument after moving all v3 APIs
# to v2.1.
def _check_load_extension(ext):
if (self.init_only is None or ext.obj.alias in
self.init_only) and isinstance(ext.obj,
extensions.V3APIExtensionBase):
# Check whitelist is either empty or if not then the extension
# is in the whitelist
if (not CONF.osapi_v3.extensions_whitelist or
ext.obj.alias in CONF.osapi_v3.extensions_whitelist):
# Check the extension is not in the blacklist
if ext.obj.alias not in CONF.osapi_v3.extensions_blacklist:
return self._register_extension(ext)
else:
LOG.warn(_LW("Not loading %s because it is "
"in the blacklist"), ext.obj.alias)
return False
else:
LOG.warn(
_LW("Not loading %s because it is not in the "
"whitelist"), ext.obj.alias)
return False
else:
return False
if not CONF.osapi_v3.enabled:
LOG.info(_LI("V3 API has been disabled by configuration"))
return
self.init_only = init_only
LOG.debug("v3 API Extension Blacklist: %s",
CONF.osapi_v3.extensions_blacklist)
LOG.debug("v3 API Extension Whitelist: %s",
CONF.osapi_v3.extensions_whitelist)
in_blacklist_and_whitelist = set(
CONF.osapi_v3.extensions_whitelist).intersection(
CONF.osapi_v3.extensions_blacklist)
if len(in_blacklist_and_whitelist) != 0:
LOG.warn(_LW("Extensions in both blacklist and whitelist: %s"),
list(in_blacklist_and_whitelist))
self.api_extension_manager = stevedore.enabled.EnabledExtensionManager(
namespace=self.API_EXTENSION_NAMESPACE,
check_func=_check_load_extension,
invoke_on_load=True,
invoke_kwds={"extension_info": self.loaded_extension_info})
if v3mode:
mapper = PlainMapper()
else:
mapper = ProjectMapper()
self.resources = {}
# NOTE(cyeoh) Core API support is rewritten as extensions
# but conceptually still have core
if list(self.api_extension_manager):
# NOTE(cyeoh): Stevedore raises an exception if there are
# no plugins detected. I wonder if this is a bug.
self.api_extension_manager.map(self._register_resources,
mapper=mapper)
self.api_extension_manager.map(self._register_controllers)
missing_core_extensions = self.get_missing_core_extensions(
self.loaded_extension_info.get_extensions().keys())
if not self.init_only and missing_core_extensions:
LOG.critical(_LC("Missing core API extensions: %s"),
missing_core_extensions)
raise exception.CoreAPIMissing(
missing_apis=missing_core_extensions)
super(APIRouterV21, self).__init__(mapper)
@staticmethod
def get_missing_core_extensions(extensions_loaded):
extensions_loaded = set(extensions_loaded)
missing_extensions = API_V3_CORE_EXTENSIONS - extensions_loaded
return list(missing_extensions)
@property
def loaded_extension_info(self):
raise NotImplementedError()
def _register_extension(self, ext):
raise NotImplementedError()
def _register_resources(self, ext, mapper):
"""Register resources defined by the extensions
Extensions define what resources they want to add through a
get_resources function
"""
handler = ext.obj
LOG.debug("Running _register_resources on %s", ext.obj)
for resource in handler.get_resources():
LOG.debug('Extended resource: %s', resource.collection)
inherits = None
if resource.inherits:
inherits = self.resources.get(resource.inherits)
if not resource.controller:
resource.controller = inherits.controller
wsgi_resource = wsgi.Resource(resource.controller,
inherits=inherits)
self.resources[resource.collection] = wsgi_resource
kargs = dict(
controller=wsgi_resource,
collection=resource.collection_actions,
member=resource.member_actions)
if resource.parent:
kargs['parent_resource'] = resource.parent
# non core-API plugins use the collection name as the
# member name, but the core-API plugins use the
# singular/plural convention for member/collection names
if resource.member_name:
member_name = resource.member_name
else:
member_name = resource.collection
mapper.resource(member_name, resource.collection,
**kargs)
if resource.custom_routes_fn:
resource.custom_routes_fn(mapper, wsgi_resource)
def _register_controllers(self, ext):
"""Register controllers defined by the extensions
Extensions define what resources they want to add through
a get_controller_extensions function
"""
handler = ext.obj
LOG.debug("Running _register_controllers on %s", ext.obj)
for extension in handler.get_controller_extensions():
ext_name = extension.extension.name
collection = extension.collection
controller = extension.controller
if collection not in self.resources:
LOG.warn(_LW('Extension %(ext_name)s: Cannot extend '
'resource %(collection)s: No such resource'),
{'ext_name': ext_name, 'collection': collection})
continue
LOG.debug('Extension %(ext_name)s extending resource: '
'%(collection)s',
{'ext_name': ext_name, 'collection': collection})
resource = self.resources[collection]
resource.register_actions(controller)
resource.register_extensions(controller)
|
apache-2.0
|
mythmon/airmozilla
|
airmozilla/popcorn/renderer.py
|
2
|
4098
|
import datetime
import os
import tempfile
import time
from uuid import uuid4
import boto
from boto.s3.key import Key
from funfactory.urlresolvers import reverse
from popcoder.popcoder import process_json
from django.db import transaction
from django.conf import settings
from django.utils import timezone
from airmozilla.base.utils import build_absolute_url, prepare_vidly_video_url
from airmozilla.popcorn.models import PopcornEdit
from airmozilla.main.models import Event, VidlySubmission
from airmozilla.manage import vidly
from airmozilla.uploads.models import Upload
@transaction.atomic
def render_edit(edit_id, verbose=False):
edit = PopcornEdit.objects.get(id=edit_id)
event = edit.event
filename = '%s.webm' % edit.id
filepath = os.path.join(tempfile.gettempdir(), filename)
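    # Only render a new webm if there is no existing, non-empty temp file from
    # a previous run; otherwise the file already on disk is re-used for the
    # upload below.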
    if not (os.path.isfile(filepath) and
            os.stat(filepath).st_size > 0):
edit.status = PopcornEdit.STATUS_PROCESSING
edit.save()
if verbose:
print 'Rendering file at %s' % filepath
process_json(
data=edit.data['data'],
out=filepath,
background_color=edit.data['background']
)
prefix = datetime.datetime.utcnow().strftime('%Y/%m/%d/')
upload_file_name = prefix + uuid4().hex[:13] + '.webm'
if verbose:
print 'Connecting to s3 and uploading as %s' % upload_file_name
# Uploads file to s3
connection = boto.connect_s3(
settings.AWS_ACCESS_KEY_ID,
settings.AWS_SECRET_ACCESS_KEY,
)
bucket = connection.lookup(settings.S3_UPLOAD_BUCKET)
if bucket is None:
if verbose:
print 'Creating bucket %s' % settings.S3_UPLOAD_BUCKET
bucket = connection.create_bucket(settings.S3_UPLOAD_BUCKET)
video_key = Key(bucket)
video_key.key = upload_file_name
start = time.time()
video_key.set_contents_from_filename(filepath)
end = time.time()
video_url = video_key.generate_url(expires_in=0, query_auth=False)
if verbose:
print 'Video uploaded to S3 at url: %s' % video_url
video_url = prepare_vidly_video_url(video_url)
filesize = os.stat(filepath).st_size
upload = Upload.objects.create(
event=event,
user=edit.user,
url=video_url,
file_name=upload_file_name,
mime_type='video/webm',
size=filesize,
upload_time=int(end - start)
)
if verbose:
print 'Upload object created with id: %s' % upload.id
webhook_url = build_absolute_url(reverse('popcorn:vidly_webhook'))
token_protection = event.privacy != Event.PRIVACY_PUBLIC
# if the original submission was *without* HD stick to that
hd = True
if 'vid.ly' in event.template.name.lower():
submissions = (
VidlySubmission.objects
.filter(event=event)
.order_by('submission_time')
)
for submission in submissions[:1]:
hd = submission.hd
tag, error = vidly.add_media(
url=video_url,
token_protection=token_protection,
hd=hd,
notify_url=webhook_url,
)
if verbose:
print 'Vidly media added with tag %s' % tag
VidlySubmission.objects.create(
event=event,
url=video_url,
tag=tag,
        hd=hd,
submission_error=error
)
# raise exception if error
if error:
raise Exception(error)
edit.status = PopcornEdit.STATUS_SUCCESS
edit.finished = timezone.now()
edit.save()
if verbose:
print 'Removing file at %s' % filepath
os.remove(filepath)
def render_all_videos(verbose=False):
# Re process if start time is more than 2 hours
pending_edits = PopcornEdit.objects.filter(
status=PopcornEdit.STATUS_PENDING
)
if pending_edits.count() > 1:
if verbose:
print "Currently more than one edit. Skipping for now"
print pending_edits.count()
pending_edits = pending_edits[:1]
for pending_edit in pending_edits:
render_edit(pending_edit.id, verbose=verbose)
|
bsd-3-clause
|
adrian-soto/QEdark_repo
|
tools/bandsndos/bandsndos_Ge.py
|
4
|
20068
|
#
# Adrian Soto
# 22-12-2014
# Stony Brook University
#
################################################
# Plot band structure and DOS from the
# output of the bands.x program in the
# Quantum Espresso package.
#
# Features:
# 1) Allows for scissor correction (band shift)
# 2)
#
################################################
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.ticker import AutoMinorLocator
import matplotlib.gridspec as gridspec
import csv
plt.rcParams['font.family'] = 'Serif'
plt.rcParams['font.serif'] = 'Times New Roman'
#rcParams['text.usetex'] = True
rcParams['font.size'] = 24
class band:
def __init__(self, numkpoints, bandenergies):
self.nks = numkpoints
if (len(bandenergies) != numkpoints):
print "ERROR: list of band energies has wrong length. Setting band to 0."
self.nrg = [0] * numkpoints
else:
self.nrg = bandenergies
def printband(self):
print self.nrg
def shift(self, delta):
self.nrg = map(lambda x : x+delta, self.nrg) # watch for scope here.
return
################################################
# End of class band
################################################
class kpoints:
def __init__(self):
self.klist = []
class dos:
def __init__(self): #, numE, dosE, dosG, dosI):
self.numE = 0
self.dosE = []
self.dosG = []
self.dosI = []
def Load(self, dosfile):
#
# Load DOS from dos.x output
#
print " "
print "Loading DOS from ", dosfile
print " "
# Count lines in file
self.numE=sum(1 for line in open(dosfile))
# Read file line by line and process
f=open(dosfile, 'r')
# First line is header. Discard
data=f.readline()
# Iterate over file lines
for ilin in range(1,self.numE):
data=f.readline()
E=float(data[0:7])
self.dosE.append(E)
G=float(data[9:19])
self.dosG.append(G)
I=float(data[21:31])
self.dosI.append(I)
f.close()
return
################################################
# End of class dos
################################################
#
# Global functions
#
def w0gauss(x):
# As in flib/w0gauss.f90 in the QE package
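    # This appears to correspond to the Marzari-Vanderbilt "cold smearing"
    # branch of w0gauss (ngauss = -1 in QE): a smeared approximation to the
    # delta function, used below to broaden band energies when building the DOS.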
pi = 3.141592653589793
sqrt2=math.sqrt(2)
arg = min([200.0, (x - 1.0 / sqrt2 ) **2])
w0 = (1.0/math.sqrt(pi)) * math.exp(-1.0 * arg )*(2.0 - sqrt2*x)
return w0
def ReadBandStructure(bandsfile):
#
# This function reads the band structure as written
# to output of the bands.x program. It returns the bs
# as a flat list with all energies and another list with
# the k-point coordinates.
#
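    # Example of the header line whose fixed-width columns the slices below
    # assume (values are illustrative only):
    #   " &plot nbnd=  32, nks= 251 /"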
f = open(bandsfile, 'r')
# First line contains nbnd and nks. Read.
currentline = f.readline()
nks = int(currentline[22:26])
nbnd = int(currentline[12:16])
# Following lines contain the k-point coordinates
# and the band energies.
# Calculate number of lines containing band structure:
# nks k-point lines
# At each k-point there are (1+nbnd/10) energy values.
nlpkp = 1+nbnd/10 # Number of Lines Per K-Point
nlines = nks + nks * nlpkp
bsaux = []
xk = []
for ik in range (0, nks):
currentline = f.readline()
#kpoint = currentline[12:40]
kpoint = [float(x) for x in currentline.split()]
xk.append(kpoint)
auxenerg = []
for ibnd in range(0, nlpkp):
currentline = f.readline()
# append current line to auxiliary list
auxenerg.append( float(x) for x in currentline.split() )
# flatten list of lists containing energies for a given kpoint
# (each sublist corresponds to one line in the bands.dat file)
energ = [item for sublist in auxenerg for item in sublist]
# Sort ascendingly band energies for current k-point (to
# prevent artificial level crossings if QE bands.x output
# does not sort them correctly) and append to band structure
bsaux.append(sorted(energ))
f.close()
# Flatten bs list
bsflat = [item for sublist in bsaux for item in sublist]
return nks, nbnd, xk, bsflat
def SortByBands(nks, nbnd, bsflat):
# Rearrarange bs from k-points to bands
bs = []
for ibnd in range (0, nbnd):
currentband=[]
for ik in range (0, nks):
#currentband.append(bsflat[ik*nbnd+ibnd])
bs.append(bsflat[ik*nbnd+ibnd])
#bs.append( currentband )
return bs
def FindHLGap(nks, hvb, lcb):
#
# Find HOMO and LUMO energies and energy gap
#
# hvb = highest valence band
# lcb = lowest conduction band
#
# Ehvb = highest valence energy or HOMO energy
# Elcb = lowest conduction energy or LUMO energy
#
gap = lcb[0] - hvb[0]
for ik1 in range (0, nks):
auxcond = lcb[ik1]
for ik2 in range (0, nks):
auxval = hvb[ik2]
currentgap = auxcond-auxval
if (currentgap < 0.0):
print "ERROR: negative gap"
elif (currentgap < gap):
gap = currentgap
Ehvb = max(hvb)
Elcb = min(lcb)
return Ehvb, Elcb, gap
def Scissor(nks, newgap, bands, shifttype):
#
# shifttype == 0 : shift valence bands by -0.5*delta and
# conduction bands by 0.5*delta
# shifttype == 1 : as in 0 but placing the highest valence
# energy at 0.0
# shifttype == 2 : as in 0 but placing the gap center at 0.0
#
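    # The correction is split evenly: valence bands are shifted down by delta
    # and conduction bands up by delta, with 2*delta = newgap - oldgap.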
EHOMO, ELUMO, oldgap = FindHLGap(nks, bands[nval-1].nrg , bands[nval].nrg)
delta=(newgap-oldgap)/2.0
# Apply scissor to band structure
for ibnd in range (0, nbnd):
if (ibnd < nval):
bands[ibnd].shift(-1.0*delta)
else:
bands[ibnd].shift(delta)
if (shifttype==0):
print "Scissor correction to band energies has been applied."
return
elif (shifttype==1):
EHOMO, ELUMO, gap = FindHLGap(nks, bands[nval-1].nrg , bands[nval].nrg)
delta = -1.0*EHOMO
#print "delta=", delta
for ibnd in range (0, nbnd):
bands[ibnd].shift(delta)
print "Scissor correction to band energies has been applied."
print "Highest valence energy has been set to 0.0 eV"
return
elif (shifttype==2):
EHOMO, ELUMO, gap = FindHLGap(nks, bands[nval-1].nrg , bands[nval].nrg)
delta = -0.5*(EHOMO+ELUMO)
for ibnd in range (0, nbnd):
bands[ibnd].shift(delta)
print "Scissor correction to band energies has been applied."
print "Gap center has been set to 0.0 eV"
return
else:
print "ERROR: shifttype has an non-valid value. Default value shifttype==0."
print "Scissor correction to band energies has been applied."
return
def CreateDOS(nks, nbnd, bzv, Emin, Emax, deltaE, bnd, normalize):
# ATTENTION: bnd must be an object of the class band
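    # NOTE: the Emin/Emax arguments are overridden just below; the energy
    # window runs from the bottom of band index 10 (presumably skipping
    # low-lying semicore bands) to the top of the highest band.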
Emin = min(bnd[10].nrg)
Emax = max(bnd[nbnd-1].nrg)
ndos = int((Emax - Emin)/deltaE + 0.50000001) # int always rounds to lower integer
dosE = []
dosG = []
intg=0.0
deltaEgauss=5.0*deltaE
d3k=(1.0/nks)*bzv
wk=2.0/nks
print "Creating DOS with uniform k-point weights"
# Create DOS
for idos in range (0, ndos):
E = Emin + idos * deltaE
dosg = 0.0
for ik in range(0, nks):
for ibnd in range (0, nbnd):
dosg = dosg + w0gauss ( (E - bnd[ibnd].nrg[ik] ) / deltaEgauss ) * wk
###dosg = dosg + w0gauss ( (E - bnd[ibnd].nrg[ik] ) / deltaE ) * wk
dosg = dosg/deltaEgauss
intg = intg + dosg*deltaE # integrated DOS
dosE.append(E)
dosG.append(dosg)
print "\n Integrated DOS=", intg, "\n"
# Normalize DOS
if (normalize == 1):
print "Normalizing DOS to 1.0 \n"
dosGnorm=dosG
for idos in range (0, ndos):
dosGnorm[idos]=dosGnorm[idos]/intg
return dosE, dosGnorm
if(normalize==0):
return dosE, dosG
else:
print " ERROR!! in CreateDOS function: wrong DOS normalization choice."
return
def PlotBandStructure(nbnd, nval, bnd, plotfile, Ef, sympoints, nks_btw_sympoints ):
#
# ATTENTION: bnd must be an object of the class band
#
# nval: number of valence bands
    # Ef: reference energy for the dashed horizontal line (e.g. 0.0 after the
    #     scissor shift). Note that the check below only draws the line when
    #     Ef evaluates to false (i.e. 0.0).
# sympoints: list containing labels of symmetry points
# nks_btw_sympoints: number of k-points between symmetry points
#
# NOTE: this function assumes that the number of points
# between symmetry points is constant
#
print "Plotting band structure to", plotfile
col = 'k'
for ibnd in range (0, nbnd):
#if (ibnd < nval):
# col='b'
#else:
# col='r'
plt.plot(bnd[ibnd].nrg, markersize=2, linestyle='-', color=col) #marker = 'o')
y_min = min(bnd[0].nrg)
    y_max = max(bnd[nbnd-1].nrg)
plt.xlabel("Brillouin zone path")
plt.ylabel("band energies (eV)")
numsympoints = len(sympoints)
kpath=[]
xticks = range(0, numsympoints*nks_btw_sympoints + 1, nks_btw_sympoints)
for i in range(0, numsympoints):
kpath.append(sympoints[i])
if (i < numsympoints-1):
for j in range (0, nks_btw_sympoints-1):
kpath.append('')
# plt.axvline(x=xticks, ymin=0, ymax=1, hold=None, **kwargs)
# Ticks and vertical lines across BS plot
plt.xticks(xticks, sympoints)
for i in range(0,numsympoints):
plt.axvline(x=xticks[i], ymin=y_min, ymax=y_max, hold=None, color='k', linewidth=0.25)
if (not Ef):
plt.axhline(Ef, color="black", linestyle="--")
plt.xlim( 0, len(bnd[0].nrg)-1 )
plt.savefig(plotfile)
return
def PlotDOS(dosE, dosG, plotname):
# ATTENTION: dosG and dosE must be lists of reals
plt.plot(dosG, dosE)
plt.xlabel("Density Of States")
plt.ylabel("band energies (eV)")
plt.gca().set_xlim(left=0)
plt.savefig(plotname)
return
def PlotBnD(nbnd, nval, bnd, Ef, sympoints, nks_btw_sympoints, dosE, dosG, plotname):
col = 'k'
# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
for ibnd in range (0, nbnd):
ax1.plot(bnd[ibnd].nrg, markersize=2, linestyle='-', color=col) #marker = 'o')
ax1.set_title('Sharing Y axis')
ax2.plot(dosG, dosE)
ax2.set_xlim([0.0, 0.1])
plt.ylim([-15.0, 20.0])
#plt.subplots_adjust(left=0.0, right=0.8)
plt.subplots_adjust(wspace = 0.0)
plt.show()
return
def PlotBnDD(nbnd, nval, bnd, Ef, sympoints, nks_btw_sympoints, sym_pt_dists, dosE1, dosG1, dosE2, dosG2, plotname):
######################################
# Plot generation and formatting
######################################
# Two subplots, unpack the axes array immediately
gs = gridspec.GridSpec(1, 2,width_ratios=[1,4])
f = plt.figure()
ax1 = plt.subplot(gs[1])
ax2 = plt.subplot(gs[0])
# Formatting
col = 'k'
ax1.set_xlabel("Brillouin zone path")
ax1.xaxis.set_label_position("bottom")
ax1.set_ylabel("E [eV]", rotation=270)
ax1.yaxis.set_label_position("right")
ax1.text(3.50-0.12, -12.50, 'Ge', fontsize=28)
###ax2.text(0.07, 18.00, 'Si', fontsize=18)
ax2.set_xlabel("DOS \n [eV$^{-1}$]")
ax2.xaxis.set_label_position("top")
#ax2.set_ylabel("E [eV]", rotation=270)
#y_min = -32.0
y_min = -13.0
y_max = 20.0
x2_min = 0.00
x2_max = 5.00
# Mirror
x2_min = 0.12
x2_max = 0.00
ax1.set_ylim([y_min, y_max])
ax2.set_xlim([x2_min, x2_max])
#ax2.set_xlim([0.0, 10.0])
ax2.set_ylim([y_min, y_max])
# Ticks
#minor_locator = AutoMinorLocator(2)
#ax2.xaxis.set_minor_locator(minor_locator)
# Number of symmetry points
numsympoints = len(sympoints)
# Generate horizontal axis containing k-path accumulated length (for BS plot)
x=0.0
klen=[x]
dx=1.0/((numsympoints-1)*nks_btw_sympoints)
for isym in range(0, numsympoints-1):
dx=sym_pt_dists[isym]/nks_btw_sympoints
for ipt in range(1, nks_btw_sympoints+1):
x=x+dx
klen.append(x)
#xticks = range(0, numsympoints*nks_btw_sympoints + 1, nks_btw_sympoints)
xticks=[]
for isym in range(0, numsympoints):
j = isym * nks_btw_sympoints
xticks.append(klen[j])
x1_min=min(xticks)
x1_max=max(xticks)
ax1.set_xlim(x1_min, x1_max)
# Plot bands
col = '0.4'
for ibnd in range (0, nbnd):
ax1.plot(klen , bnd[ibnd].nrg, markersize=2, linestyle='-', color=col) #marker = 'o')
# plt.axvline(x=xticks, ymin=0, ymax=1, hold=None, **kwargs)
# Ticks and vertical lines across BS plot
ax1.set_xticks(xticks)
ax1.set_xticklabels(sympoints)
# Plot DOSs
ax2.plot(dosG1, dosE1, linestyle='-', linewidth=1.0, color='b')
ax2.plot(dosG2, dosE2, linestyle='-', color='r')
#dosticks=[0.0, 0.05, 0.1, 0.15]
dosticks=[5, 0] # Mirror
ax2.set_xticks(dosticks)
ax2.set_xticklabels(dosticks)
#minor_locator = AutoMinorLocator(5)
#ax2.xaxis.set_minor_locator(minor_locator)
minorx2ticks=[4, 3, 2, 1]
ax2.set_xticks(minorx2ticks, minor = True)
# BS ticks
yticks=[-10, -5, 0, 5, 10, 15, 20]
minor_locator = AutoMinorLocator(5)
ax1.yaxis.set_minor_locator(minor_locator)
ax2.yaxis.set_minor_locator(minor_locator)
ax1.xaxis.tick_top()
#ax1.set_yticks(yticks)
#ax1.set_yticklabels(yticks)
# Mirror
ax1.yaxis.tick_right()
ax1.set_yticks(yticks)
ax1.set_yticklabels(yticks)
ax2.set_yticklabels([])
#plt.subplots_adjust(left=0.0, right=0.8)
plt.subplots_adjust(wspace = 0.0)
# Attempt to fill the area to the left of the DOS
# split values into positive and negative
alpha_fill=0.5
dosE1neg=[]
dosG1neg=[]
dosE1pos=[]
dosG1pos=[]
for i in range(0, len(dosE1)):
if(dosE1[i]<0.0):
dosE1neg.append(dosE1[i])
dosG1neg.append(dosG1[i])
else:
dosE1pos.append(dosE1[i])
dosG1pos.append(dosG1[i])
dosE1new =[y_min]+dosE1+[y_max]
dosG1new =[0.0]+dosG1+[0.0]
ax2.fill_between(dosG1new, 0, dosE1new, alpha=alpha_fill, linewidth=0.0, edgecolor='w')
# Vertical lines across BS plot
for i in range(0,numsympoints):
ax1.axvline(x=xticks[i], ymin=y_min, ymax=y_max, color='k', linewidth=0.25)
# Horizontal line at top of valence band
if (not Ef):
ax1.axhline(Ef, color="black", linestyle="--")
ax2.axhline(Ef, color="black", linestyle="--")
#plt.show()
plt.savefig(plotname, bbox_inches='tight')
return
def PlotMultipleDOS(dosE, dosG, plotname):
# ATTENTION: dosG and dosE must be lists of lists of reals
Ndos=len(dosE[:])
for i in range(0, Ndos):
plt.plot(dosG[i], dosE[i])
plt.xlabel("Density Of States")
plt.ylabel("band energies (eV)")
plt.savefig(plotname)
return
#def WriteBandStructure():
# print (" %10.6f%10.6f%10.6f" % (kpoint[0], kpoint[1], kpoint[2]) )
############################################################################################
############################################################################################
############################################################################################
############################################################################################
############################ PROGRAM STARTS HERE ###################################
############################################################################################
############################################################################################
############################################################################################
############################################################################################
bohr2ang=0.52918
############
# Band structure
############
filename="ge.bands.dat"
nks = 0
nbnd=0
xk=[]
bsflt=[]
bs=[]
sympoints=['$L$','$\Gamma$', '$X$', '$W$', '$K$', '$\Gamma$']
sym_pt_dists=[0.5*math.sqrt(3), 1.0, 0.5, 0.25*math.sqrt(2), 0.75*math.sqrt(2)] ## distances between symmetry points (by hand)
nks_btw_sympoints=50
# Read from file and sort bs by bands
nks, nbnd, xk, bsflt = ReadBandStructure(filename)
if(nbnd==0):
print "%% ERROR READING BANDS. EXIT %%"
else:
bs = SortByBands(nks, nbnd, bsflt)
print "nks=", nks
print "nbnd=", nbnd
# Create band objects
bands=[]
for ibnd in range (0, nbnd):
ledge = ibnd*nks
redge = ledge+nks
currentband = bs[ledge:redge]
bands.append( band(nks, currentband) )
# Scissor correction
# Si
###alat = 10.330495 # Bohr
###nval = 4 # for Si
###exptgap = 1.11 # eV # Si
# Ge
alat = 10.8171069 # Bohr
nval = 14 # for Ge with semicore
exptgap = 0.67 # Ge
# Convert to ANG and calculate BZV
alat=alat*bohr2ang
V=(alat**3)/4.0 # Good for FCC
bzv = (2.0*math.pi)**3/V
ncond = nbnd - nval
Scissor(nks, exptgap, bands, 1) # 3rd argument is 1. Then set 3rd argument of PlotBandStructure to 0.0
print "Scissor correction with gap set to", exptgap
#############
# DOS
#############
filename='ge.bands_full.dat'
nks1, nbnd1, xk1, bsflt1 = ReadBandStructure(filename)
if(nbnd==0):
print "%% ERROR READING BANDS. EXIT %%"
else:
bs1 = SortByBands(nks1, nbnd1, bsflt1)
print "nks=", nks1
print "nbnd=", nbnd1
# Create band objects
bands1=[]
for ibnd in range (0, nbnd1):
ledge1 = ibnd*nks1
redge1 = ledge1+nks1
currentband1 = bs1[ledge1:redge1]
bands1.append( band(nks1, currentband1) )
# Scissor correction
Scissor(nks1, exptgap, bands1, 1) # 3rd argument is 1. Then set 3rd argument of PlotBandStructure to 0.0
print "Scissor correction with gap set to", exptgap
filename='ge.bands_243.dat'
nks2, nbnd2, xk2, bsflt2 = ReadBandStructure(filename)
if(nbnd==0):
print "%% ERROR READING BANDS. EXIT %%"
else:
bs2 = SortByBands(nks2, nbnd2, bsflt2)
print "nks=", nks2
print "nbnd=", nbnd2
# Create band objects
bands2=[]
for ibnd in range (0, nbnd2):
ledge2 = ibnd*nks2
redge2 = ledge2+nks2
currentband2 = bs2[ledge2:redge2]
bands2.append( band(nks2, currentband2) )
# Scissor correction
Scissor(nks2, exptgap, bands2, 1) # 3rd argument is 1. Then set 3rd argument of PlotBandStructure to 0.0
print "Scissor correction with gap set to", exptgap
# Generate DOSs
deltaE = 0.03 #eV
dosE1, dosG1 = CreateDOS(nks1, nbnd1, bzv, -13.0, 25.0, deltaE, bands1, 0)
dosE2, dosG2 = CreateDOS(nks2, nbnd2, bzv, -13.0, 25.0, deltaE, bands2, 0)
# Plot
#PlotDOS(dosE, dosG, "DOS.pdf")
#PlotBandStructure(nbnd, nval, bands, "BS.pdf", 0.0, sympoints, nks_btw_sympoints)
PlotBnDD(nbnd, nval, bands, 0.0, sympoints, nks_btw_sympoints, sym_pt_dists, dosE1, dosG1, dosE2, dosG2, "BSnDOS.pdf")
# DOS
#mydos=dos()
#mydos.Load('dos_full.dat')
#mydos.Printout()
|
gpl-2.0
|
YongseopKim/crosswalk-test-suite
|
webapi/tct-wgtapi02-w3c-tests/inst.apk.py
|
903
|
3180
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARAMETERS = None
ADB_CMD = "adb"
def doCMD(cmd):
# Do not need handle timeout in this short script, let tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
        if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s uninstall org.xwalk.%s" % (
ADB_CMD, PARAMETERS.device, os.path.basename(os.path.splitext(file)[0]))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def instPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s install %s" % (ADB_CMD,
PARAMETERS.device, os.path.join(root, file))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.device:
(return_code, output) = doCMD("adb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
if not PARAMETERS.device:
print "No device found"
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
|
bsd-3-clause
|
jean/apx
|
apx/sprites.py
|
1
|
13169
|
# - coding: utf-8 -
# Copyright (C) 2013-2014 Toms Bauģis <toms.baugis at gmail.com>
import math
import random
from gi.repository import GObject as gobject
from lib import graphics
from lib.pytweener import Easing
from lib import game_utils
from lib import layout
import colors
class Label(layout.Label):
def __init__(self, *args, **kwargs):
layout.Label.__init__(self, *args, **kwargs)
self.font_desc = "04b03 %d" % (kwargs.get("size") or 18)
self.color = kwargs.get("color") or "#eee"
self.cache_as_bitmap=True
class ScoreLabel(Label):
"""a label that takes a score attribute that can be tweened because it's
a number"""
def __init__(self, template="%d", score=0, *args, **kwargs):
Label.__init__(self, *args, **kwargs)
self.template = template
self.score = score
def __setattr__(self, name, val):
Label.__setattr__(self, name, val)
if name == "score":
self.markup = self.template % val
class Cubic(graphics.Sprite):
def __init__(self, color=None, **kwargs):
graphics.Sprite.__init__(self, **kwargs)
self._speeds = {
"fast": 4,
"slow": 2,
}
self.drawing_speed = self._speeds["fast"]
self._speed = "fast"
self.connect("on-render", self.on_render)
self.current_line = None
self.drawing = False
self.snap_to_pixel = True
self.rotation = math.radians(45)
self.color = color or "#eee"
self.width = self.height = 10 # for layout
def on_render(self, sprite):
self.graphics.fill_area(-7.5, -7.5, 14, 14, self.color)
@property
def current_speed(self):
for speed_name, speed in self._speeds.iteritems():
if speed == self.drawing_speed:
return speed_name
return "fast"
def set_drawing(self, drawing):
self.drawing = drawing
if drawing:
self.drawing_speed = self.speed
@property
def speed(self):
return self._speeds[self._speed]
@speed.setter
def speed(self, speed):
speed_number = self._speeds[speed]
# register the fastest speed we used to draw
self.drawing_speed = max(self.speed, speed_number)
self._speed = speed
def blowup(self, callback=None, explode=True):
def kill(sprite, do_callback=False):
sprite.parent.remove_child(sprite)
if do_callback and callback:
callback(self)
for i in range(5):
from_scale, to_scale = 1, self.scale_x + 1 + i * 5
from_opacity, to_opacity = 0.8, 0
if not explode:
to_scale, from_scale = from_scale, to_scale
from_opacity, to_opacity = to_opacity, from_opacity
another_cube = Cubic(x=self.x, y=self.y,
scale_x=from_scale, scale_y=from_scale,
opacity=from_opacity,
z_order=5000-i)
self.parent.add_child(another_cube)
if i == 4:
# we will hang callback to the last of the guys
on_complete = lambda sprite: kill(sprite, True)
else:
on_complete = lambda sprite: kill(sprite)
another_cube.animate(scale_x=to_scale, scale_y=to_scale,
opacity=to_opacity,
duration= (i + 1) / 5.0,
on_complete=on_complete)
def beam_out(self, callback=None):
self.visible = False
self.blowup(callback)
def beam_in(self, callback):
self.visible = True
self.blowup(callback, False)
class Qix(graphics.Sprite):
"""the qix has random movement that tries to stick within the set
degrees of angle, so that it appears to have an agenda"""
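    # Movement happens in hops: next_target() picks a point at a random
    # distance, at an angle close to the current heading (widening the search
    # cone when it gets stuck outside the playable area), and move() then
    # eases towards that point over a random number of steps.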
def __init__(self, color=None, angle=0, **kwargs):
graphics.Sprite.__init__(self, **kwargs)
# number of steps it takes to get from A to B
self.min_steps = 10
self.max_steps = 30
# the bigger the distance the bigger the hop
# the faster it will move
self.min_distance = 30
self.max_distance = 150
self.next_ticks = 0
self.current_tick = 0
self.dx, self.dy = 0, 0
self.started_moving = False
self.next_x, self.next_y = 0, 0
self.next_distance = 0
self.current_angle = angle
self.claimed = False
self.prev_x, self.prev_y = 0, 0
self.color = color
self.steps = 10
self.current_step = 0
self.shadow_coords = []
self.shadow_count = 15
for i in range(self.shadow_count):
self.add_child(graphics.Rectangle(20, 20, pivot_x=10, pivot_y=10,
fill=graphics.Colors.darker(self.color, i * 5),
opacity=0.8 - (i * 0.7 / self.shadow_count)))
self.connect("on-render", self.on_render)
def on_render(self, sprite):
if not self.debug:
return
self.graphics.move_to(-10, -10)
self.graphics.line_to([(+10, -10),
(+10, +10),
(-10, +10),
(-10, -10)])
self.graphics.stroke("#f00")
def explode(self):
for i, sprite in enumerate(self.sprites):
degree = math.radians(i * 1.0 / len(self.sprites) * 360)
sprite.animate(x=math.cos(degree) * 1200,
y=math.sin(degree) * 1200,
scale_x = 20,
scale_y = 20,
duration=1.4,
# fill="#fff",
rotation=120,
easing=Easing.Expo.ease_in,
)
#self.animate(rotation=10, duration=1.4)
def move(self, game_rects):
if not self.started_moving:
self.started_moving = True
self.next_target(self)
self.current_step += 1
# push us closer to the target
factor = Easing.Linear.ease_in(self.current_step * 1.0 / self.steps)
x = self.prev_x * (1 - factor) + self.next_x * factor
y = self.prev_y * (1 - factor) + self.next_y * factor
self.x, self.y = x, y
self.shadow_coords.insert(0, (x, y))
self.shadow_coords = self.shadow_coords[:self.shadow_count]
if self.current_step == self.steps:
self.next_target()
self._update_children()
def touching_poly(self, poly):
poly_lines = [(dot1, dot2) for dot1, dot2 in zip(poly, poly[1:])]
x1, y1, x2, y2 = int(self.x) - 10, int(self.y) - 10, \
int(self.x) + 10, int(self.y) + 10
qix_box = game_utils._bounding_box((x1, y1), (x2, y2))
# first do a cheap run on the bounding box
if len(poly_lines) > 1:
(xb1, yb1), (xb2, yb2) = game_utils.box_range(poly)
if not any((xb1 <= x <= xb2 and yb1 <= y <= yb2 for (x, y) in qix_box)):
return False
for line1 in zip(qix_box, qix_box[1:]):
for line2 in poly_lines:
if game_utils.intersection(line1, line2):
return True
def next_target(self, sprite=None):
self.prev_x, self.prev_y = self.x, self.y
scene = self.get_scene()
if not scene:
return
game_rects, game_poly = scene.board.game_rects, scene.board.game_area
game_lines = [(dot1, dot2) for dot1, dot2 in zip(game_poly, game_poly[1:])]
delta_angle = 0
angle_range = 180
in_area, stuck = False, 0
while not in_area and stuck < 10:
stuck += 1
delta_angle = self.current_angle - math.radians(angle_range / 2) + random.random() * math.radians(angle_range)
distance = random.randint(self.min_distance, self.max_distance)
x, y = self.x + distance * math.cos(delta_angle), self.y + distance * math.sin(delta_angle)
x, y = int(x), int(y)
dots = [(x, y)]
in_area = all((game_utils.in_area(dot, game_rects) for dot in dots))
if in_area:
# check for overlaps
line_pairs = ((((x, y), (self.x, self.y)), line) for line in game_lines)
for pair in line_pairs:
if game_utils.intersection(*pair):
in_area = False
break
if not in_area:
angle_range += 60
self.current_angle = delta_angle % math.radians(360)
self.steps = random.randint(self.min_steps, self.max_steps)
self.current_step = 0
if stuck < 10:
self.next_x, self.next_y = x, y
self.next_distance = distance
def _update_children(self):
x2, y2 = self.x, self.y
for i, (x, y) in enumerate(self.shadow_coords):
sprite = self.sprites[i]
sprite.x, sprite.y = x - x2 - 10, y - y2 - 10
sprite.rotation += 0.05
class Spark(graphics.Sprite):
__gsignals__ = {
"confused": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
}
def __init__(self, speed = 3, clockwise=True, **kwargs):
graphics.Sprite.__init__(self, **kwargs)
self.clockwise = clockwise
self.speed = speed
self._polys_stack = []
self.current_line = None
self.frozen = True
self.connect("on-render", self.on_render)
def on_render(self, sprite):
self.graphics.fill_area(-5.5, -5.5, 10, 10, "#fff")
def next(self, dot, poly):
return game_utils.next_dot(dot, poly) if self.clockwise else game_utils.prev_dot(dot, poly)
def show_confusion(self):
self.emit("confused")
def comeback(sprite):
def unfreeze(sprite):
self.frozen = False
self.clockwise = not self.clockwise
self.animate(scale_x=1, scale_y=1,
duration=1,
easing=Easing.Sine.ease_out,
on_complete=unfreeze)
self.frozen = True
self.animate(scale_x=2, scale_y=2,
duration=1,
rotation=self.rotation - math.pi/2,
easing=Easing.Sine.ease_out,
on_complete=comeback)
def move(self, poly):
if self.frozen:
return
dot = (self.x, self.y)
# check if we are still on the new poly, because if we are not, then
# we will keep walking the old one until we get back on track
if game_utils.on_line(dot, poly):
self._polys_stack = [poly]
else:
if poly not in self._polys_stack:
self._polys_stack.append(poly)
if len(self._polys_stack) == 2:
self.show_confusion()
# we go from the freshest to oldest poly to see if we can find ourselves
for i, p in enumerate(reversed(self._polys_stack)):
if game_utils.on_line(dot, p):
poly = p
break
if not poly:
return
dot2 = None
if dot in poly:
dot2 = self.next(dot, poly)
else:
line = game_utils.on_line(dot, poly)
if not line:
return
dot2 = line[1] if self.clockwise else line[0]
# distance is sum because one of them will be the same
speed = self.speed
while speed > 0:
distance = game_utils.distance(dot, dot2)
direction = 1 if any ((a<b for a, b in zip(dot, dot2))) else -1
step_speed = min(speed, distance)
if dot[0] == dot2[0]:
# vertical movement
self.y += step_speed * direction
else:
# horizontal movement
self.x += step_speed * direction
distance = distance - step_speed
if distance == 0:
dot, dot2 = dot2, self.next(dot2, poly)
speed = speed - step_speed
self.current_line = game_utils.on_line((self.x, self.y), poly)
class ClaimedPoly(graphics.Polygon):
def __init__(self, points, poly_type, **kwargs):
kwargs["points"] = points
graphics.Polygon.__init__(self, **kwargs)
self.visible = False
self.cache_as_bitmap = True
self.poly_type = poly_type
def appear(self):
self.visible = True
current_fill = self.fill
self.fill = "#333"
self.line_width = 3
self.animate(0.7, easing=Easing.Cubic.ease_out, fill=current_fill)
def __setattr__(self, name, val):
graphics.Polygon.__setattr__(self, name, val)
if name == "poly_type":
self.fill = getattr(colors, "claim_%s" % val)
self.stroke = graphics.Colors.darker(self.fill, -50)
|
mit
|
mdmintz/seleniumspot
|
seleniumbase/core/s3_manager.py
|
2
|
3083
|
"""
Manager for dealing with uploading/managing files on Amazon S3
"""
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from seleniumbase.config import settings
already_uploaded_files = []
class S3LoggingBucket(object):
"""
A class to upload log files from tests to Amazon S3.
Those files can then be shared easily.
"""
def __init__(self,
log_bucket=settings.S3_LOG_BUCKET,
bucket_url=settings.S3_BUCKET_URL,
selenium_access_key=settings.S3_SELENIUM_ACCESS_KEY,
selenium_secret_key=settings.S3_SELENIUM_SECRET_KEY):
self.conn = S3Connection(selenium_access_key,
selenium_secret_key)
self.bucket = self.conn.get_bucket(log_bucket)
self.bucket_url = bucket_url
def get_key(self, _name):
""" Create a new Key instance with the given name. """
return Key(bucket=self.bucket, name=_name)
def get_bucket(self):
""" Return the bucket being used. """
return self.bucket
def upload_file(self, file_name, file_path):
""" Upload a given file from the file_path to the bucket
with the new name/path file_name. """
upload_key = Key(bucket=self.bucket, name=file_name)
content_type = "text/plain"
if file_name.endswith(".html"):
content_type = "text/html"
elif file_name.endswith(".jpg"):
content_type = "image/jpeg"
elif file_name.endswith(".png"):
content_type = "image/png"
upload_key.set_contents_from_filename(
file_path,
headers={"Content-Type": content_type})
upload_key.url = \
upload_key.generate_url(expires_in=3600).split("?")[0]
try:
upload_key.make_public()
except Exception:
pass
def upload_index_file(self, test_address, timestamp):
""" Create an index.html file with links to all the log files
that were just uploaded. """
global already_uploaded_files
already_uploaded_files = list(set(already_uploaded_files))
already_uploaded_files.sort()
file_name = "%s/%s/index.html" % (test_address, timestamp)
index = self.get_key(file_name)
index_str = []
for completed_file in already_uploaded_files:
index_str.append("<a href='" + self.bucket_url + ""
"%s'>%s</a>" % (completed_file, completed_file))
index.set_contents_from_string(
"<br>".join(index_str),
headers={"Content-Type": "text/html"})
index.make_public()
return "%s%s" % (self.bucket_url, file_name)
def save_uploaded_file_names(self, files):
""" Keep a record of all file names that've been uploaded. Upload log
files related to each test after its execution. Once done, use
already_uploaded_files to create an index file. """
global already_uploaded_files
already_uploaded_files.extend(files)
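# Example usage (hypothetical key names and paths, for illustration only;
# in practice this module is driven by the SeleniumBase test plugins):
#
#     bucket = S3LoggingBucket()
#     bucket.upload_file("my_test/2016_01_01/log.txt", "/tmp/log.txt")
#     bucket.save_uploaded_file_names(["my_test/2016_01_01/log.txt"])
#     index_url = bucket.upload_index_file("my_test", "2016_01_01")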
|
mit
|
rodrigob/keras
|
tests/manual/check_autoencoder.py
|
56
|
5533
|
from __future__ import absolute_import
from __future__ import print_function
from keras.datasets import mnist
from keras.models import Sequential, model_from_config
from keras.layers.core import AutoEncoder, Dense, Activation, TimeDistributedDense, Flatten
from keras.layers.recurrent import LSTM
from keras.layers.embeddings import Embedding
from keras.layers.core import Layer
from keras.layers import containers
from keras.utils import np_utils
import numpy as np
nb_classes = 10
batch_size = 128
nb_epoch = 5
activation = 'linear'
input_dim = 784
hidden_dim = 392
max_train_samples = 5000
max_test_samples = 1000
# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, input_dim)[:max_train_samples]
X_test = X_test.reshape(10000, input_dim)[:max_test_samples]
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
Y_test = np_utils.to_categorical(y_test, nb_classes)[:max_test_samples]
print("X_train: ", X_train.shape)
print("X_test: ", X_test.shape)
##########################
# dense model test #
##########################
print("Training classical fully connected layer for classification")
model_classical = Sequential()
model_classical.add(Dense(input_dim, 10, activation=activation))
model_classical.add(Activation('softmax'))
model_classical.get_config(verbose=1)
model_classical.compile(loss='categorical_crossentropy', optimizer='adam')
model_classical.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=(X_test, Y_test))
classical_score = model_classical.evaluate(X_test, Y_test, verbose=0, show_accuracy=True)
print('\nclassical_score:', classical_score)
##########################
# autoencoder model test #
##########################
def build_lstm_autoencoder(autoencoder, X_train, X_test):
X_train = X_train[:, np.newaxis, :]
X_test = X_test[:, np.newaxis, :]
print("Modified X_train: ", X_train.shape)
print("Modified X_test: ", X_test.shape)
    # The TimeDistributedDense isn't really necessary, however you need a lot of GPU memory to do 784x392-392x784
autoencoder.add(TimeDistributedDense(input_dim, 16))
autoencoder.add(AutoEncoder(encoder=LSTM(16, 8, activation=activation, return_sequences=True),
decoder=LSTM(8, input_dim, activation=activation, return_sequences=True),
output_reconstruction=False))
return autoencoder, X_train, X_test
def build_deep_classical_autoencoder(autoencoder):
encoder = containers.Sequential([Dense(input_dim, hidden_dim, activation=activation), Dense(hidden_dim, hidden_dim/2, activation=activation)])
decoder = containers.Sequential([Dense(hidden_dim/2, hidden_dim, activation=activation), Dense(hidden_dim, input_dim, activation=activation)])
autoencoder.add(AutoEncoder(encoder=encoder, decoder=decoder, output_reconstruction=False))
return autoencoder
# Try different things here: 'lstm' or 'classical' or 'denoising'
# or 'deep_denoising'
for autoencoder_type in ['classical', 'lstm']:
print(autoencoder_type)
print('-'*40)
# Build our autoencoder model
autoencoder = Sequential()
if autoencoder_type == 'lstm':
print("Training LSTM AutoEncoder")
autoencoder, X_train, X_test = build_lstm_autoencoder(autoencoder, X_train, X_test)
elif autoencoder_type == 'classical':
print("Training Classical AutoEncoder")
autoencoder = build_deep_classical_autoencoder(autoencoder)
else:
print("Error: unknown autoencoder type!")
exit(-1)
autoencoder.compile(loss='mean_squared_error', optimizer='adam')
# Do NOT use validation data with return output_reconstruction=True
autoencoder.fit(X_train, X_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1)
# Do an inference pass
prefilter_train = autoencoder.predict(X_train, verbose=0)
prefilter_test = autoencoder.predict(X_test, verbose=0)
print("prefilter_train: ", prefilter_train.shape)
print("prefilter_test: ", prefilter_test.shape)
# Classify results from Autoencoder
print("Building classical fully connected layer for classification")
model = Sequential()
if autoencoder_type == 'lstm':
model.add(TimeDistributedDense(8, nb_classes, activation=activation))
model.add(Flatten())
elif autoencoder_type == 'classical':
model.add(Dense(prefilter_train.shape[1], nb_classes, activation=activation))
else:
model.add(Dense(prefilter_train.shape[1], nb_classes, activation=activation))
model.add(Activation('softmax'))
model.get_config(verbose=1)
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(prefilter_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=(prefilter_test, Y_test))
score = model.evaluate(prefilter_test, Y_test, verbose=0, show_accuracy=True)
print('\nscore:', score)
    print('Loss change:', 100. * (score[0] - classical_score[0]) / classical_score[0], '%')
    print('Accuracy change:', 100. * (score[1] - classical_score[1]) / classical_score[1], '%')
# check serialization
config = autoencoder.get_config(verbose=1)
autoencoder = model_from_config(config)
|
mit
|
ModioAB/caramel
|
tests/__init__.py
|
1
|
1242
|
#! /usr/bin/env python
# vim: expandtab shiftwidth=4 softtabstop=4 tabstop=17 filetype=python :
import unittest
from itertools import zip_longest
import transaction
from caramel.models import (
init_session,
DBSession,
)
from . import fixtures
class ModelTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(ModelTestCase, cls).setUpClass()
# Clear existing session, if any.
DBSession.remove()
from sqlalchemy import create_engine
engine = create_engine("sqlite://")
init_session(engine, create=True)
with transaction.manager:
csr = fixtures.CSRData.initial()
csr.save()
def setUp(self):
super(ModelTestCase, self).setUp()
# Always run in a fresh session
DBSession.remove()
def assertSimilar(self, a, b, msg=None):
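        # Fixture objects implementing SimilarityComparable provide their own
        # match(); swap the operands so that the comparable side drives the
        # comparison, otherwise fall back to plain equality.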
if isinstance(b, fixtures.SimilarityComparable):
a, b = b, a
if isinstance(a, fixtures.SimilarityComparable):
return self.assertTrue(a.match(b), msg)
return self.assertEqual(a, b, msg)
def assertSimilarSequence(self, seq1, seq2, msg=None):
for a, b in zip_longest(seq1, seq2):
self.assertSimilar(a, b)
|
agpl-3.0
|
sofianehaddad/ot-svn
|
python/test/t_NumericalMathFunction_analytical.py
|
2
|
4116
|
#! /usr/bin/env python
from openturns import *
from math import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
elementaryFunctions = Description(0)
elementaryFunctions.add("sin")
elementaryFunctions.add("cos")
elementaryFunctions.add("tan")
elementaryFunctions.add("asin")
elementaryFunctions.add("acos")
elementaryFunctions.add("atan")
elementaryFunctions.add("sinh")
elementaryFunctions.add("cosh")
elementaryFunctions.add("tanh")
elementaryFunctions.add("asinh")
elementaryFunctions.add("acosh")
elementaryFunctions.add("atanh")
elementaryFunctions.add("log2")
elementaryFunctions.add("log10")
elementaryFunctions.add("log")
elementaryFunctions.add("ln")
elementaryFunctions.add("lngamma")
elementaryFunctions.add("gamma")
elementaryFunctions.add("exp")
elementaryFunctions.add("erf")
elementaryFunctions.add("erfc")
elementaryFunctions.add("sqrt")
elementaryFunctions.add("cbrt")
elementaryFunctions.add("besselJ0")
elementaryFunctions.add("besselJ1")
elementaryFunctions.add("besselY0")
elementaryFunctions.add("besselY1")
elementaryFunctions.add("sign")
elementaryFunctions.add("rint")
elementaryFunctions.add("abs")
# Check the creation of the elementary functions
for i in range(elementaryFunctions.getSize()):
x = NumericalPoint(1, 0.4)
        # acosh is only defined for x >= 1
if elementaryFunctions[i] == "acosh":
x[0] = 1.4
f = NumericalMathFunction(
"x", "2.0*" + elementaryFunctions[i] + "(x)", "y")
print "f=", f
print "f(", x[0], ")=%.4e" % f(x)[0]
analytical_grad = True
try:
df = f.gradient(x)[0, 0]
except:
analytical_grad = False
if analytical_grad:
f.setGradient(CenteredFiniteDifferenceGradient(ResourceMap.GetAsNumericalScalar(
"CenteredFiniteDifferenceGradient-DefaultEpsilon"), f.getEvaluation()))
df2 = f.gradient(x)[0, 0]
print "df(", x[0], ")=%.4e" % df, "df (FD)=%.4e" % df2
if abs(df) > 1e-5:
err_g = abs(df2 / df - 1.)
else:
err_g = abs(df - df2)
if err_g > 1e-5:
print "GRADIENT ERROR! check " + elementaryFunctions[i] + " gradient, err=", err_g
analytical_hess = True
try:
d2f = f.hessian(x)[0, 0, 0]
except:
analytical_hess = False
if analytical_hess:
f.setHessian(CenteredFiniteDifferenceHessian(ResourceMap.GetAsNumericalScalar(
"CenteredFiniteDifferenceHessian-DefaultEpsilon"), f.getEvaluation()))
d2f2 = f.hessian(x)[0, 0, 0]
print "d2f(", x[0], ")=%.4e" % d2f, "d2f (FD)=%.4e" % d2f2
if abs(d2f) > 1e-5:
err_h = abs(d2f2 / d2f - 1.)
else:
err_h = abs(d2f - d2f2)
if err_h > 1e-4:
print "HESSIAN ERROR! check " + elementaryFunctions[i] + " hessian, err=", err_h
nmf = NumericalMathFunction(['x0', 'x1'], ['y0', 'y1'], ['x0+x1', 'x0-x1'])
marginal0 = nmf.getMarginal(0)
marginal1 = nmf.getMarginal(1)
print "marginal 0=", marginal0
print "marginal 1=", marginal1
# test sample as input of a function
formula = ["sin(xi1) + 7. * (sin(xi2)) ^ 2 + 0.1 * xi3^4 * sin(xi1)"]
model = NumericalMathFunction(["xi1", "xi2", "xi3"], ["y"], formula)
# Create an input distribution to calculate reference values
distribution = ComposedDistribution([Uniform(-pi, pi)] * 3)
inSample = NumericalSample(distribution.getSample(100))
resultSample = NumericalSample(model(inSample))
refResultValues = [NumericalPoint(model([inSample[i][0], inSample[i][1], inSample[i][2]]))
for i in range(100)]
print "First reference value : %.4e" % refResultValues[0][0]
print "First result calculated : %.4e" % resultSample[0][0]
except:
import sys
print "t_NumericalMathFunction_analytical.py", sys.exc_type, sys.exc_value
|
mit
|
pchmieli/h2o-3
|
h2o-py/tests/testdir_algos/gbm/pyunit_DEPRECATED_cv_carsGBM.py
|
1
|
6869
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import random
import inspect
def cv_carsGBM():
# read in the dataset and construct training set (and validation set)
cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
# choose the type model-building exercise (multinomial classification or regression). 0:regression, 1:binomial,
# 2:multinomial
problem = random.sample(range(3),1)[0]
# pick the predictors and response column, along with the correct distribution
predictors = ["displacement","power","weight","acceleration","year"]
if problem == 1 :
response_col = "economy_20mpg"
distribution = "bernoulli"
cars[response_col] = cars[response_col].asfactor()
elif problem == 2 :
response_col = "cylinders"
distribution = "multinomial"
cars[response_col] = cars[response_col].asfactor()
else :
response_col = "economy"
distribution = "gaussian"
print "Distribution: {0}".format(distribution)
print "Response column: {0}".format(response_col)
## cross-validation
# 1. check that cv metrics are the same over repeated "Modulo" runs
nfolds = random.randint(3,10)
gbm1 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, distribution=distribution, ntrees=5,
fold_assignment="Modulo")
gbm2 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, distribution=distribution, ntrees=5,
fold_assignment="Modulo")
pyunit_utils.check_models(gbm1, gbm2, True)
# 2. check that cv metrics are different over repeated "Random" runs
nfolds = random.randint(3,10)
gbm1 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, distribution=distribution, ntrees=5,
fold_assignment="Random")
gbm2 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, distribution=distribution, ntrees=5,
fold_assignment="Random")
try:
pyunit_utils.check_models(gbm1, gbm2, True)
assert False, "Expected models to be different over repeated Random runs"
except AssertionError:
assert True
# 3. folds_column
num_folds = random.randint(2,5)
fold_assignments = h2o.H2OFrame(python_obj=[[random.randint(0,num_folds-1) for f in range(cars.nrow)]])
fold_assignments.set_names(["fold_assignments"])
cars = cars.cbind(fold_assignments)
gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], training_frame=cars, distribution=distribution, ntrees=5,
fold_column="fold_assignments", keep_cross_validation_predictions=True)
num_cv_models = len(gbm._model_json['output']['cross_validation_models'])
assert num_cv_models==num_folds, "Expected {0} cross-validation models, but got " \
"{1}".format(num_folds, num_cv_models)
cv_model1 = h2o.get_model(gbm._model_json['output']['cross_validation_models'][0]['name'])
cv_model2 = h2o.get_model(gbm._model_json['output']['cross_validation_models'][1]['name'])
# 4. keep_cross_validation_predictions
cv_predictions = gbm1._model_json['output']['cross_validation_predictions']
assert cv_predictions is None, "Expected cross-validation predictions to be None, but got {0}".format(cv_predictions)
cv_predictions = gbm._model_json['output']['cross_validation_predictions']
assert len(cv_predictions)==num_folds, "Expected the same number of cross-validation predictions " \
"as folds, but got {0}".format(len(cv_predictions))
# # 5. manually construct models
# fold1 = cars[cars["fold_assignments"]==0]
# fold2 = cars[cars["fold_assignments"]==1]
# manual_model1 = h2o.gbm(y=fold2[response_col],
# x=fold2[predictors],
# validation_y=fold1[response_col],
# validation_x=fold1[predictors], ntrees=5,
# distribution=distribution)
# manual_model2 = h2o.gbm(y=fold1[response_col],
# x=fold1[predictors],
# validation_y=fold2[response_col],
# validation_x=fold2[predictors], ntrees=5,
# distribution=distribution)
## boundary cases
# 1. nfolds = number of observations (leave-one-out cross-validation)
gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=cars.nrow, distribution=distribution, ntrees=5,
fold_assignment="Modulo")
# 2. nfolds = 0
gbm1 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=0, distribution=distribution, ntrees=5)
# check that this is equivalent to no nfolds
gbm2 = h2o.gbm(y=cars[response_col], x=cars[predictors], distribution=distribution, ntrees=5)
pyunit_utils.check_models(gbm1, gbm2)
# 3. cross-validation and regular validation attempted
gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=random.randint(3,10), validation_y=cars[response_col], ntrees=5,
validation_x=cars[predictors], distribution=distribution)
## error cases
# 1. nfolds == 1 or < 0
try:
gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=random.sample([-1,1], 1)[0], ntrees=5,
distribution=distribution)
assert False, "Expected model-build to fail when nfolds is 1 or < 0"
except EnvironmentError:
assert True
# 2. more folds than observations
try:
gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=cars.nrow+1, distribution=distribution, ntrees=5,
fold_assignment="Modulo")
assert False, "Expected model-build to fail when nfolds > nobs"
except EnvironmentError:
assert True
# 3. fold_column and nfolds both specified
try:
gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=3, fold_column="fold_assignments", ntrees=5,
distribution=distribution, training_frame=cars)
assert False, "Expected model-build to fail when fold_column and nfolds both specified"
except EnvironmentError:
assert True
# 4. fold_column and fold_assignment both specified
try:
gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], fold_assignment="Random", fold_column="fold_assignments", ntrees=5,
distribution=distribution, training_frame=cars)
assert False, "Expected model-build to fail when fold_column and fold_assignment both specified"
except EnvironmentError:
assert True
if __name__ == "__main__":
pyunit_utils.standalone_test(cv_carsGBM)
else:
cv_carsGBM()
|
apache-2.0
|
SoftwareDefinedBuildings/smap
|
python/smap/drivers/noaaforecast.py
|
6
|
4847
|
"""
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
'''
sMAP driver for NOAA weather forecasts
@author: Andrew Krioukov
'''
import time
import re
import logging
from smap.driver import SmapDriver
from smap.util import periodicSequentialCall
from smap.contrib import dtutil
import dateutil.parser
import urllib, urllib2
import BeautifulSoup
import json
class NOAAElement():
def __init__(self, tag_name, type_name, code_name, units):
self.tag_name = tag_name
self.type_name = type_name
self.code_name = code_name
self.units = units
class NOAAForecast(SmapDriver):
# Map of forecast variable names to NOAA codes
# http://graphical.weather.gov/xml/docs/elementInputNames.php
element_map = {
'temperature' : NOAAElement('temperature', 'hourly', 'temp', 'Fahrenheit'),
'wind-speed' : NOAAElement('wind-speed', 'sustained', 'wspd', 'knots'),
'wind-dir' : NOAAElement('direction', 'wind', 'wdir', 'degrees true'),
'dew-point' : NOAAElement('temperature', 'dew point', 'dew', 'Fahrenheit'),
'humidity' : NOAAElement('humidity', 'relative', 'rh', 'percent'),
'cloud-cover' : NOAAElement('cloud-amount', 'total', 'sky', 'percent'),
'rain' : NOAAElement('precipitation', 'liquid', 'qpf', 'inches'),
}
def setup(self, opts):
args = {
'lat': opts.get('lat'),
'lon': opts.get('lon'),
'product': 'time-series',
'Unit': 'e',
}
elements = [x.strip() for x in opts.get('elements').split(',') if x.strip() in self.element_map]
baseurl = 'http://graphical.weather.gov/xml/sample_products/browser_interface/ndfdXMLclient.php'
self.set_metadata('/', {
'Location/Uri' : baseurl,
'Metadata/Location/Latitude' : args['lat'],
'Metadata/Location/Longitude' : args['lon'],
})
for name in elements:
e = self.element_map[name]
args[e.code_name] = e.code_name
self.add_timeseries('/' + name, e.units, data_type='double')
self.url = baseurl + '?' + urllib.urlencode(args)
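        # The resulting request is a plain NDFD REST query, e.g. (parameter
        # order and values are illustrative only):
        #   ...ndfdXMLclient.php?lat=37.87&lon=-122.27&product=time-series&Unit=e&temp=temp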
def start(self):
periodicSequentialCall(self.read).start(60*60)
def read(self):
for retry_time in [0, 30, 5*60]:
time.sleep(retry_time)
try:
print "Reading"
data = urllib2.urlopen(self.url, timeout = 30).read()
times = {}
b = BeautifulSoup.BeautifulSoup(data)
# Parse time blocks
data = b.find('data')
for time_block in data.findAll('time-layout'):
key = time_block.find('layout-key').contents[0]
time_list = []
for time_tag in time_block.findAll('start-valid-time'):
#dt = datetime.datetime.strptime(time_str, '%Y-%m-%dT%H:%M:%S%z')
dt = dateutil.parser.parse(time_tag.string)
time_list.append(dtutil.dt2ts(dt))
times[key] = time_list
# For each value block find referenced time block and add readings
for data_block in data.find('parameters').findAll(recursive=False):
key = data_block['time-layout']
# Find the element being returned
for (name, e) in self.element_map.items():
if e.tag_name == data_block.name and e.type_name == data_block['type']:
# Element found
value = []
for v in data_block.findAll('value'):
value.append(float(v.string))
for t,v in zip(times[key], value):
self.add('/'+name, int(t), v)
break
return
except Exception, e:
print e
# Error occured retry
|
bsd-2-clause
|
tinkerthaler/odoo
|
addons/account_asset/wizard/account_asset_change_duration.py
|
84
|
6005
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
class asset_modify(osv.osv_memory):
_name = 'asset.modify'
_description = 'Modify Asset'
_columns = {
'name': fields.char('Reason', required=True),
'method_number': fields.integer('Number of Depreciations', required=True),
'method_period': fields.integer('Period Length'),
'method_end': fields.date('Ending date'),
'note': fields.text('Notes'),
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
""" Returns views and fields for current model.
@param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param view_id: ID of the view to be loaded, or None/False for the default view
        @param view_type: defines a view type. It can be one of (form, tree, graph, calendar, gantt, search, mdx)
        @param context: context arguments, like lang, time zone
        @param toolbar: contains a list of reports, wizards, and links related to the current model
        @return: Returns a dictionary that contains definitions for fields, views, and toolbars
"""
if not context:
context = {}
asset_obj = self.pool.get('account.asset.asset')
result = super(asset_modify, self).fields_view_get(cr, uid, view_id, view_type, context=context, toolbar=toolbar, submenu=submenu)
asset_id = context.get('active_id', False)
active_model = context.get('active_model', '')
if active_model == 'account.asset.asset' and asset_id:
asset = asset_obj.browse(cr, uid, asset_id, context=context)
doc = etree.XML(result['arch'])
if asset.method_time == 'number':
node = doc.xpath("//field[@name='method_end']")[0]
node.set('invisible', '1')
elif asset.method_time == 'end':
node = doc.xpath("//field[@name='method_number']")[0]
node.set('invisible', '1')
result['arch'] = etree.tostring(doc)
return result
def default_get(self, cr, uid, fields, context=None):
""" To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
        @return: A dictionary of fields with their default values.
"""
if not context:
context = {}
asset_obj = self.pool.get('account.asset.asset')
res = super(asset_modify, self).default_get(cr, uid, fields, context=context)
asset_id = context.get('active_id', False)
asset = asset_obj.browse(cr, uid, asset_id, context=context)
if 'name' in fields:
res.update({'name': asset.name})
if 'method_number' in fields and asset.method_time == 'number':
res.update({'method_number': asset.method_number})
if 'method_period' in fields:
res.update({'method_period': asset.method_period})
if 'method_end' in fields and asset.method_time == 'end':
res.update({'method_end': asset.method_end})
return res
def modify(self, cr, uid, ids, context=None):
""" Modifies the duration of asset for calculating depreciation
and maintains the history of old values.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of Ids
@param context: A standard dictionary
@return: Close the wizard.
"""
if not context:
context = {}
asset_obj = self.pool.get('account.asset.asset')
history_obj = self.pool.get('account.asset.history')
asset_id = context.get('active_id', False)
asset = asset_obj.browse(cr, uid, asset_id, context=context)
data = self.browse(cr, uid, ids[0], context=context)
history_vals = {
'asset_id': asset_id,
'name': data.name,
'method_time': asset.method_time,
'method_number': asset.method_number,
'method_period': asset.method_period,
'method_end': asset.method_end,
'user_id': uid,
'date': time.strftime('%Y-%m-%d'),
'note': data.note,
}
history_obj.create(cr, uid, history_vals, context=context)
asset_vals = {
'method_number': data.method_number,
'method_period': data.method_period,
'method_end': data.method_end,
}
asset_obj.write(cr, uid, [asset_id], asset_vals, context=context)
asset_obj.compute_depreciation_board(cr, uid, [asset_id], context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
b0ttl3z/SickRage
|
lib/guessit/rules/properties/screen_size.py
|
20
|
2893
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
screen_size property
"""
from rebulk.remodule import re
from rebulk import Rebulk, Rule, RemoveMatch
from ..common.validators import seps_surround
from ..common import dash
def screen_size():
"""
Builder for rebulk object.
:return: Created Rebulk object
:rtype: Rebulk
"""
def conflict_solver(match, other):
"""
Conflict solver for most screen_size.
"""
if other.name == 'screen_size':
if 'resolution' in other.tags:
                # Hack to resolve the conflict when a string like "720 x 432" matches both the resolution pattern and the 720p pattern
int_value = _digits_re.findall(match.raw)[-1]
if other.value.startswith(int_value):
return match
return other
return '__default__'
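    # Illustrative note (assumed from rebulk's conflict_solver convention, where
    # the returned match is the one that gets discarded): for a release name
    # containing "720 x 432", the resolution pattern and the 720p pattern both
    # match; because the resolution value starts with "720", the plain 720p
    # match is dropped and the explicit resolution wins.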
rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE)
rebulk.defaults(name="screen_size", validator=seps_surround, conflict_solver=conflict_solver)
rebulk.regex(r"(?:\d{3,}(?:x|\*))?360(?:i|p?x?)", value="360p")
rebulk.regex(r"(?:\d{3,}(?:x|\*))?368(?:i|p?x?)", value="368p")
rebulk.regex(r"(?:\d{3,}(?:x|\*))?480(?:i|p?x?)", value="480p")
rebulk.regex(r"(?:\d{3,}(?:x|\*))?576(?:i|p?x?)", value="576p")
rebulk.regex(r"(?:\d{3,}(?:x|\*))?720(?:i|p?(?:50|60)?x?)", value="720p")
rebulk.regex(r"(?:\d{3,}(?:x|\*))?720(?:p(?:50|60)?x?)", value="720p")
rebulk.regex(r"(?:\d{3,}(?:x|\*))?720p?hd", value="720p")
rebulk.regex(r"(?:\d{3,}(?:x|\*))?900(?:i|p?x?)", value="900p")
rebulk.regex(r"(?:\d{3,}(?:x|\*))?1080i", value="1080i")
rebulk.regex(r"(?:\d{3,}(?:x|\*))?1080p?x?", value="1080p")
rebulk.regex(r"(?:\d{3,}(?:x|\*))?1080(?:p(?:50|60)?x?)", value="1080p")
rebulk.regex(r"(?:\d{3,}(?:x|\*))?1080p?hd", value="1080p")
rebulk.regex(r"(?:\d{3,}(?:x|\*))?2160(?:i|p?x?)", value="4K")
_digits_re = re.compile(r'\d+')
rebulk.defaults(name="screen_size", validator=seps_surround)
rebulk.regex(r'\d{3,}-?(?:x|\*)-?\d{3,}',
formatter=lambda value: 'x'.join(_digits_re.findall(value)),
abbreviations=[dash],
tags=['resolution'],
conflict_solver=lambda match, other: '__default__' if other.name == 'screen_size' else other)
rebulk.rules(ScreenSizeOnlyOne)
return rebulk
class ScreenSizeOnlyOne(Rule):
"""
    Keep a single screen_size per filepath part.
"""
consequence = RemoveMatch
def when(self, matches, context):
to_remove = []
for filepart in matches.markers.named('path'):
screensize = list(reversed(matches.range(filepart.start, filepart.end,
lambda match: match.name == 'screen_size')))
if len(screensize) > 1:
to_remove.extend(screensize[1:])
return to_remove
|
gpl-3.0
|
gymnasium/edx-platform
|
lms/djangoapps/certificates/management/commands/gen_cert_report.py
|
11
|
4330
|
"""
Generate a report of certificate statuses
"""
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Count
from opaque_keys.edx.keys import CourseKey
from six import text_type
from lms.djangoapps.certificates.models import GeneratedCertificate
class Command(BaseCommand):
"""
Management command to generate a certificate status
report for a given course.
"""
help = """
Generate a certificate status report for a given course.
This command does not do anything other than report the current
certificate status.
generating - A request has been made to generate a certificate,
but it has not been generated yet.
regenerating - A request has been made to regenerate a certificate,
but it has not been generated yet.
deleting - A request has been made to delete a certificate.
deleted - The certificate has been deleted.
downloadable - The certificate is available for download.
notpassing - The student was graded but is not passing
"""
def add_arguments(self, parser):
parser.add_argument(
'-c', '--course',
metavar='COURSE_ID',
dest='course',
default=None,
help='Only generate for COURSE_ID'
)
def handle(self, *args, **options):
# Find all courses that have ended
if options['course']:
course_id = CourseKey.from_string(options['course'])
else:
raise CommandError("You must specify a course")
cert_data = {}
# find students who are active
# number of enrolled students = downloadable + notpassing
print "Looking up certificate states for {0}".format(options['course'])
enrolled_current = User.objects.filter(
courseenrollment__course_id=course_id,
courseenrollment__is_active=True
)
enrolled_total = User.objects.filter(
courseenrollment__course_id=course_id
)
verified_enrolled = GeneratedCertificate.objects.filter( # pylint: disable=no-member
course_id__exact=course_id,
mode__exact='verified'
)
honor_enrolled = GeneratedCertificate.objects.filter( # pylint: disable=no-member
course_id__exact=course_id,
mode__exact='honor'
)
audit_enrolled = GeneratedCertificate.objects.filter( # pylint: disable=no-member
course_id__exact=course_id,
mode__exact='audit'
)
cert_data[course_id] = {
'enrolled_current': enrolled_current.count(),
'enrolled_total': enrolled_total.count(),
'verified_enrolled': verified_enrolled.count(),
'honor_enrolled': honor_enrolled.count(),
'audit_enrolled': audit_enrolled.count()
}
status_tally = GeneratedCertificate.objects.filter( # pylint: disable=no-member
course_id__exact=course_id
).values('status').annotate(
dcount=Count('status')
)
cert_data[course_id].update(
{
status['status']: status['dcount'] for status in status_tally
}
)
mode_tally = GeneratedCertificate.objects.filter( # pylint: disable=no-member
course_id__exact=course_id,
status__exact='downloadable'
).values('mode').annotate(
dcount=Count('mode')
)
cert_data[course_id].update(
{mode['mode']: mode['dcount'] for mode in mode_tally}
)
        # all states we have seen for all courses
status_headings = sorted(
set([status for course in cert_data for status in cert_data[course]])
)
# print the heading for the report
print "{:>26}".format("course ID"),
print ' '.join(["{:>16}".format(heading) for heading in status_headings])
# print the report
print "{0:>26}".format(text_type(course_id)),
for heading in status_headings:
if heading in cert_data[course_id]:
print "{:>16}".format(cert_data[course_id][heading]),
else:
print " " * 16,
print
|
agpl-3.0
|
and2egg/philharmonic
|
philharmonic/tests/test_explorer.py
|
1
|
1636
|
from mock import patch
from nose.tools import *
import pandas as pd
import philharmonic
@patch('philharmonic.simulator.simulator.run')
def test_explore_ga_weights(mock_run):
philharmonic._setup('philharmonic.settings.ga_explore')
from philharmonic import conf
conf.parameter_space = 'GAWeights'
from philharmonic.explorer import explore
mock_run.return_value = {'Total cost ($)': 0.5}
with patch.object(philharmonic.explorer, '_serialise_results',
return_value=None) as mock_serialise:
explore()
@patch('philharmonic.simulator.simulator.run')
def test_explore_time_offsets(mock_run):
philharmonic._setup('philharmonic.settings.ga_explore')
from philharmonic import conf
conf.parameter_space = 'TimeOffsets'
from philharmonic.explorer import explore
mock_run.return_value = {'Total cost ($)': 0.5}
with patch.object(philharmonic.explorer, '_serialise_results',
return_value=None) as mock_serialise:
explore()
def test_time_offsets():
philharmonic._setup('philharmonic.settings.ga_explore')
from philharmonic import conf
conf.start = pd.Timestamp('2010-06-03 00:00')
conf.times = pd.date_range(conf.start, periods=3, freq='H')
conf.end = conf.times[-1]
conf.time_offsets_step = pd.offsets.DateOffset(months=2)
conf.time_offsets_start = pd.offsets.Hour(0) # the offset of the first run
conf.time_offsets_max = pd.offsets.DateOffset(months=11, days=20)
from philharmonic.explorer import TimeOffsets
combinations = TimeOffsets().combinations
assert_equals(combinations.shape, (6, 2))
|
gpl-3.0
|
keedio/hue
|
desktop/core/ext-py/Django-1.6.10/django/template/loader_tags.py
|
105
|
10201
|
from collections import defaultdict
from django.conf import settings
from django.template.base import TemplateSyntaxError, Library, Node, TextNode,\
token_kwargs, Variable
from django.template.loader import get_template
from django.utils.safestring import mark_safe
from django.utils import six
register = Library()
BLOCK_CONTEXT_KEY = 'block_context'
class ExtendsError(Exception):
pass
class BlockContext(object):
def __init__(self):
# Dictionary of FIFO queues.
self.blocks = defaultdict(list)
def add_blocks(self, blocks):
for name, block in six.iteritems(blocks):
self.blocks[name].insert(0, block)
def pop(self, name):
try:
return self.blocks[name].pop()
except IndexError:
return None
def push(self, name, block):
self.blocks[name].append(block)
def get_block(self, name):
try:
return self.blocks[name][-1]
except IndexError:
return None
class BlockNode(Node):
def __init__(self, name, nodelist, parent=None):
self.name, self.nodelist, self.parent = name, nodelist, parent
def __repr__(self):
return "<Block Node: %s. Contents: %r>" % (self.name, self.nodelist)
def render(self, context):
block_context = context.render_context.get(BLOCK_CONTEXT_KEY)
context.push()
if block_context is None:
context['block'] = self
result = self.nodelist.render(context)
else:
push = block = block_context.pop(self.name)
if block is None:
block = self
# Create new block so we can store context without thread-safety issues.
block = BlockNode(block.name, block.nodelist)
block.context = context
context['block'] = block
result = block.nodelist.render(context)
if push is not None:
block_context.push(self.name, push)
context.pop()
return result
def super(self):
render_context = self.context.render_context
if (BLOCK_CONTEXT_KEY in render_context and
render_context[BLOCK_CONTEXT_KEY].get_block(self.name) is not None):
return mark_safe(self.render(self.context))
return ''
class ExtendsNode(Node):
must_be_first = True
def __init__(self, nodelist, parent_name, template_dirs=None):
self.nodelist = nodelist
self.parent_name = parent_name
self.template_dirs = template_dirs
self.blocks = dict([(n.name, n) for n in nodelist.get_nodes_by_type(BlockNode)])
def __repr__(self):
return '<ExtendsNode: extends %s>' % self.parent_name.token
def get_parent(self, context):
parent = self.parent_name.resolve(context)
if not parent:
error_msg = "Invalid template name in 'extends' tag: %r." % parent
if self.parent_name.filters or\
isinstance(self.parent_name.var, Variable):
error_msg += " Got this from the '%s' variable." %\
self.parent_name.token
raise TemplateSyntaxError(error_msg)
if hasattr(parent, 'render'):
return parent # parent is a Template object
return get_template(parent)
def render(self, context):
compiled_parent = self.get_parent(context)
if BLOCK_CONTEXT_KEY not in context.render_context:
context.render_context[BLOCK_CONTEXT_KEY] = BlockContext()
block_context = context.render_context[BLOCK_CONTEXT_KEY]
# Add the block nodes from this node to the block context
block_context.add_blocks(self.blocks)
# If this block's parent doesn't have an extends node it is the root,
# and its block nodes also need to be added to the block context.
for node in compiled_parent.nodelist:
# The ExtendsNode has to be the first non-text node.
if not isinstance(node, TextNode):
if not isinstance(node, ExtendsNode):
blocks = dict([(n.name, n) for n in
compiled_parent.nodelist.get_nodes_by_type(BlockNode)])
block_context.add_blocks(blocks)
break
# Call Template._render explicitly so the parser context stays
# the same.
return compiled_parent._render(context)
class BaseIncludeNode(Node):
def __init__(self, *args, **kwargs):
self.extra_context = kwargs.pop('extra_context', {})
self.isolated_context = kwargs.pop('isolated_context', False)
super(BaseIncludeNode, self).__init__(*args, **kwargs)
def render_template(self, template, context):
values = dict([(name, var.resolve(context)) for name, var
in six.iteritems(self.extra_context)])
if self.isolated_context:
return template.render(context.new(values))
context.update(values)
output = template.render(context)
context.pop()
return output
class ConstantIncludeNode(BaseIncludeNode):
def __init__(self, template_path, *args, **kwargs):
super(ConstantIncludeNode, self).__init__(*args, **kwargs)
try:
t = get_template(template_path)
self.template = t
except:
if settings.TEMPLATE_DEBUG:
raise
self.template = None
def render(self, context):
if not self.template:
return ''
return self.render_template(self.template, context)
class IncludeNode(BaseIncludeNode):
def __init__(self, template_name, *args, **kwargs):
super(IncludeNode, self).__init__(*args, **kwargs)
self.template_name = template_name
def render(self, context):
try:
template_name = self.template_name.resolve(context)
template = get_template(template_name)
return self.render_template(template, context)
except:
if settings.TEMPLATE_DEBUG:
raise
return ''
@register.tag('block')
def do_block(parser, token):
"""
Define a block that can be overridden by child templates.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' tag takes only one argument" % bits[0])
block_name = bits[1]
# Keep track of the names of BlockNodes found in this template, so we can
# check for duplication.
try:
if block_name in parser.__loaded_blocks:
raise TemplateSyntaxError("'%s' tag with name '%s' appears more than once" % (bits[0], block_name))
parser.__loaded_blocks.append(block_name)
except AttributeError: # parser.__loaded_blocks isn't a list yet
parser.__loaded_blocks = [block_name]
nodelist = parser.parse(('endblock',))
# This check is kept for backwards-compatibility. See #3100.
endblock = parser.next_token()
acceptable_endblocks = ('endblock', 'endblock %s' % block_name)
if endblock.contents not in acceptable_endblocks:
parser.invalid_block_tag(endblock, 'endblock', acceptable_endblocks)
return BlockNode(block_name, nodelist)
@register.tag('extends')
def do_extends(parser, token):
"""
Signal that this template extends a parent template.
This tag may be used in two ways: ``{% extends "base" %}`` (with quotes)
uses the literal value "base" as the name of the parent template to extend,
or ``{% extends variable %}`` uses the value of ``variable`` as either the
name of the parent template to extend (if it evaluates to a string) or as
the parent template itself (if it evaluates to a Template object).
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument" % bits[0])
parent_name = parser.compile_filter(bits[1])
nodelist = parser.parse()
if nodelist.get_nodes_by_type(ExtendsNode):
raise TemplateSyntaxError("'%s' cannot appear more than once in the same template" % bits[0])
return ExtendsNode(nodelist, parent_name)
@register.tag('include')
def do_include(parser, token):
"""
Loads a template and renders it with the current context. You can pass
additional context using keyword arguments.
Example::
{% include "foo/some_include" %}
{% include "foo/some_include" with bar="BAZZ!" baz="BING!" %}
Use the ``only`` argument to exclude the current context when rendering
the included template::
{% include "foo/some_include" only %}
{% include "foo/some_include" with bar="1" only %}
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("%r tag takes at least one argument: the name of the template to be included." % bits[0])
options = {}
remaining_bits = bits[2:]
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=False)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'only':
value = True
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
isolated_context = options.get('only', False)
namemap = options.get('with', {})
path = bits[1]
if path[0] in ('"', "'") and path[-1] == path[0]:
return ConstantIncludeNode(path[1:-1], extra_context=namemap,
isolated_context=isolated_context)
return IncludeNode(parser.compile_filter(bits[1]), extra_context=namemap,
isolated_context=isolated_context)
|
apache-2.0
|
christabor/MoAL
|
MOAL/computer_organization/bcd.py
|
1
|
2704
|
# -*- coding: utf-8 -*-
__author__ = """Chris Tabor ([email protected])"""
if __name__ == '__main__':
from os import getcwd
from os import sys
sys.path.append(getcwd())
from MOAL.helpers.display import Section
from MOAL.helpers.display import divider
from MOAL.helpers.display import print_h4
from MOAL.computer_organization.data_types import BaseDataType
from MOAL.computer_organization import numerical_encoding_basic as encoders
DEBUG = True if __name__ == '__main__' else False
def _show_bcd(num, decimals, binary, bcd):
print('Value = {}\nD {}\nB {}\nC {}'.format(num, decimals, binary, bcd))
divider(atom='<')
def dec_to_bcd_8421(num):
"""Convert a decimal to binary, and decompress into Binary Coded Decimal.
    Pads each digit with leading zeros to enforce a 4-bit "nibble" on all digits.
Uses 8421 notation [see wikipedia.org/wiki/Binary-coded_decimal]"""
bcd, binary, decimals = '', '', ''
for digit in str(num):
binval = encoders.dec_to_bin(int(digit))
binary += '{}{}'.format(binval, ' ' * (4 - len(binval) + 1))
if len(binval) < 4:
binval = binval.zfill(4)
bcd += '{} '.format(binval)
decimals += digit + (' ' * 4)
_show_bcd(num, decimals, binary, bcd)
return bcd
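# Illustrative example (assuming encoders.dec_to_bin returns an unpadded bit
# string): dec_to_bcd_8421(59) encodes each decimal digit separately, so
# 5 -> 0101 and 9 -> 1001, giving the BCD string '0101 1001 '.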
def dec_to_bcd_excess3(num, bias=3):
"""Converts a binary to Binary Coded Decimal, then converts again to
excess-3 BCD, which has a 'bit bias' of `bias`, where bits are
shifted by the given bias. See wikipedia.org/wiki/Excess-3 for more."""
bcd, binary, decimals = '', '', ''
for digit in str(num):
binval = encoders.dec_to_bin(int(digit))
binval = BaseDataType.add(str(binval), bias)
binary += '{}{}'.format(binval, ' ' * (4 - len(binval) + 1))
if len(binval) < 4:
binval = binval.zfill(4)
bcd += '{} '.format(binval)
decimals += digit + (' ' * 4)
_show_bcd(num, decimals, binary, bcd)
return bcd
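# Illustrative example (same assumption about encoders.dec_to_bin): with the
# default bias of 3, the digit 5 becomes 5 + 3 = 8 -> 1000 and the digit 9
# becomes 9 + 3 = 12 -> 1100, so dec_to_bcd_excess3(59) yields '1000 1100 '.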
if DEBUG:
with Section('Numerical encoding: Binary Coded Decimal (BCD)'):
"""More exist, but are not covered here.
See books.google.com/books?id=0f-6diYBV0wC&lpg
=PA48&ots=jG6NiHY3he&dq=bcd%207421&pg
=PA51#v=onepage&q=bcd%207421&f=false For more examples."""
print('D = Decimal, B = Binary, C = Binary Coded Decimal')
nums = [
1, 2, 4, 16, 32, 64, 128, 256, 512, 1024, 2048, 1234,
12345, 123456, 1234567, 12345678, 123456789]
print_h4('BCD', desc='8421 encoding')
for num in nums:
dec_to_bcd_8421(num)
print_h4('BCD', desc='Excess-3 (bias) encoding')
for num in nums:
dec_to_bcd_excess3(num)
|
apache-2.0
|
svebk/DeepSentiBank_memex
|
workflows/mark-precomp-sim/mark-precomp-sim.py
|
1
|
1410
|
import json
from pyspark import SparkContext, SparkConf
def prepare_mark_precomp(data):
key = str(data).rstrip()
#print("[prepare_mark_precomp] data: {}".format(data))
return [(key, [key, "info", "precomp_sim", "True"])]
def mark_precomp_sim(hbase_man_in, hbase_man_out):
in_rdd = hbase_man_in.read_hbase_table()
existing_sims = in_rdd.keys()
existing_sims_count = existing_sims.count()
print("existing_sims count: {}".format(existing_sims_count))
sample_existing_sims = existing_sims.first()
print("existing_sims first: {}".format(sample_existing_sims))
out_rdd = existing_sims.flatMap(prepare_mark_precomp)
sample_out_rdd = out_rdd.take(5)
print("out_rdd sample: {}".format(sample_out_rdd))
hbase_man_out.rdd2hbase(out_rdd)
if __name__ == '__main__':
from hbase_manager import HbaseManager
job_conf = json.load(open("job_conf.json","rt"))
print job_conf
tab_sim_name = job_conf["tab_sim"]
tab_sha1_infos_name = job_conf["tab_sha1_infos"]
hbase_host = job_conf["hbase_host"]
sc = SparkContext(appName='mark-precomp-sim_from_'+tab_sim_name+'_to'+tab_sha1_infos_name)
sc.setLogLevel("ERROR")
conf = SparkConf()
hbase_man_in = HbaseManager(sc, conf, hbase_host, tab_sim_name)
hbase_man_out = HbaseManager(sc, conf, hbase_host, tab_sha1_infos_name)
mark_precomp_sim(hbase_man_in, hbase_man_out)
|
bsd-2-clause
|
sberrevoets/scottberrevoets.com
|
pelicanconf.py
|
1
|
1886
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# Site information
AUTHOR = 'Scott Berrevoets'
SITENAME = 'Scott Berrevoets'
SITEURL = 'https://scottberrevoets.com'
# Show line numbers in code snippets
MARKDOWN = {
'extension_configs': {
'markdown.extensions.codehilite': {'css_class': 'highlight', 'linenums': True},
'markdown.extensions.extra': {},
'markdown.extensions.meta': {},
},
'output_format': 'html5',
}
# Path specifications
RELATIVE_URLS = True
PATH = 'content'
THEME = './theme'
ARTICLE_URL = '{date:%Y}/{date:%m}/{date:%d}/{slug}/'
ARTICLE_SAVE_AS = '{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'
PAGE_URL = '{slug}.html'
PAGE_SAVE_AS = '{slug}.html'
# Pagination
DEFAULT_PAGINATION = 5
PAGINATION_PATTERNS = (
(1, '{base_name}/', '{base_name}/index.html'),
(2, '{base_name}/page/{number}/', '{base_name}/page/{number}/index.html'),
)
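# With the patterns above, the first page of the index is written to
# {base_name}/index.html and every later page N to {base_name}/page/N/index.html,
# e.g. /page/2/index.html for the second page of posts.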
# Create and link to index and archive
DIRECT_TEMPLATES = ('index', 'archive')
MENUITEMS = [('Blog', '/'), ('Archive', '/archive.html')]
# Disable authors, categories, and tags
AUTHOR_SAVE_AS = False
CATEGORY_SAVE_AS = False
TAG_SAVE_AS = False
# Locale settings
DEFAULT_LANG = 'en'
TIMEZONE = 'America/Los_Angeles'
DATE_FORMATS = { 'en': '%d %B %Y' }
# Social media
DISPLAY_SOCIAL_MEDIA = True
SOCIALMEDIAITEMS = [('twitter.svg', 'https://twitter.com/ScottBerrevoets'),
('github.svg', 'https://github.com/sberrevoets'),
('stackoverflow.svg', 'http://stackoverflow.com/users/751268/scott-berrevoets'),
('youtube.svg', 'https://www.youtube.com/watch?v=dQw4w9WgXcQ')]
# No feeds in development mode
FEED_DOMAIN = SITEURL
FEED_ATOM = None
FEED_RSS = None
FEED_ALL_ATOM = None
FEED_ALL_RSS = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
|
mit
|
woggle/mesos-old
|
third_party/boto-2.0b2/boto/ec2/regioninfo.py
|
44
|
1524
|
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.regioninfo import RegionInfo
class EC2RegionInfo(RegionInfo):
"""
Represents an EC2 Region
"""
def __init__(self, connection=None, name=None, endpoint=None):
from boto.ec2.connection import EC2Connection
RegionInfo.__init__(self, connection, name, endpoint,
EC2Connection)
|
apache-2.0
|
tensorflow/privacy
|
research/pate_2017/input.py
|
1
|
13234
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import math
import os
import sys
import tarfile
import numpy as np
from scipy.io import loadmat as loadmat
from six.moves import cPickle as pickle
from six.moves import urllib
from six.moves import xrange
import tensorflow.compat.v1 as tf
FLAGS = tf.flags.FLAGS
def create_dir_if_needed(dest_directory):
"""Create directory if doesn't exist."""
if not tf.gfile.IsDirectory(dest_directory):
tf.gfile.MakeDirs(dest_directory)
return True
def maybe_download(file_urls, directory):
"""Download a set of files in temporary local folder."""
# Create directory if doesn't exist
assert create_dir_if_needed(directory)
# This list will include all URLS of the local copy of downloaded files
result = []
# For each file of the dataset
for file_url in file_urls:
# Extract filename
filename = file_url.split('/')[-1]
# If downloading from GitHub, remove suffix ?raw=True from local filename
if filename.endswith("?raw=true"):
filename = filename[:-9]
# Deduce local file url
#filepath = os.path.join(directory, filename)
filepath = directory + '/' + filename
# Add to result list
result.append(filepath)
# Test if file already exists
if not tf.gfile.Exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(file_url, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return result
def image_whitening(data):
"""
  Subtracts the mean of each image and divides by the adjusted standard
  deviation (for numerical stability). Operations are per image but performed
  for the entire array.
"""
assert len(np.shape(data)) == 4
# Compute number of pixels in image
nb_pixels = np.shape(data)[1] * np.shape(data)[2] * np.shape(data)[3]
# Subtract mean
mean = np.mean(data, axis=(1, 2, 3))
ones = np.ones(np.shape(data)[1:4], dtype=np.float32)
for i in xrange(len(data)):
data[i, :, :, :] -= mean[i] * ones
# Compute adjusted standard variance
adj_std_var = np.maximum(np.ones(len(data), dtype=np.float32) / math.sqrt(nb_pixels), np.std(data, axis=(1, 2, 3))) # pylint: disable=line-too-long
# Divide image
for i in xrange(len(data)):
data[i, :, :, :] = data[i, :, :, :] / adj_std_var[i]
print(np.shape(data))
return data
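# In other words, each image x_i above is normalised as
# x_i <- (x_i - mean(x_i)) / max(std(x_i), 1 / sqrt(nb_pixels)),
# the usual "adjusted stddev" whitening that avoids dividing by a value close
# to zero for nearly constant images.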
def extract_svhn(local_url):
"""Extract a MATLAB matrix into two numpy arrays with data and labels."""
with tf.gfile.Open(local_url, mode='r') as file_obj:
# Load MATLAB matrix using scipy IO
data_dict = loadmat(file_obj)
# Extract each dictionary (one for data, one for labels)
data, labels = data_dict['X'], data_dict['y']
# Set np type
data = np.asarray(data, dtype=np.float32)
labels = np.asarray(labels, dtype=np.int32)
# Transpose data to match TF model input format
data = data.transpose(3, 0, 1, 2)
# Fix the SVHN labels which label 0s as 10s
labels[labels == 10] = 0
# Fix label dimensions
labels = labels.reshape(len(labels))
return data, labels
def unpickle_cifar_dic(file_path):
"""Helper function: unpickles a dictionary (used for loading CIFAR)."""
file_obj = open(file_path, 'rb')
data_dict = pickle.load(file_obj)
file_obj.close()
return data_dict['data'], data_dict['labels']
def extract_cifar10(local_url, data_dir):
"""Extracts CIFAR-10 and return numpy arrays with the different sets."""
# These numpy dumps can be reloaded to avoid performing the pre-processing
# if they exist in the working directory.
# Changing the order of this list will ruin the indices below.
preprocessed_files = ['/cifar10_train.npy',
'/cifar10_train_labels.npy',
'/cifar10_test.npy',
'/cifar10_test_labels.npy']
all_preprocessed = True
for file_name in preprocessed_files:
if not tf.gfile.Exists(data_dir + file_name):
all_preprocessed = False
break
if all_preprocessed:
# Reload pre-processed training data from numpy dumps
with tf.gfile.Open(data_dir + preprocessed_files[0], mode='r') as file_obj:
train_data = np.load(file_obj)
with tf.gfile.Open(data_dir + preprocessed_files[1], mode='r') as file_obj:
train_labels = np.load(file_obj)
# Reload pre-processed testing data from numpy dumps
with tf.gfile.Open(data_dir + preprocessed_files[2], mode='r') as file_obj:
test_data = np.load(file_obj)
with tf.gfile.Open(data_dir + preprocessed_files[3], mode='r') as file_obj:
test_labels = np.load(file_obj)
else:
# Do everything from scratch
# Define lists of all files we should extract
train_files = ['data_batch_' + str(i) for i in xrange(1, 6)]
test_file = ['test_batch']
cifar10_files = train_files + test_file
# Check if all files have already been extracted
need_to_unpack = False
for file_name in cifar10_files:
if not tf.gfile.Exists(file_name):
need_to_unpack = True
break
# We have to unpack the archive
if need_to_unpack:
tarfile.open(local_url, 'r:gz').extractall(data_dir)
# Load training images and labels
images = []
labels = []
for train_file in train_files:
# Construct filename
filename = data_dir + '/cifar-10-batches-py/' + train_file
# Unpickle dictionary and extract images and labels
images_tmp, labels_tmp = unpickle_cifar_dic(filename)
# Append to lists
images.append(images_tmp)
labels.append(labels_tmp)
# Convert to numpy arrays and reshape in the expected format
train_data = np.asarray(images, dtype=np.float32)
train_data = train_data.reshape((50000, 3, 32, 32))
train_data = np.swapaxes(train_data, 1, 3)
train_labels = np.asarray(labels, dtype=np.int32).reshape(50000)
# Save so we don't have to do this again
np.save(data_dir + preprocessed_files[0], train_data)
np.save(data_dir + preprocessed_files[1], train_labels)
# Construct filename for test file
filename = data_dir + '/cifar-10-batches-py/' + test_file[0]
# Load test images and labels
test_data, test_images = unpickle_cifar_dic(filename)
# Convert to numpy arrays and reshape in the expected format
test_data = np.asarray(test_data, dtype=np.float32)
test_data = test_data.reshape((10000, 3, 32, 32))
test_data = np.swapaxes(test_data, 1, 3)
test_labels = np.asarray(test_images, dtype=np.int32).reshape(10000)
# Save so we don't have to do this again
np.save(data_dir + preprocessed_files[2], test_data)
np.save(data_dir + preprocessed_files[3], test_labels)
return train_data, train_labels, test_data, test_labels
def extract_mnist_data(filename, num_images, image_size, pixel_depth):
"""
Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
if not tf.gfile.Exists(filename+'.npy'):
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(image_size * image_size * num_images)
data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
data = (data - (pixel_depth / 2.0)) / pixel_depth
data = data.reshape(num_images, image_size, image_size, 1)
np.save(filename, data)
return data
else:
with tf.gfile.Open(filename+'.npy', mode='rb') as file_obj:
return np.load(file_obj)
def extract_mnist_labels(filename, num_images):
"""
Extract the labels into a vector of int64 label IDs.
"""
if not tf.gfile.Exists(filename+'.npy'):
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int32)
np.save(filename, labels)
return labels
else:
with tf.gfile.Open(filename+'.npy', mode='rb') as file_obj:
return np.load(file_obj)
def ld_svhn(extended=False, test_only=False):
"""
Load the original SVHN data
Args:
extended: include extended training data in the returned array
test_only: disables loading of both train and extra -> large speed up
"""
# Define files to be downloaded
# WARNING: changing the order of this list will break indices (cf. below)
file_urls = ['http://ufldl.stanford.edu/housenumbers/train_32x32.mat',
'http://ufldl.stanford.edu/housenumbers/test_32x32.mat',
'http://ufldl.stanford.edu/housenumbers/extra_32x32.mat']
# Maybe download data and retrieve local storage urls
local_urls = maybe_download(file_urls, FLAGS.data_dir)
# Extra Train, Test, and Extended Train data
if not test_only:
# Load and applying whitening to train data
train_data, train_labels = extract_svhn(local_urls[0])
train_data = image_whitening(train_data)
# Load and applying whitening to extended train data
ext_data, ext_labels = extract_svhn(local_urls[2])
ext_data = image_whitening(ext_data)
# Load and applying whitening to test data
test_data, test_labels = extract_svhn(local_urls[1])
test_data = image_whitening(test_data)
if test_only:
return test_data, test_labels
else:
if extended:
# Stack train data with the extended training data
train_data = np.vstack((train_data, ext_data))
train_labels = np.hstack((train_labels, ext_labels))
return train_data, train_labels, test_data, test_labels
else:
# Return training and extended training data separately
return train_data, train_labels, test_data, test_labels, ext_data, ext_labels
def ld_cifar10(test_only=False):
"""Load the original CIFAR10 data."""
# Define files to be downloaded
file_urls = ['https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz']
# Maybe download data and retrieve local storage urls
local_urls = maybe_download(file_urls, FLAGS.data_dir)
# Extract archives and return different sets
dataset = extract_cifar10(local_urls[0], FLAGS.data_dir)
# Unpack tuple
train_data, train_labels, test_data, test_labels = dataset
# Apply whitening to input data
train_data = image_whitening(train_data)
test_data = image_whitening(test_data)
if test_only:
return test_data, test_labels
else:
return train_data, train_labels, test_data, test_labels
def ld_mnist(test_only=False):
"""Load the MNIST dataset."""
# Define files to be downloaded
# WARNING: changing the order of this list will break indices (cf. below)
file_urls = ['http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',
]
# Maybe download data and retrieve local storage urls
local_urls = maybe_download(file_urls, FLAGS.data_dir)
# Extract it into np arrays.
train_data = extract_mnist_data(local_urls[0], 60000, 28, 1)
train_labels = extract_mnist_labels(local_urls[1], 60000)
test_data = extract_mnist_data(local_urls[2], 10000, 28, 1)
test_labels = extract_mnist_labels(local_urls[3], 10000)
if test_only:
return test_data, test_labels
else:
return train_data, train_labels, test_data, test_labels
def partition_dataset(data, labels, nb_teachers, teacher_id):
"""
Simple partitioning algorithm that returns the right portion of the data
needed by a given teacher out of a certain nb of teachers
Args:
data: input data to be partitioned
labels: output data to be partitioned
nb_teachers: number of teachers in the ensemble (affects size of each
partition)
teacher_id: id of partition to retrieve
"""
# Sanity check
assert len(data) == len(labels)
assert int(teacher_id) < int(nb_teachers)
# This will floor the possible number of batches
batch_len = int(len(data) / nb_teachers)
# Compute start, end indices of partition
start = teacher_id * batch_len
end = (teacher_id+1) * batch_len
# Slice partition off
partition_data = data[start:end]
partition_labels = labels[start:end]
return partition_data, partition_labels
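# Illustrative example: with 60000 training samples and nb_teachers=100,
# batch_len is 600, so teacher_id=3 receives data[1800:2400] and the matching
# labels slice.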
|
apache-2.0
|
salfab/CouchPotatoServer
|
libs/elixir/ext/encrypted.py
|
30
|
4330
|
'''
An encryption plugin for Elixir utilizing the excellent PyCrypto library, which
can be downloaded here: http://www.amk.ca/python/code/crypto
Values for columns that are specified to be encrypted will be transparently
encrypted and safely encoded for storage in a unicode column using the powerful
and secure Blowfish Cipher using a specified "secret" which can be passed into
the plugin at class declaration time.
Example usage:
.. sourcecode:: python
from elixir import *
from elixir.ext.encrypted import acts_as_encrypted
class Person(Entity):
name = Field(Unicode)
password = Field(Unicode)
ssn = Field(Unicode)
acts_as_encrypted(for_fields=['password', 'ssn'],
with_secret='secret')
The above Person entity will automatically encrypt and decrypt the password and
ssn columns on save, update, and load. Different secrets can be specified on
an entity by entity basis, for added security.
**Important note**: instance attributes are encrypted in-place. This means that
if one of the encrypted attributes of an instance is accessed after the
instance has been flushed to the database (and thus encrypted), the value for
that attribute will be encrypted in the in-memory object in addition to the
database row.
'''
from Crypto.Cipher import Blowfish
from elixir.statements import Statement
from sqlalchemy.orm import MapperExtension, EXT_CONTINUE, EXT_STOP
try:
from sqlalchemy.orm import EXT_PASS
SA05orlater = False
except ImportError:
SA05orlater = True
__all__ = ['acts_as_encrypted']
__doc_all__ = []
#
# encryption and decryption functions
#
def encrypt_value(value, secret):
return Blowfish.new(secret, Blowfish.MODE_CFB) \
.encrypt(value).encode('string_escape')
def decrypt_value(value, secret):
return Blowfish.new(secret, Blowfish.MODE_CFB) \
.decrypt(value.decode('string_escape'))
#
# acts_as_encrypted statement
#
class ActsAsEncrypted(object):
def __init__(self, entity, for_fields=[], with_secret='abcdef'):
def perform_encryption(instance, encrypt=True):
encrypted = getattr(instance, '_elixir_encrypted', None)
if encrypted is encrypt:
# skipping encryption or decryption, as it is already done
return
else:
# marking instance as already encrypted/decrypted
instance._elixir_encrypted = encrypt
if encrypt:
func = encrypt_value
else:
func = decrypt_value
for column_name in for_fields:
current_value = getattr(instance, column_name)
if current_value:
setattr(instance, column_name,
func(current_value, with_secret))
def perform_decryption(instance):
perform_encryption(instance, encrypt=False)
class EncryptedMapperExtension(MapperExtension):
def before_insert(self, mapper, connection, instance):
perform_encryption(instance)
return EXT_CONTINUE
def before_update(self, mapper, connection, instance):
perform_encryption(instance)
return EXT_CONTINUE
if SA05orlater:
def reconstruct_instance(self, mapper, instance):
perform_decryption(instance)
# no special return value is required for
# reconstruct_instance, but you never know...
return EXT_CONTINUE
else:
def populate_instance(self, mapper, selectcontext, row,
instance, *args, **kwargs):
mapper.populate_instance(selectcontext, instance, row,
*args, **kwargs)
perform_decryption(instance)
# EXT_STOP because we already did populate the instance and
# the normal processing should not happen
return EXT_STOP
# make sure that the entity's mapper has our mapper extension
entity._descriptor.add_mapper_extension(EncryptedMapperExtension())
acts_as_encrypted = Statement(ActsAsEncrypted)
|
gpl-3.0
|
BrandonHe/sdl_core
|
src/3rd_party-static/jsoncpp/devtools/fixeol.py
|
247
|
1941
|
import os.path
import sys
def fix_source_eol( path, is_dry_run = True, verbose = True, eol = '\n' ):
"""Makes sure that all sources have the specified eol sequence (default: unix)."""
if not os.path.isfile( path ):
raise ValueError( 'Path "%s" is not a file' % path )
try:
f = open(path, 'rb')
except IOError, msg:
        print >> sys.stderr, "%s: I/O Error: %s" % (path, str(msg))
return False
try:
raw_lines = f.readlines()
finally:
f.close()
fixed_lines = [line.rstrip('\r\n') + eol for line in raw_lines]
if raw_lines != fixed_lines:
print '%s =>' % path,
if not is_dry_run:
f = open(path, "wb")
try:
f.writelines(fixed_lines)
finally:
f.close()
if verbose:
print is_dry_run and ' NEED FIX' or ' FIXED'
return True
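# Illustrative usage (hypothetical path): fix_source_eol('src/lib_json/json_reader.cpp',
# is_dry_run=False) rewrites the file with '\n' line endings when needed and
# returns True unless the file could not be opened.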
##
##
##
##def _do_fix( is_dry_run = True ):
## from waftools import antglob
## python_sources = antglob.glob( '.',
## includes = '**/*.py **/wscript **/wscript_build',
## excludes = antglob.default_excludes + './waf.py',
## prune_dirs = antglob.prune_dirs + 'waf-* ./build' )
## for path in python_sources:
## _fix_python_source( path, is_dry_run )
##
## cpp_sources = antglob.glob( '.',
## includes = '**/*.cpp **/*.h **/*.inl',
## prune_dirs = antglob.prune_dirs + 'waf-* ./build' )
## for path in cpp_sources:
## _fix_source_eol( path, is_dry_run )
##
##
##def dry_fix(context):
## _do_fix( is_dry_run = True )
##
##def fix(context):
## _do_fix( is_dry_run = False )
##
##def shutdown():
## pass
##
##def check(context):
## # Unit tests are run when "check" target is used
## ut = UnitTest.unit_test()
## ut.change_to_testfile_dir = True
## ut.want_to_see_test_output = True
## ut.want_to_see_test_error = True
## ut.run()
## ut.print_results()
|
bsd-3-clause
|
GGoussar/scikit-image
|
doc/examples/color_exposure/plot_adapt_rgb.py
|
9
|
4535
|
"""
=========================================
Adapting gray-scale filters to RGB images
=========================================
There are many filters that are designed to work with gray-scale images but not
with color images. To simplify the process of creating functions that can adapt
to RGB images, scikit-image provides the ``adapt_rgb`` decorator.
To actually use the ``adapt_rgb`` decorator, you have to decide how you want to
adapt the RGB image for use with the gray-scale filter. There are two
pre-defined handlers:
``each_channel``
Pass each of the RGB channels to the filter one-by-one, and stitch the
results back into an RGB image.
``hsv_value``
Convert the RGB image to HSV and pass the value channel to the filter.
The filtered result is inserted back into the HSV image and converted
back to RGB.
Below, we demonstrate the use of ``adapt_rgb`` on a couple of gray-scale
filters:
"""
from skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value
from skimage import filters
@adapt_rgb(each_channel)
def sobel_each(image):
return filters.sobel(image)
@adapt_rgb(hsv_value)
def sobel_hsv(image):
return filters.sobel(image)
######################################################################
# We can use these functions as we would normally use them, but now they work
# with both gray-scale and color images. Let's plot the results with a color
# image:
from skimage import data
from skimage.exposure import rescale_intensity
import matplotlib.pyplot as plt
image = data.astronaut()
fig = plt.figure(figsize=(14, 7))
ax_each = fig.add_subplot(121, adjustable='box-forced')
ax_hsv = fig.add_subplot(122, sharex=ax_each, sharey=ax_each,
adjustable='box-forced')
# We use 1 - sobel_each(image)
# but this will not work if image is not normalized
ax_each.imshow(rescale_intensity(1 - sobel_each(image)))
ax_each.set_xticks([]), ax_each.set_yticks([])
ax_each.set_title("Sobel filter computed\n on individual RGB channels")
# We use 1 - sobel_hsv(image) but this will not work if image is not normalized
ax_hsv.imshow(rescale_intensity(1 - sobel_hsv(image)))
ax_hsv.set_xticks([]), ax_hsv.set_yticks([])
ax_hsv.set_title("Sobel filter computed\n on (V)alue converted image (HSV)")
######################################################################
# Notice that the result for the value-filtered image preserves the color of
# the original image, but channel filtered image combines in a more
# surprising way. In other common cases, smoothing for example, the channel
# filtered image will produce a better result than the value-filtered image.
#
# You can also create your own handler functions for ``adapt_rgb``. To do so,
# just create a function with the following signature::
#
# def handler(image_filter, image, *args, **kwargs):
# # Manipulate RGB image here...
# image = image_filter(image, *args, **kwargs)
# # Manipulate filtered image here...
# return image
#
# Note that ``adapt_rgb`` handlers are written for filters where the image is
# the first argument.
#
# As a very simple example, we can just convert any RGB image to grayscale
# and then return the filtered result:
from skimage.color import rgb2gray
def as_gray(image_filter, image, *args, **kwargs):
gray_image = rgb2gray(image)
return image_filter(gray_image, *args, **kwargs)
######################################################################
# It's important to create a signature that uses ``*args`` and ``**kwargs``
# to pass arguments along to the filter so that the decorated function is
# allowed to have any number of positional and keyword arguments.
#
# Finally, we can use this handler with ``adapt_rgb`` just as before:
@adapt_rgb(as_gray)
def sobel_gray(image):
return filters.sobel(image)
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111, sharex=ax_each, sharey=ax_each,
adjustable='box-forced')
# We use 1 - sobel_gray(image)
# but this will not work if image is not normalized
ax.imshow(rescale_intensity(1 - sobel_gray(image)), cmap=plt.cm.gray)
ax.set_xticks([]), ax.set_yticks([])
ax.set_title("Sobel filter computed\n on the converted grayscale image")
plt.show()
######################################################################
#
# .. note::
#
# A very simple check of the array shape is used for detecting RGB
# images, so ``adapt_rgb`` is not recommended for functions that support
# 3D volumes or color images in non-RGB spaces.
|
bsd-3-clause
|
BehavioralInsightsTeam/edx-platform
|
lms/djangoapps/experiments/utils.py
|
9
|
2910
|
from student.models import CourseEnrollment
from django_comment_common.models import Role
from courseware.access import has_staff_access_to_preview_mode
from course_modes.models import (
get_cosmetic_verified_display_price
)
from courseware.date_summary import (
verified_upgrade_deadline_link, verified_upgrade_link_is_valid
)
def check_and_get_upgrade_link_and_date(user, enrollment=None, course=None):
"""
For an authenticated user, return a link to allow them to upgrade
in the specified course.
"""
if enrollment is None and course is None:
raise ValueError("Must specify either an enrollment or a course")
if enrollment:
if course is None:
course = enrollment.course
elif enrollment.course_id != course.id:
raise ValueError("{} refers to a different course than {} which was supplied".format(
enrollment, course
))
if enrollment.user_id != user.id:
raise ValueError("{} refers to a different user than {} which was supplied".format(
enrollment, user
))
if enrollment is None:
enrollment = CourseEnrollment.get_enrollment(user, course.id)
if user.is_authenticated and verified_upgrade_link_is_valid(enrollment):
return (
verified_upgrade_deadline_link(user, course),
enrollment.upgrade_deadline
)
return (None, None)
def get_experiment_user_metadata_context(course, user):
"""
Return a context dictionary with the keys used by the user_metadata.html.
"""
enrollment_mode = None
enrollment_time = None
enrollment = None
try:
enrollment = CourseEnrollment.objects.select_related(
'course'
).get(user_id=user.id, course_id=course.id)
if enrollment.is_active:
enrollment_mode = enrollment.mode
enrollment_time = enrollment.created
except CourseEnrollment.DoesNotExist:
        pass  # Not enrolled, use the default None values
upgrade_link, upgrade_date = check_and_get_upgrade_link_and_date(user, enrollment, course)
has_staff_access = has_staff_access_to_preview_mode(user, course)
forum_roles = []
if user.is_authenticated:
forum_roles = list(Role.objects.filter(users=user, course_id=course.id).values_list('name').distinct())
return {
'upgrade_link': upgrade_link,
'upgrade_price': unicode(get_cosmetic_verified_display_price(course)),
'enrollment_mode': enrollment_mode,
'enrollment_time': enrollment_time,
'pacing_type': 'self_paced' if course.self_paced else 'instructor_paced',
'upgrade_deadline': upgrade_date,
'course_key': course.id,
'course_start': course.start,
'course_end': course.end,
'has_staff_access': has_staff_access,
'forum_roles': forum_roles
}
|
agpl-3.0
|
lfalvarez/nouabook
|
elections/migrations/0012_auto__add_field_votainteligentemessage_author_ville.py
|
1
|
12058
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'VotaInteligenteMessage.author_ville'
db.add_column(u'elections_votainteligentemessage', 'author_ville',
self.gf('django.db.models.fields.CharField')(default='', max_length=35),
keep_default=False)
def backwards(self, orm):
# Deleting field 'VotaInteligenteMessage.author_ville'
db.delete_column(u'elections_votainteligentemessage', 'author_ville')
models = {
u'candideitorg.answer': {
'Meta': {'object_name': 'Answer'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['candideitorg.Question']"}),
'remote_id': ('django.db.models.fields.IntegerField', [], {}),
'resource_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'candideitorg.candidate': {
'Meta': {'object_name': 'Candidate'},
'answers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['candideitorg.Answer']", 'null': 'True', 'blank': 'True'}),
'election': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['candideitorg.Election']"}),
'has_answered': ('django.db.models.fields.BooleanField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'photo': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'remote_id': ('django.db.models.fields.IntegerField', [], {}),
'resource_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
u'candideitorg.category': {
'Meta': {'object_name': 'Category'},
'election': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['candideitorg.Election']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'remote_id': ('django.db.models.fields.IntegerField', [], {}),
'resource_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
u'candideitorg.election': {
'Meta': {'object_name': 'Election'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'information_source': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'logo': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_id': ('django.db.models.fields.IntegerField', [], {}),
'resource_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'use_default_media_naranja_option': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'candideitorg.question': {
'Meta': {'object_name': 'Question'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['candideitorg.Category']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'remote_id': ('django.db.models.fields.IntegerField', [], {}),
'resource_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'elections.candidateperson': {
'Meta': {'object_name': 'CandidatePerson'},
'candidate': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'relation'", 'unique': 'True', 'to': u"orm['candideitorg.Candidate']"}),
'custom_ribbon': ('django.db.models.fields.CharField', [], {'max_length': '18', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'relation'", 'unique': 'True', 'to': u"orm['popit.Person']"}),
'portrait_photo': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'reachable': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'elections.election': {
'Meta': {'object_name': 'Election'},
'can_election': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['candideitorg.Election']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'extra_info_content': ('django.db.models.fields.TextField', [], {'max_length': '3000', 'null': 'True', 'blank': 'True'}),
'extra_info_title': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'highlighted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'popit_api_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.ApiInstance']", 'null': 'True', 'blank': 'True'}),
'searchable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'name'", 'unique_with': '()'}),
'uses_face_to_face': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'uses_preguntales': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'uses_questionary': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'uses_ranking': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'uses_soul_mate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'writeitinstance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['writeit.WriteItInstance']", 'null': 'True', 'blank': 'True'})
},
u'elections.votainteligenteanswer': {
'Meta': {'object_name': 'VotaInteligenteAnswer'},
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': u"orm['elections.VotaInteligenteMessage']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': u"orm['popit.Person']"})
},
u'elections.votainteligentemessage': {
'Meta': {'object_name': 'VotaInteligenteMessage', '_ormbases': [u'writeit.Message']},
'author_ville': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '35'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fbshared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'message_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['writeit.Message']", 'unique': 'True', 'primary_key': 'True'}),
'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'popit.apiinstance': {
'Meta': {'object_name': 'ApiInstance'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('popit.fields.ApiInstanceURLField', [], {'unique': 'True', 'max_length': '200'})
},
u'popit.person': {
'Meta': {'object_name': 'Person'},
'api_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.ApiInstance']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'popit_url': ('popit.fields.PopItURLField', [], {'default': "''", 'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'writeit.message': {
'Meta': {'object_name': 'Message', '_ormbases': [u'writeit.WriteItDocument']},
'author_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'author_name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'content': ('django.db.models.fields.TextField', [], {}),
'people': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'messages'", 'symmetrical': 'False', 'to': u"orm['popit.Person']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
u'writeitdocument_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['writeit.WriteItDocument']", 'unique': 'True', 'primary_key': 'True'}),
'writeitinstance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['writeit.WriteItInstance']"})
},
u'writeit.writeitapiinstance': {
'Meta': {'object_name': 'WriteItApiInstance'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'})
},
u'writeit.writeitdocument': {
'Meta': {'object_name': 'WriteItDocument'},
'api_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['writeit.WriteItApiInstance']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'remote_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'writeit.writeitinstance': {
'Meta': {'object_name': 'WriteItInstance', '_ormbases': [u'writeit.WriteItDocument']},
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'writeitdocument_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['writeit.WriteItDocument']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['elections']
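# Illustrative sketch (not part of the original migration; the helper name is made up):
# every entry in the frozen model dict above is a ('dotted.path.to.Field', args, kwargs)
# triple whose args/kwargs are stored as source strings. For example:
def _describe_frozen_field(frozen_models):
    path, args, kwargs = frozen_models['elections.election']['slug']
    # path is 'autoslug.fields.AutoSlugField'; kwargs values are strings such as '50'.
    return '%s(max_length=%s)' % (path.rsplit('.', 1)[-1], kwargs.get('max_length'))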
|
gpl-3.0
|
sirchia/CouchPotatoServer
|
libs/migrate/versioning/template.py
|
62
|
2874
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
import sys
from pkg_resources import resource_filename
from migrate.versioning.config import *
from migrate.versioning import pathed
class Collection(pathed.Pathed):
"""A collection of templates of a specific type"""
_mask = None
def get_path(self, file):
return os.path.join(self.path, str(file))
class RepositoryCollection(Collection):
_mask = '%s'
class ScriptCollection(Collection):
_mask = '%s.py_tmpl'
class ManageCollection(Collection):
_mask = '%s.py_tmpl'
class SQLScriptCollection(Collection):
_mask = '%s.py_tmpl'
class Template(pathed.Pathed):
"""Finds the paths/packages of various Migrate templates.
:param path: Templates are loaded from migrate package
if `path` is not provided.
"""
pkg = 'migrate.versioning.templates'
def __new__(cls, path=None):
if path is None:
path = cls._find_path(cls.pkg)
return super(Template, cls).__new__(cls, path)
def __init__(self, path=None):
if path is None:
path = Template._find_path(self.pkg)
super(Template, self).__init__(path)
self.repository = RepositoryCollection(os.path.join(path, 'repository'))
self.script = ScriptCollection(os.path.join(path, 'script'))
self.manage = ManageCollection(os.path.join(path, 'manage'))
self.sql_script = SQLScriptCollection(os.path.join(path, 'sql_script'))
@classmethod
def _find_path(cls, pkg):
"""Returns absolute path to dotted python package."""
tmp_pkg = pkg.rsplit('.', 1)
if len(tmp_pkg) != 1:
return resource_filename(tmp_pkg[0], tmp_pkg[1])
else:
return resource_filename(tmp_pkg[0], '')
def _get_item(self, collection, theme=None):
"""Locate and return the path of a template inside a collection.
:param collection: name of the collection to locate
:param theme: name of the theme/subfolder in the collection (defaults to "default")
:returns: path to the requested template
:rtype: str
"""
item = getattr(self, collection)
theme_mask = getattr(item, '_mask')
theme = theme_mask % (theme or 'default')
return item.get_path(theme)
def get_repository(self, *a, **kw):
"""Calls self._get_item('repository', *a, **kw)"""
return self._get_item('repository', *a, **kw)
def get_script(self, *a, **kw):
"""Calls self._get_item('script', *a, **kw)"""
return self._get_item('script', *a, **kw)
def get_sql_script(self, *a, **kw):
"""Calls self._get_item('sql_script', *a, **kw)"""
return self._get_item('sql_script', *a, **kw)
def get_manage(self, *a, **kw):
"""Calls self._get_item('manage', *a, **kw)"""
return self._get_item('manage', *a, **kw)
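# Illustrative usage sketch (not part of the upstream module; helper name is made up):
# resolve template paths for the default theme using the collections built in
# Template.__init__ above.
def _show_default_templates():
    tmpl = Template()
    # get_repository() resolves the 'default' repository template directory, while
    # get_script() resolves the 'default.py_tmpl' change-script template.
    return tmpl.get_repository(), tmpl.get_script(theme='default')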
|
gpl-3.0
|
gdimitris/ChessPuzzlerBackend
|
Virtual_Environment/lib/python2.7/site-packages/werkzeug/testapp.py
|
294
|
9398
|
# -*- coding: utf-8 -*-
"""
werkzeug.testapp
~~~~~~~~~~~~~~~~
Provide a small test application that can be used to test a WSGI server
and check it for WSGI compliance.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import werkzeug
from textwrap import wrap
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.utils import escape
import base64
logo = Response(base64.b64decode(
'''R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP/////////
//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv
nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25
7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq
ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX
m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G
p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo
SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf
78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA
ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA
tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx
w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx
lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45
Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB
yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd
dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r
idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh
EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8
ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64
gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C
JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y
Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9
YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX
c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb
qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL
cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG
cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2
KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe
EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb
UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB
Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z
aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn
kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs
='''), mimetype='image/png')
TEMPLATE = u'''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<title>WSGI Information</title>
<style type="text/css">
@import url(http://fonts.googleapis.com/css?family=Ubuntu);
body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
'Verdana', sans-serif; background-color: white; color: #000;
font-size: 15px; text-align: center; }
#logo { float: right; padding: 0 0 10px 10px; }
div.box { text-align: left; width: 45em; margin: auto; padding: 50px 0;
background-color: white; }
h1, h2 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
'Geneva', 'Verdana', sans-serif; font-weight: normal; }
h1 { margin: 0 0 30px 0; }
h2 { font-size: 1.4em; margin: 1em 0 0.5em 0; }
table { width: 100%%; border-collapse: collapse; border: 1px solid #AFC5C9 }
table th { background-color: #AFC1C4; color: white; font-size: 0.72em;
font-weight: normal; width: 18em; vertical-align: top;
padding: 0.5em 0 0.1em 0.5em; }
table td { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; }
code { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
monospace; font-size: 0.7em; }
ul li { line-height: 1.5em; }
ul.path { font-size: 0.7em; margin: 0 -30px; padding: 8px 30px;
list-style: none; background: #E8EFF0; }
ul.path li { line-height: 1.6em; }
li.virtual { color: #999; text-decoration: underline; }
li.exp { background: white; }
</style>
<div class="box">
<img src="?resource=logo" id="logo" alt="[The Werkzeug Logo]" />
<h1>WSGI Information</h1>
<p>
This page displays all available information about the WSGI server and
the underlying Python interpreter.
<h2 id="python-interpreter">Python Interpreter</h2>
<table>
<tr>
<th>Python Version
<td>%(python_version)s
<tr>
<th>Platform
<td>%(platform)s [%(os)s]
<tr>
<th>API Version
<td>%(api_version)s
<tr>
<th>Byteorder
<td>%(byteorder)s
<tr>
<th>Werkzeug Version
<td>%(werkzeug_version)s
</table>
<h2 id="wsgi-environment">WSGI Environment</h2>
<table>%(wsgi_env)s</table>
<h2 id="installed-eggs">Installed Eggs</h2>
<p>
The following python packages were installed on the system as
Python eggs:
<ul>%(python_eggs)s</ul>
<h2 id="sys-path">System Path</h2>
<p>
The following paths are the current contents of the load path; these
entries are searched for Python packages. Note that not
all items in this path are folders. Gray and underlined items are
entries pointing to invalid resources or used by custom import hooks
such as the zip importer.
<p>
Items with a bright background were expanded for display from a relative
path. If you encounter such paths in the output you might want to check
your setup as relative paths are usually problematic in multithreaded
environments.
<ul class="path">%(sys_path)s</ul>
</div>
'''
def iter_sys_path():
if os.name == 'posix':
def strip(x):
prefix = os.path.expanduser('~')
if x.startswith(prefix):
x = '~' + x[len(prefix):]
return x
else:
strip = lambda x: x
cwd = os.path.abspath(os.getcwd())
for item in sys.path:
path = os.path.join(cwd, item or os.path.curdir)
yield strip(os.path.normpath(path)), \
not os.path.isdir(path), path != item
def render_testapp(req):
try:
import pkg_resources
except ImportError:
eggs = ()
else:
eggs = sorted(pkg_resources.working_set,
key=lambda x: x.project_name.lower())
python_eggs = []
for egg in eggs:
try:
version = egg.version
except (ValueError, AttributeError):
version = 'unknown'
python_eggs.append('<li>%s <small>[%s]</small>' % (
escape(egg.project_name),
escape(version)
))
wsgi_env = []
sorted_environ = sorted(req.environ.items(),
key=lambda x: repr(x[0]).lower())
for key, value in sorted_environ:
wsgi_env.append('<tr><th>%s<td><code>%s</code>' % (
escape(str(key)),
' '.join(wrap(escape(repr(value))))
))
sys_path = []
for item, virtual, expanded in iter_sys_path():
class_ = []
if virtual:
class_.append('virtual')
if expanded:
class_.append('exp')
sys_path.append('<li%s>%s' % (
class_ and ' class="%s"' % ' '.join(class_) or '',
escape(item)
))
return (TEMPLATE % {
'python_version': '<br>'.join(escape(sys.version).splitlines()),
'platform': escape(sys.platform),
'os': escape(os.name),
'api_version': sys.api_version,
'byteorder': sys.byteorder,
'werkzeug_version': werkzeug.__version__,
'python_eggs': '\n'.join(python_eggs),
'wsgi_env': '\n'.join(wsgi_env),
'sys_path': '\n'.join(sys_path)
}).encode('utf-8')
def test_app(environ, start_response):
"""Simple test application that dumps the environment. You can use
it to check if Werkzeug is working properly:
.. sourcecode:: pycon
>>> from werkzeug.serving import run_simple
>>> from werkzeug.testapp import test_app
>>> run_simple('localhost', 3000, test_app)
* Running on http://localhost:3000/
The application displays important information from the WSGI environment,
the Python interpreter and the installed libraries.
"""
req = Request(environ, populate_request=False)
if req.args.get('resource') == 'logo':
response = logo
else:
response = Response(render_testapp(req), mimetype='text/html')
return response(environ, start_response)
if __name__ == '__main__':
from werkzeug.serving import run_simple
run_simple('localhost', 5000, test_app, use_reloader=True)
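# Illustrative smoke test (not part of the upstream module; helper name is made up):
# exercise test_app with Werkzeug's test client instead of starting a real server.
def _smoke_test():
    from werkzeug.test import Client
    client = Client(test_app, Response)  # Response is BaseResponse, imported above
    resp = client.get('/')
    assert resp.status_code == 200
    assert b'WSGI Information' in resp.data  # heading rendered from TEMPLATE
    return resp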
|
mit
|
heladio/my-blog
|
pelica-env/lib/python2.7/site-packages/pip/req/req_install.py
|
31
|
44179
|
from __future__ import absolute_import
import logging
import os
import re
import shutil
import sys
import tempfile
import warnings
import zipfile
from distutils.util import change_root
from distutils import sysconfig
from email.parser import FeedParser
from pip._vendor import pkg_resources, six
from pip._vendor.distlib.markers import interpret as markers_interpret
from pip._vendor.six.moves import configparser
import pip.wheel
from pip.compat import native_str, WINDOWS
from pip.download import is_url, url_to_path, path_to_url, is_archive_file
from pip.exceptions import (
InstallationError, UninstallationError, UnsupportedWheel,
)
from pip.locations import (
bin_py, running_under_virtualenv, PIP_DELETE_MARKER_FILENAME, bin_user,
)
from pip.utils import (
display_path, rmtree, ask_path_exists, backup_dir, is_installable_dir,
dist_in_usersite, dist_in_site_packages, egg_link_path, make_path_relative,
call_subprocess, read_text_file, FakeFile, _make_build_dir,
)
from pip.utils.deprecation import RemovedInPip7Warning, RemovedInPip8Warning
from pip.utils.logging import indent_log
from pip.req.req_uninstall import UninstallPathSet
from pip.vcs import vcs
from pip.wheel import move_wheel_files, Wheel
from pip._vendor.packaging.version import Version
_FILTER_INSTALL_OUTPUT_REGEX = re.compile(r"""
(?:^running\s.*) |
(?:^writing\s.*) |
(?:creating\s.*) |
(?:[Cc]opying\s.*) |
(?:^reading\s.*') |
(?:^removing\s.*\.egg-info'\s\(and\severything\sunder\sit\)$) |
(?:^byte-compiling) |
(?:^SyntaxError:) |
(?:^SyntaxWarning:) |
(?:^\s*Skipping\simplicit\sfixer:) |
(?:^\s*(warning:\s)?no\spreviously-included\s(files|directories)) |
(?:^\s*warning:\sno\sfiles\sfound\smatching\s\'.*\') |
(?:^\s*changing\smode\sof) |
# Not sure what this warning is, but it seems harmless:
(?:^warning:\smanifest_maker:\sstandard\sfile\s'-c'\snot found$)
""", re.VERBOSE)
logger = logging.getLogger(__name__)
def _filter_install(line):
level = logging.INFO
if _FILTER_INSTALL_OUTPUT_REGEX.search(line.strip()):
level = logging.DEBUG
return (level, line)
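# Illustrative sketch (not part of the upstream module; helper name is made up): routine
# setup.py output such as "running build" is demoted to DEBUG by the regex above, while
# anything unrecognised stays at INFO.
def _demo_filter_install():
    assert _filter_install('running build')[0] == logging.DEBUG
    assert _filter_install('error: something broke')[0] == logging.INFO
    return True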
class InstallRequirement(object):
def __init__(self, req, comes_from, source_dir=None, editable=False,
link=None, as_egg=False, update=True, editable_options=None,
pycompile=True, markers=None, isolated=False):
self.extras = ()
if isinstance(req, six.string_types):
req = pkg_resources.Requirement.parse(req)
self.extras = req.extras
self.req = req
self.comes_from = comes_from
self.source_dir = source_dir
self.editable = editable
if editable_options is None:
editable_options = {}
self.editable_options = editable_options
self.link = link
self.as_egg = as_egg
self.markers = markers
self._egg_info_path = None
# This holds the pkg_resources.Distribution object if this requirement
# is already available:
self.satisfied_by = None
# This holds the pkg_resources.Distribution object if this requirement
# conflicts with another installed distribution:
self.conflicts_with = None
# Temporary build location
self._temp_build_dir = None
# Used to store the directory where the _temp_build_dir should ideally
# have been created. Cf _correct_build_location method.
self._ideal_build_dir = None
# True if the editable should be updated:
self.update = update
# Set to True after successful installation
self.install_succeeded = None
# UninstallPathSet of uninstalled distribution (for possible rollback)
self.uninstalled = None
self.use_user_site = False
self.target_dir = None
self.pycompile = pycompile
self.isolated = isolated
@property
def url(self):
warnings.warn(
"The InstallRequirement.url attribute has been removed and should "
"not be used. It was temporarily left here as a shim for projects "
"which used it even though it was not a public API.",
RemovedInPip7Warning,
)
return self.link.url
@classmethod
def from_editable(cls, editable_req, comes_from=None, default_vcs=None,
isolated=False):
from pip.index import Link
name, url, extras_override, editable_options = parse_editable(
editable_req, default_vcs)
if url.startswith('file:'):
source_dir = url_to_path(url)
else:
source_dir = None
res = cls(name, comes_from, source_dir=source_dir,
editable=True,
link=Link(url),
editable_options=editable_options,
isolated=isolated)
if extras_override is not None:
res.extras = extras_override
return res
@classmethod
def from_line(cls, name, comes_from=None, isolated=False):
"""Creates an InstallRequirement from a name, which might be a
requirement, directory containing 'setup.py', filename, or URL.
"""
from pip.index import Link
if is_url(name):
marker_sep = '; '
else:
marker_sep = ';'
if marker_sep in name:
name, markers = name.split(marker_sep, 1)
markers = markers.strip()
if not markers:
markers = None
else:
markers = None
name = name.strip()
req = None
path = os.path.normpath(os.path.abspath(name))
link = None
if is_url(name):
link = Link(name)
elif (os.path.isdir(path) and
(os.path.sep in name or name.startswith('.'))):
if not is_installable_dir(path):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' not "
"found." % name
)
link = Link(path_to_url(name))
elif is_archive_file(path):
if not os.path.isfile(path):
logger.warning(
'Requirement %r looks like a filename, but the file does '
'not exist',
name
)
link = Link(path_to_url(name))
# it's a local file, dir, or url
if link:
# Handle relative file URLs
if link.scheme == 'file' and re.search(r'\.\./', link.url):
link = Link(
path_to_url(os.path.normpath(os.path.abspath(link.path))))
# wheel file
if link.is_wheel:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel on this platform." %
wheel.filename
)
req = "%s==%s" % (wheel.name, wheel.version)
else:
# set the req to the egg fragment. when it's not there, this
# will become an 'unnamed' requirement
req = link.egg_fragment
# a requirement specifier
else:
req = name
return cls(req, comes_from, link=link, markers=markers,
isolated=isolated)
def __str__(self):
if self.req:
s = str(self.req)
if self.link:
s += ' from %s' % self.link.url
else:
s = self.link.url if self.link else None
if self.satisfied_by is not None:
s += ' in %s' % display_path(self.satisfied_by.location)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += ' (from %s)' % comes_from
return s
def __repr__(self):
return '<%s object: %s editable=%r>' % (
self.__class__.__name__, str(self), self.editable)
def populate_link(self, finder, upgrade):
"""Ensure that a link is set if one can be found for this requirement.
Note that self.link may still be None if upgrade is False and the
requirement is already installed.
"""
if self.link is None:
self.link = finder.find_requirement(self, upgrade)
@property
def specifier(self):
return self.req.specifier
def from_path(self):
if self.req is None:
return None
s = str(self.req)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += '->' + comes_from
return s
def build_location(self, build_dir):
if self._temp_build_dir is not None:
return self._temp_build_dir
if self.req is None:
# for requirement via a path to a directory: the name of the
# package is not available yet so we create a temp directory
# Once run_egg_info has run, we'll be able
# to fix it via _correct_build_location
self._temp_build_dir = tempfile.mkdtemp('-build', 'pip-')
self._ideal_build_dir = build_dir
return self._temp_build_dir
if self.editable:
name = self.name.lower()
else:
name = self.name
# FIXME: Is there a better place to create the build_dir? (hg and bzr
# need this)
if not os.path.exists(build_dir):
logger.debug('Creating directory %s', build_dir)
_make_build_dir(build_dir)
return os.path.join(build_dir, name)
def _correct_build_location(self):
"""Move self._temp_build_dir to self._ideal_build_dir/self.req.name
For some requirements (e.g. a path to a directory), the name of the
package is not available until we run egg_info, so the build_location
will return a temporary directory and store the _ideal_build_dir.
This is only called by self.egg_info_path to fix the temporary build
directory.
"""
if self.source_dir is not None:
return
assert self.req is not None
assert self._temp_build_dir
assert self._ideal_build_dir
old_location = self._temp_build_dir
self._temp_build_dir = None
new_location = self.build_location(self._ideal_build_dir)
if os.path.exists(new_location):
raise InstallationError(
'A package already exists in %s; please remove it to continue'
% display_path(new_location))
logger.debug(
'Moving package %s from %s to new location %s',
self, display_path(old_location), display_path(new_location),
)
shutil.move(old_location, new_location)
self._temp_build_dir = new_location
self._ideal_build_dir = None
self.source_dir = new_location
self._egg_info_path = None
@property
def name(self):
if self.req is None:
return None
return native_str(self.req.project_name)
@property
def setup_py(self):
assert self.source_dir, "No source dir for %s" % self
try:
import setuptools # noqa
except ImportError:
# Setuptools is not available
raise InstallationError(
"setuptools must be installed to install from a source "
"distribution"
)
setup_file = 'setup.py'
if self.editable_options and 'subdirectory' in self.editable_options:
setup_py = os.path.join(self.source_dir,
self.editable_options['subdirectory'],
setup_file)
else:
setup_py = os.path.join(self.source_dir, setup_file)
# Python2 __file__ should not be unicode
if six.PY2 and isinstance(setup_py, six.text_type):
setup_py = setup_py.encode(sys.getfilesystemencoding())
return setup_py
def run_egg_info(self):
assert self.source_dir
if self.name:
logger.debug(
'Running setup.py (path:%s) egg_info for package %s',
self.setup_py, self.name,
)
else:
logger.debug(
'Running setup.py (path:%s) egg_info for package from %s',
self.setup_py, self.link,
)
with indent_log():
# if it's distribute>=0.7, it won't contain an importable
# setuptools, and having an egg-info dir blocks the ability of
# setup.py to find setuptools plugins, so delete the egg-info dir
# if no setuptools. it will get recreated by the run of egg_info
# NOTE: this self.name check only works when installing from a
# specifier (not archive path/urls)
# TODO: take this out later
if (self.name == 'distribute' and not
os.path.isdir(
os.path.join(self.source_dir, 'setuptools'))):
rmtree(os.path.join(self.source_dir, 'distribute.egg-info'))
script = self._run_setup_py
script = script.replace('__SETUP_PY__', repr(self.setup_py))
script = script.replace('__PKG_NAME__', repr(self.name))
base_cmd = [sys.executable, '-c', script]
if self.isolated:
base_cmd += ["--no-user-cfg"]
egg_info_cmd = base_cmd + ['egg_info']
# We can't put the .egg-info files at the root, because then the
# source code will be mistaken for an installed egg, causing
# problems
if self.editable:
egg_base_option = []
else:
egg_info_dir = os.path.join(self.source_dir, 'pip-egg-info')
if not os.path.exists(egg_info_dir):
os.makedirs(egg_info_dir)
egg_base_option = ['--egg-base', 'pip-egg-info']
cwd = self.source_dir
if self.editable_options and \
'subdirectory' in self.editable_options:
cwd = os.path.join(cwd, self.editable_options['subdirectory'])
call_subprocess(
egg_info_cmd + egg_base_option,
cwd=cwd,
filter_stdout=_filter_install,
show_stdout=False,
command_level=logging.DEBUG,
command_desc='python setup.py egg_info')
if not self.req:
if isinstance(
pkg_resources.parse_version(self.pkg_info()["Version"]),
Version):
op = "=="
else:
op = "==="
self.req = pkg_resources.Requirement.parse(
"".join([
self.pkg_info()["Name"],
op,
self.pkg_info()["Version"],
]))
self._correct_build_location()
# FIXME: This is a lame hack, entirely for PasteScript which has
# a self-provided entry point that causes this awkwardness
_run_setup_py = """
__file__ = __SETUP_PY__
from setuptools.command import egg_info
import pkg_resources
import os
import tokenize
def replacement_run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in pkg_resources.iter_entry_points('egg_info.writers'):
# require=False is the change we're making:
writer = ep.load(require=False)
if writer:
writer(self, ep.name, os.path.join(self.egg_info,ep.name))
self.find_sources()
egg_info.egg_info.run = replacement_run
exec(compile(
getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'),
__file__,
'exec'
))
"""
def egg_info_data(self, filename):
if self.satisfied_by is not None:
if not self.satisfied_by.has_metadata(filename):
return None
return self.satisfied_by.get_metadata(filename)
assert self.source_dir
filename = self.egg_info_path(filename)
if not os.path.exists(filename):
return None
data = read_text_file(filename)
return data
def egg_info_path(self, filename):
if self._egg_info_path is None:
if self.editable:
base = self.source_dir
else:
base = os.path.join(self.source_dir, 'pip-egg-info')
filenames = os.listdir(base)
if self.editable:
filenames = []
for root, dirs, files in os.walk(base):
for dir in vcs.dirnames:
if dir in dirs:
dirs.remove(dir)
# Iterate over a copy of ``dirs``, since mutating
# a list while iterating over it can cause trouble.
# (See https://github.com/pypa/pip/pull/462.)
for dir in list(dirs):
# Don't search in anything that looks like a virtualenv
# environment
if (
os.path.exists(
os.path.join(root, dir, 'bin', 'python')
) or
os.path.exists(
os.path.join(
root, dir, 'Scripts', 'Python.exe'
)
)):
dirs.remove(dir)
# Also don't search through tests
elif dir == 'test' or dir == 'tests':
dirs.remove(dir)
filenames.extend([os.path.join(root, dir)
for dir in dirs])
filenames = [f for f in filenames if f.endswith('.egg-info')]
if not filenames:
raise InstallationError(
'No files/directories in %s (from %s)' % (base, filename)
)
assert filenames, \
"No files/directories in %s (from %s)" % (base, filename)
# if we have more than one match, we pick the toplevel one. This
# can easily be the case if there is a dist folder which contains
# an extracted tarball for testing purposes.
if len(filenames) > 1:
filenames.sort(
key=lambda x: x.count(os.path.sep) +
(os.path.altsep and x.count(os.path.altsep) or 0)
)
self._egg_info_path = os.path.join(base, filenames[0])
return os.path.join(self._egg_info_path, filename)
def pkg_info(self):
p = FeedParser()
data = self.egg_info_data('PKG-INFO')
if not data:
logger.warning(
'No PKG-INFO file found in %s',
display_path(self.egg_info_path('PKG-INFO')),
)
p.feed(data or '')
return p.close()
_requirements_section_re = re.compile(r'\[(.*?)\]')
@property
def installed_version(self):
# Create a requirement that we'll look for inside of setuptools.
req = pkg_resources.Requirement.parse(self.name)
# We want to avoid having this cached, so we need to construct a new
# working set each time.
working_set = pkg_resources.WorkingSet()
# Get the installed distribution from our working set
dist = working_set.find(req)
# Check to see if we got an installed distribution or not, if we did
# we want to return it's version.
if dist:
return dist.version
def assert_source_matches_version(self):
assert self.source_dir
version = self.pkg_info()['version']
if version not in self.req:
logger.warning(
'Requested %s, but installing version %s',
self,
self.installed_version,
)
else:
logger.debug(
'Source in %s has version %s, which satisfies requirement %s',
display_path(self.source_dir),
version,
self,
)
def update_editable(self, obtain=True):
if not self.link:
logger.debug(
"Cannot update repository at %s; repository location is "
"unknown",
self.source_dir,
)
return
assert self.editable
assert self.source_dir
if self.link.scheme == 'file':
# Static paths don't get updated
return
assert '+' in self.link.url, "bad url: %r" % self.link.url
if not self.update:
return
vc_type, url = self.link.url.split('+', 1)
backend = vcs.get_backend(vc_type)
if backend:
vcs_backend = backend(self.link.url)
if obtain:
vcs_backend.obtain(self.source_dir)
else:
vcs_backend.export(self.source_dir)
else:
assert 0, (
'Unexpected version control type (in %s): %s'
% (self.link, vc_type))
def uninstall(self, auto_confirm=False):
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
"""
if not self.check_if_exists():
raise UninstallationError(
"Cannot uninstall requirement %s, not installed" % (self.name,)
)
dist = self.satisfied_by or self.conflicts_with
paths_to_remove = UninstallPathSet(dist)
develop_egg_link = egg_link_path(dist)
develop_egg_link_egg_info = '{0}.egg-info'.format(
pkg_resources.to_filename(dist.project_name))
egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info)
# Special case for distutils installed package
distutils_egg_info = getattr(dist._provider, 'path', None)
# The order of the uninstall cases matters: with two installs of the
# same package, pip needs to uninstall the currently detected version
if (egg_info_exists and dist.egg_info.endswith('.egg-info') and
not dist.egg_info.endswith(develop_egg_link_egg_info)):
# if dist.egg_info.endswith(develop_egg_link_egg_info), we
# are in fact in the develop_egg_link case
paths_to_remove.add(dist.egg_info)
if dist.has_metadata('installed-files.txt'):
for installed_file in dist.get_metadata(
'installed-files.txt').splitlines():
path = os.path.normpath(
os.path.join(dist.egg_info, installed_file)
)
paths_to_remove.add(path)
# FIXME: need a test for this elif block
# occurs with --single-version-externally-managed/--record outside
# of pip
elif dist.has_metadata('top_level.txt'):
if dist.has_metadata('namespace_packages.txt'):
namespaces = dist.get_metadata('namespace_packages.txt')
else:
namespaces = []
for top_level_pkg in [
p for p
in dist.get_metadata('top_level.txt').splitlines()
if p and p not in namespaces]:
path = os.path.join(dist.location, top_level_pkg)
paths_to_remove.add(path)
paths_to_remove.add(path + '.py')
paths_to_remove.add(path + '.pyc')
elif distutils_egg_info:
warnings.warn(
"Uninstalling a distutils installed project ({0}) has been "
"deprecated and will be removed in a future version. This is "
"due to the fact that uninstalling a distutils project will "
"only partially uninstall the project.".format(self.name),
RemovedInPip8Warning,
)
paths_to_remove.add(distutils_egg_info)
elif dist.location.endswith('.egg'):
# package installed by easy_install
# We cannot match on dist.egg_name because it can slightly vary
# i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg
paths_to_remove.add(dist.location)
easy_install_egg = os.path.split(dist.location)[1]
easy_install_pth = os.path.join(os.path.dirname(dist.location),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
elif develop_egg_link:
# develop egg
with open(develop_egg_link, 'r') as fh:
link_pointer = os.path.normcase(fh.readline().strip())
assert (link_pointer == dist.location), (
'Egg-link %s does not match installed location of %s '
'(at %s)' % (link_pointer, self.name, dist.location)
)
paths_to_remove.add(develop_egg_link)
easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, dist.location)
elif egg_info_exists and dist.egg_info.endswith('.dist-info'):
for path in pip.wheel.uninstallation_paths(dist):
paths_to_remove.add(path)
else:
logger.debug(
'Not sure how to uninstall: %s - Check: %s',
dist, dist.location)
# find distutils scripts= scripts
if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
for script in dist.metadata_listdir('scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, script))
if WINDOWS:
paths_to_remove.add(os.path.join(bin_dir, script) + '.bat')
# find console_scripts
if dist.has_metadata('entry_points.txt'):
config = configparser.SafeConfigParser()
config.readfp(
FakeFile(dist.get_metadata_lines('entry_points.txt'))
)
if config.has_section('console_scripts'):
for name, value in config.items('console_scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, name))
if WINDOWS:
paths_to_remove.add(
os.path.join(bin_dir, name) + '.exe'
)
paths_to_remove.add(
os.path.join(bin_dir, name) + '.exe.manifest'
)
paths_to_remove.add(
os.path.join(bin_dir, name) + '-script.py'
)
paths_to_remove.remove(auto_confirm)
self.uninstalled = paths_to_remove
def rollback_uninstall(self):
if self.uninstalled:
self.uninstalled.rollback()
else:
logger.error(
"Can't rollback %s, nothing uninstalled.", self.name,
)
def commit_uninstall(self):
if self.uninstalled:
self.uninstalled.commit()
else:
logger.error(
"Can't commit %s, nothing uninstalled.", self.name,
)
def archive(self, build_dir):
assert self.source_dir
create_archive = True
archive_name = '%s-%s.zip' % (self.name, self.pkg_info()["version"])
archive_path = os.path.join(build_dir, archive_name)
if os.path.exists(archive_path):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(archive_path), ('i', 'w', 'b'))
if response == 'i':
create_archive = False
elif response == 'w':
logger.warning('Deleting %s', display_path(archive_path))
os.remove(archive_path)
elif response == 'b':
dest_file = backup_dir(archive_path)
logger.warning(
'Backing up %s to %s',
display_path(archive_path),
display_path(dest_file),
)
shutil.move(archive_path, dest_file)
if create_archive:
zip = zipfile.ZipFile(
archive_path, 'w', zipfile.ZIP_DEFLATED,
allowZip64=True
)
dir = os.path.normcase(os.path.abspath(self.source_dir))
for dirpath, dirnames, filenames in os.walk(dir):
if 'pip-egg-info' in dirnames:
dirnames.remove('pip-egg-info')
for dirname in dirnames:
dirname = os.path.join(dirpath, dirname)
name = self._clean_zip_name(dirname, dir)
zipdir = zipfile.ZipInfo(self.name + '/' + name + '/')
zipdir.external_attr = 0x1ED << 16 # 0o755
zip.writestr(zipdir, '')
for filename in filenames:
if filename == PIP_DELETE_MARKER_FILENAME:
continue
filename = os.path.join(dirpath, filename)
name = self._clean_zip_name(filename, dir)
zip.write(filename, self.name + '/' + name)
zip.close()
logger.info('Saved %s', display_path(archive_path))
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix + os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix)
)
name = name[len(prefix) + 1:]
name = name.replace(os.path.sep, '/')
return name
def match_markers(self):
if self.markers is not None:
return markers_interpret(self.markers)
else:
return True
def install(self, install_options, global_options=(), root=None):
if self.editable:
self.install_editable(install_options, global_options)
return
if self.is_wheel:
version = pip.wheel.wheel_version(self.source_dir)
pip.wheel.check_compatibility(version, self.name)
self.move_wheel_files(self.source_dir, root=root)
self.install_succeeded = True
return
if self.isolated:
global_options = list(global_options) + ["--no-user-cfg"]
temp_location = tempfile.mkdtemp('-record', 'pip-')
record_filename = os.path.join(temp_location, 'install-record.txt')
try:
install_args = [sys.executable]
install_args.append('-c')
install_args.append(
"import setuptools, tokenize;__file__=%r;"
"exec(compile(getattr(tokenize, 'open', open)(__file__).read()"
".replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py
)
install_args += list(global_options) + \
['install', '--record', record_filename]
if not self.as_egg:
install_args += ['--single-version-externally-managed']
if root is not None:
install_args += ['--root', root]
if self.pycompile:
install_args += ["--compile"]
else:
install_args += ["--no-compile"]
if running_under_virtualenv():
py_ver_str = 'python' + sysconfig.get_python_version()
install_args += ['--install-headers',
os.path.join(sys.prefix, 'include', 'site',
py_ver_str, self.name)]
logger.info('Running setup.py install for %s', self.name)
with indent_log():
call_subprocess(
install_args + install_options,
cwd=self.source_dir,
filter_stdout=_filter_install,
show_stdout=False,
)
if not os.path.exists(record_filename):
logger.debug('Record file %s not found', record_filename)
return
self.install_succeeded = True
if self.as_egg:
# there's no --always-unzip option we can pass to install
# command, so we are unable to save the installed-files.txt
return
def prepend_root(path):
if root is None or not os.path.isabs(path):
return path
else:
return change_root(root, path)
with open(record_filename) as f:
for line in f:
directory = os.path.dirname(line)
if directory.endswith('.egg-info'):
egg_info_dir = prepend_root(directory)
break
else:
logger.warning(
'Could not find .egg-info directory in install record'
' for %s',
self,
)
# FIXME: put the record somewhere
# FIXME: should this be an error?
return
new_lines = []
with open(record_filename) as f:
for line in f:
filename = line.strip()
if os.path.isdir(filename):
filename += os.path.sep
new_lines.append(
make_path_relative(
prepend_root(filename), egg_info_dir)
)
inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt')
with open(inst_files_path, 'w') as f:
f.write('\n'.join(new_lines) + '\n')
finally:
if os.path.exists(record_filename):
os.remove(record_filename)
rmtree(temp_location)
def ensure_has_source_dir(self, parent_dir):
"""Ensure that a source_dir is set.
This will create a temporary build dir if the name of the requirement
isn't known yet.
:param parent_dir: The ideal pip parent_dir for the source_dir.
Generally src_dir for editables and build_dir for sdists.
:return: self.source_dir
"""
if self.source_dir is None:
self.source_dir = self.build_location(parent_dir)
return self.source_dir
def remove_temporary_source(self):
"""Remove the source files from this requirement, if they are marked
for deletion"""
if self.source_dir and os.path.exists(
os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)):
logger.debug('Removing source in %s', self.source_dir)
rmtree(self.source_dir)
self.source_dir = None
if self._temp_build_dir and os.path.exists(self._temp_build_dir):
rmtree(self._temp_build_dir)
self._temp_build_dir = None
def install_editable(self, install_options, global_options=()):
logger.info('Running setup.py develop for %s', self.name)
if self.isolated:
global_options = list(global_options) + ["--no-user-cfg"]
with indent_log():
# FIXME: should we do --install-headers here too?
cwd = self.source_dir
if self.editable_options and \
'subdirectory' in self.editable_options:
cwd = os.path.join(cwd, self.editable_options['subdirectory'])
call_subprocess(
[
sys.executable,
'-c',
"import setuptools, tokenize; __file__=%r; exec(compile("
"getattr(tokenize, 'open', open)(__file__).read().replace"
"('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py
] +
list(global_options) +
['develop', '--no-deps'] +
list(install_options),
cwd=cwd, filter_stdout=self._filter_install,
show_stdout=False)
self.install_succeeded = True
def _filter_install(self, line):
return _filter_install(line)
def check_if_exists(self):
"""Find an installed distribution that satisfies or conflicts
with this requirement, and set self.satisfied_by or
self.conflicts_with appropriately.
"""
if self.req is None:
return False
try:
# DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
# if we've already set distribute as a conflict to setuptools
# then this check has already run before. we don't want it to
# run again, and return False, since it would block the uninstall
# TODO: remove this later
if (self.req.project_name == 'setuptools' and
self.conflicts_with and
self.conflicts_with.project_name == 'distribute'):
return True
else:
self.satisfied_by = pkg_resources.get_distribution(self.req)
except pkg_resources.DistributionNotFound:
return False
except pkg_resources.VersionConflict:
existing_dist = pkg_resources.get_distribution(
self.req.project_name
)
if self.use_user_site:
if dist_in_usersite(existing_dist):
self.conflicts_with = existing_dist
elif (running_under_virtualenv() and
dist_in_site_packages(existing_dist)):
raise InstallationError(
"Will not install to the user site because it will "
"lack sys.path precedence to %s in %s" %
(existing_dist.project_name, existing_dist.location)
)
else:
self.conflicts_with = existing_dist
return True
@property
def is_wheel(self):
return self.link and self.link.is_wheel
def move_wheel_files(self, wheeldir, root=None):
move_wheel_files(
self.name, self.req, wheeldir,
user=self.use_user_site,
home=self.target_dir,
root=root,
pycompile=self.pycompile,
isolated=self.isolated,
)
def get_dist(self):
"""Return a pkg_resources.Distribution built from self.egg_info_path"""
egg_info = self.egg_info_path('').rstrip('/')
base_dir = os.path.dirname(egg_info)
metadata = pkg_resources.PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
return pkg_resources.Distribution(
os.path.dirname(egg_info),
project_name=dist_name,
metadata=metadata)
def _strip_postfix(req):
"""
Strip req postfix ( -dev, 0.2, etc )
"""
# FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
# Strip off -dev, -0.2, etc.
req = match.group(1)
return req
def _build_req_from_url(url):
parts = [p for p in url.split('#', 1)[0].split('/') if p]
req = None
if parts[-2] in ('tags', 'branches', 'tag', 'branch'):
req = parts[-3]
elif parts[-1] == 'trunk':
req = parts[-2]
return req
def _build_editable_options(req):
"""
This method generates a dictionary of the query string
parameters contained in a given editable URL.
"""
regexp = re.compile(r"[\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)")
matched = regexp.findall(req)
if matched:
ret = dict()
for option in matched:
(name, value) = option
if name in ret:
raise Exception("%s option already defined" % name)
ret[name] = value
return ret
return None
def parse_editable(editable_req, default_vcs=None):
"""Parses an editable requirement into:
- a requirement name
- a URL
- extras
- editable options
Accepted requirements:
svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
.[some_extra]
"""
url = editable_req
extras = None
# If a file path is specified with extras, strip off the extras.
m = re.match(r'^(.+)(\[[^\]]+\])$', url)
if m:
url_no_extras = m.group(1)
extras = m.group(2)
else:
url_no_extras = url
if os.path.isdir(url_no_extras):
if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' not found." %
url_no_extras
)
# Treating it as code that has already been checked out
url_no_extras = path_to_url(url_no_extras)
if url_no_extras.lower().startswith('file:'):
if extras:
return (
None,
url_no_extras,
pkg_resources.Requirement.parse(
'__placeholder__' + extras
).extras,
{},
)
else:
return None, url_no_extras, None, {}
for version_control in vcs:
if url.lower().startswith('%s:' % version_control):
url = '%s+%s' % (version_control, url)
break
if '+' not in url:
if default_vcs:
url = default_vcs + '+' + url
else:
raise InstallationError(
'%s should either be a path to a local project or a VCS url '
'beginning with svn+, git+, hg+, or bzr+' %
editable_req
)
vc_type = url.split('+', 1)[0].lower()
if not vcs.get_backend(vc_type):
error_message = 'For --editable=%s only ' % editable_req + \
', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
' is currently supported'
raise InstallationError(error_message)
try:
options = _build_editable_options(editable_req)
except Exception as exc:
raise InstallationError(
'--editable=%s error in editable options:%s' % (editable_req, exc)
)
if not options or 'egg' not in options:
req = _build_req_from_url(editable_req)
if not req:
raise InstallationError(
'--editable=%s is not the right format; it must have '
'#egg=Package' % editable_req
)
else:
req = options['egg']
package = _strip_postfix(req)
return package, url, None, options
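# Illustrative sketch (not part of the upstream module; helper name and URL are made up):
# how the helpers above break an editable VCS requirement apart. Assumes the git backend
# is registered with `vcs`, as it is during a normal pip run.
def _demo_parse_editable():
    name, url, extras, options = parse_editable(
        'git+https://example.com/foobar.git#egg=Foobar&subdirectory=src')
    # name == 'Foobar' (taken from the egg fragment, postfixes stripped by _strip_postfix),
    # url keeps the 'git+' prefix so vcs.get_backend() can pick the right backend,
    # options == {'egg': 'Foobar', 'subdirectory': 'src'} from _build_editable_options().
    return name, url, extras, options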
|
mit
|
madjelan/scikit-learn
|
sklearn/linear_model/passive_aggressive.py
|
106
|
9705
|
# Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True,
n_iter=5, shuffle=True, verbose=0, loss="hinge",
n_jobs=1, random_state=None, warm_start=False):
BaseSGDClassifier.__init__(self,
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features]
Weights assigned to the features.
intercept_ : array, shape = [1]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, class_weight=None,
warm_start=False):
BaseSGDRegressor.__init__(self,
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
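# Illustrative usage sketch (not part of the upstream module; helper name and data are
# made up): fit the classifier on a tiny toy problem using the n_iter-based API above.
def _demo_passive_aggressive():
    import numpy as np
    X = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
    y = np.array([0, 0, 1, 1])
    clf = PassiveAggressiveClassifier(C=1.0, loss="hinge", n_iter=10, random_state=0)
    clf.fit(X, y)
    # coef_ has shape (1, n_features) for this binary problem; predict returns class labels.
    return clf.coef_.shape, clf.predict(X)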
|
bsd-3-clause
|
Azure/azure-sdk-for-python
|
tools/azure-devtools/src/azure_devtools/scenario_tests/recording_processors.py
|
1
|
10235
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from copy import deepcopy
from zlib import decompress
import six
from .utilities import is_text_payload, is_json_payload, is_batch_payload, replace_subscription_id
class RecordingProcessor(object):
def process_request(self, request): # pylint: disable=no-self-use
return request
def process_response(self, response): # pylint: disable=no-self-use
return response
@classmethod
def replace_header(cls, entity, header, old, new):
cls.replace_header_fn(entity, header, lambda v: v.replace(old, new))
@classmethod
def replace_header_fn(cls, entity, header, replace_fn):
# Loop over the headers to find the one we want case insensitively,
# but we don't want to modify the case of original header key.
for key, values in entity["headers"].items():
if key.lower() == header.lower():
if isinstance(values, list):
entity["headers"][key] = [replace_fn(v) for v in values]
else:
entity["headers"][key] = replace_fn(values)
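# Illustrative sketch (not part of the upstream module; the class below is made up): a
# custom processor only needs to override process_request/process_response. This one
# scrubs an Authorization header from recorded responses via replace_header_fn above.
class _ExampleAuthHeaderScrubber(RecordingProcessor):
    def process_response(self, response):
        self.replace_header_fn(response, "authorization", lambda _: "SCRUBBED")
        return response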
class SubscriptionRecordingProcessor(RecordingProcessor):
def __init__(self, replacement):
self._replacement = replacement
def process_request(self, request):
request.uri = replace_subscription_id(request.uri, replacement=self._replacement)
if is_text_payload(request) and request.body:
request.body = replace_subscription_id(request.body.decode(), replacement=self._replacement).encode()
return request
def process_response(self, response):
if is_text_payload(response) and response["body"]["string"]:
response["body"]["string"] = replace_subscription_id(
response["body"]["string"], replacement=self._replacement
)
self.replace_header_fn(response, "location", replace_subscription_id)
self.replace_header_fn(response, "azure-asyncoperation", replace_subscription_id)
try:
response["url"] = replace_subscription_id(response["url"], replacement=self._replacement)
except KeyError:
pass
return response
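# Editor's illustration (not part of the original module): a recording framework
# would typically create this processor with a fixed fake GUID and pass every
# captured request and response through it, for example:
#
#   processor = SubscriptionRecordingProcessor("00000000-0000-0000-0000-000000000000")
#   request = processor.process_request(request)
#   response = processor.process_response(response)
#
# so the real subscription id never appears in stored recordings.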
class LargeRequestBodyProcessor(RecordingProcessor):
def __init__(self, max_request_body=128):
self._max_request_body = max_request_body
def process_request(self, request):
if is_text_payload(request) and request.body and len(request.body) > self._max_request_body * 1024:
request.body = (
"!!! The request body has been omitted from the recording because its "
"size {} is larger than {}KB. !!!".format(len(request.body), self._max_request_body)
)
return request
class LargeResponseBodyProcessor(RecordingProcessor):
control_flag = "<CTRL-REPLACE>"
def __init__(self, max_response_body=128):
self._max_response_body = max_response_body
def process_response(self, response):
if is_text_payload(response):
length = len(response["body"]["string"] or "")
if length > self._max_response_body * 1024:
if is_json_payload(response):
from .decorators import AllowLargeResponse # pylint: disable=cyclic-import
raise ValueError(
"The json response body exceeds the default limit of {}kb. Use '@{}' "
"on your test method to increase the limit or update test logics to avoid "
"big payloads".format(self._max_response_body, AllowLargeResponse.__name__)
)
response["body"]["string"] = (
"!!! The response body has been omitted from the recording because it is larger "
"than {} KB. It will be replaced with blank content of {} bytes while replay. "
"{}{}".format(self._max_response_body, length, self.control_flag, length)
)
return response
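# Editor's note: LargeResponseBodyProcessor (above) truncates oversized recorded
# bodies and embeds control_flag followed by the original length; during playback
# LargeResponseBodyReplacer (below) looks for that flag and substitutes a dummy
# body of the recorded length, so the two classes are intended to be used as a pair.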
class LargeResponseBodyReplacer(RecordingProcessor):
def process_response(self, response):
if is_text_payload(response) and not is_json_payload(response):
import six
body = response["body"]["string"]
# Backward compatibility: under Python 2.7 the response body is unicode, under 3.5 it is
# bytes. When setting the value back, the same type must be used.
body_is_string = isinstance(body, six.string_types)
content_in_string = (response["body"]["string"] or b"").decode("utf-8")
index = content_in_string.find(LargeResponseBodyProcessor.control_flag)
if index > -1:
length = int(content_in_string[index + len(LargeResponseBodyProcessor.control_flag) :])
if body_is_string:
response["body"]["string"] = "0" * length
else:
response["body"]["string"] = bytes([0] * length)
return response
class AuthenticationMetadataFilter(RecordingProcessor):
"""Remove authority and tenant discovery requests and responses from recordings.
MSAL sends these requests to obtain non-secret metadata about the token authority. Recording them is unnecessary
because tests use fake credentials during playback that don't invoke MSAL.
"""
def process_request(self, request):
if "/.well-known/openid-configuration" in request.uri or "/common/discovery/instance" in request.uri:
return None
return request
class OAuthRequestResponsesFilter(RecordingProcessor):
"""Remove oauth authentication requests and responses from recording."""
def process_request(self, request):
# filter request like:
# GET https://login.microsoftonline.com/72f988bf-86f1-41af-91ab-2d7cd011db47/oauth2/token
# POST https://login.microsoftonline.com/72f988bf-86f1-41af-91ab-2d7cd011db47/oauth2/v2.0/token
import re
if not re.search("/oauth2(?:/v2.0)?/token", request.uri):
return request
return None
class DeploymentNameReplacer(RecordingProcessor):
"""Replace the random deployment name with a fixed mock name."""
def process_request(self, request):
import re
request.uri = re.sub("/deployments/([^/?]+)", "/deployments/mock-deployment", request.uri)
return request
class AccessTokenReplacer(RecordingProcessor):
"""Replace the access token for service principal authentication in a response body."""
def __init__(self, replacement="fake_token"):
self._replacement = replacement
def process_response(self, response):
import json
try:
body = json.loads(response["body"]["string"])
body["access_token"] = self._replacement
except (KeyError, ValueError):
return response
response["body"]["string"] = json.dumps(body)
return response
class GeneralNameReplacer(RecordingProcessor):
def __init__(self):
self.names_name = []
def register_name_pair(self, old, new):
self.names_name.append((old, new))
def process_request(self, request):
for old, new in self.names_name:
request.uri = request.uri.replace(old, new)
if is_text_payload(request) and request.body:
if isinstance(request.body, dict):
request.body = self._process_body_as_dict(request.body)
else:
body = six.ensure_str(request.body)
if old in body:
request.body = body.replace(old, new)
if request.body and request.uri and is_batch_payload(request):
import re
body = six.ensure_str(request.body)
matched_objects = set(re.findall(old, body))
for matched_object in matched_objects:
request.body = body.replace(matched_object, new)
body = body.replace(matched_object, new)
return request
def _process_body_as_dict(self, body):
new_body = deepcopy(body)
for key in new_body.keys():
for old, new in self.names_name:
new_body[key] = new_body[key].replace(old, new)
return new_body
def process_response(self, response):
for old, new in self.names_name:
if is_text_payload(response) and response["body"]["string"]:
try:
response["body"]["string"] = response["body"]["string"].replace(old, new)
except UnicodeDecodeError:
body = response["body"]["string"]
response["body"]["string"].decode("utf8", "backslashreplace").replace(old, new).encode(
"utf8", "backslashreplace"
)
except TypeError:
pass
self.replace_header(response, "location", old, new)
self.replace_header(response, "operation-location", old, new)
self.replace_header(response, "azure-asyncoperation", old, new)
self.replace_header(response, "www-authenticate", old, new)
try:
response["url"] = replace_subscription_id(response["url"])
except KeyError:
pass
try:
for old, new in self.names_name:
response["url"].replace(old, new)
except KeyError:
pass
try:
response["url"] = response["url"].replace(old, new)
except KeyError:
pass
return response
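# Editor's illustration (not part of the original module): GeneralNameReplacer is
# driven by register_name_pair, so a test fixture might scrub a generated resource
# name like this ("rg-real-name"/"rg-fake-name" are made-up placeholder values):
#
#   replacer = GeneralNameReplacer()
#   replacer.register_name_pair("rg-real-name", "rg-fake-name")
#   request = replacer.process_request(request)
#   response = replacer.process_response(response)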
class RequestUrlNormalizer(RecordingProcessor):
"""URL parsing fix to account for '//' vs '/' in different versions of python"""
def process_request(self, request):
import re
request.uri = re.sub("(?<!:)//", "/", request.uri)
return request
|
mit
|
pascalguru/florincoin
|
qa/rpc-tests/invalidateblock.py
|
123
|
3175
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test InvalidateBlock code
#
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
class InvalidateTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
def run_test(self):
print "Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:"
print "Mine 4 blocks on Node 0"
self.nodes[0].setgenerate(True, 4)
assert(self.nodes[0].getblockcount() == 4)
besthash = self.nodes[0].getbestblockhash()
print "Mine competing 6 blocks on Node 1"
self.nodes[1].setgenerate(True, 6)
assert(self.nodes[1].getblockcount() == 6)
print "Connect nodes to force a reorg"
connect_nodes_bi(self.nodes,0,1)
sync_blocks(self.nodes[0:2])
assert(self.nodes[0].getblockcount() == 6)
badhash = self.nodes[1].getblockhash(2)
print "Invalidate block 2 on node 0 and verify we reorg to node 0's original chain"
self.nodes[0].invalidateblock(badhash)
newheight = self.nodes[0].getblockcount()
newhash = self.nodes[0].getbestblockhash()
if (newheight != 4 or newhash != besthash):
raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
print "\nMake sure we won't reorg to a lower work chain:"
connect_nodes_bi(self.nodes,1,2)
print "Sync node 2 to node 1 so both have 6 blocks"
sync_blocks(self.nodes[1:3])
assert(self.nodes[2].getblockcount() == 6)
print "Invalidate block 5 on node 1 so its tip is now at 4"
self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
assert(self.nodes[1].getblockcount() == 4)
print "Invalidate block 3 on node 2, so its tip is now 2"
self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
assert(self.nodes[2].getblockcount() == 2)
print "..and then mine a block"
self.nodes[2].setgenerate(True, 1)
print "Verify all nodes are at the right height"
time.sleep(5)
for i in xrange(3):
print i,self.nodes[i].getblockcount()
assert(self.nodes[2].getblockcount() == 3)
assert(self.nodes[0].getblockcount() == 4)
node1height = self.nodes[1].getblockcount()
if node1height < 4:
raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
if __name__ == '__main__':
InvalidateTest().main()
|
mit
|
marcusramberg/dotfiles
|
bin/.venv-ansible-venv/lib/python2.6/site-packages/ansible/modules/extras/network/openvswitch_port.py
|
49
|
4068
|
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, David Stygstra <[email protected]>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: openvswitch_port
version_added: 1.4
author: David Stygstra
short_description: Manage Open vSwitch ports
requirements: [ ovs-vsctl ]
description:
- Manage Open vSwitch ports
options:
bridge:
required: true
description:
- Name of bridge to manage
port:
required: true
description:
- Name of port to manage on the bridge
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the port should exist
timeout:
required: false
default: 5
description:
- How long to wait for ovs-vswitchd to respond
'''
EXAMPLES = '''
# Creates port eth2 on bridge br-ex
- openvswitch_port: bridge=br-ex port=eth2 state=present
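# Removes port eth2 from bridge br-ex (illustrative addition by the editor;
# state=absent is the documented choice for deleting a port)
- openvswitch_port: bridge=br-ex port=eth2 state=absent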
'''
class OVSPort(object):
def __init__(self, module):
self.module = module
self.bridge = module.params['bridge']
self.port = module.params['port']
self.state = module.params['state']
self.timeout = module.params['timeout']
def _vsctl(self, command):
'''Run ovs-vsctl command'''
return self.module.run_command(['ovs-vsctl', '-t', str(self.timeout)] + command)
def exists(self):
'''Check if the port already exists'''
rc, out, err = self._vsctl(['list-ports', self.bridge])
if rc != 0:
raise Exception(err)
return any(port.rstrip() == self.port for port in out.split('\n'))
def add(self):
'''Add the port'''
rc, _, err = self._vsctl(['add-port', self.bridge, self.port])
if rc != 0:
raise Exception(err)
def delete(self):
'''Remove the port'''
rc, _, err = self._vsctl(['del-port', self.bridge, self.port])
if rc != 0:
raise Exception(err)
def check(self):
'''Run check mode'''
try:
if self.state == 'absent' and self.exists():
changed = True
elif self.state == 'present' and not self.exists():
changed = True
else:
changed = False
except Exception, e:
self.module.fail_json(msg=str(e))
self.module.exit_json(changed=changed)
def run(self):
'''Make the necessary changes'''
changed = False
try:
if self.state == 'absent':
if self.exists():
self.delete()
changed = True
elif self.state == 'present':
if not self.exists():
self.add()
changed = True
except Exception, e:
self.module.fail_json(msg=str(e))
self.module.exit_json(changed=changed)
def main():
module = AnsibleModule(
argument_spec={
'bridge': {'required': True},
'port': {'required': True},
'state': {'default': 'present', 'choices': ['present', 'absent']},
'timeout': {'default': 5, 'type': 'int'}
},
supports_check_mode=True,
)
port = OVSPort(module)
if module.check_mode:
port.check()
else:
port.run()
# import module snippets
from ansible.module_utils.basic import *
main()
|
mit
|
alexmerser/overholt
|
tests/api/product_tests.py
|
10
|
1352
|
# -*- coding: utf-8 -*-
"""
tests.api.product_tests
~~~~~~~~~~~~~~~~~~~~~~~
api product tests module
"""
from ..factories import CategoryFactory, ProductFactory
from . import OverholtApiTestCase
class ProductApiTestCase(OverholtApiTestCase):
def _create_fixtures(self):
super(ProductApiTestCase, self)._create_fixtures()
self.category = CategoryFactory()
self.product = ProductFactory(categories=[self.category])
def test_get_products(self):
r = self.jget('/products')
self.assertOkJson(r)
def test_get_product(self):
r = self.jget('/products/%s' % self.product.id)
self.assertOkJson(r)
def test_create_product(self):
r = self.jpost('/products', data={
'name': 'New Product',
'categories': [self.category.id]
})
self.assertOkJson(r)
def test_create_invalid_product(self):
r = self.jpost('/products', data={
'categories': [self.category.id]
})
self.assertBadJson(r)
def test_update_product(self):
r = self.jput('/products/%s' % self.product.id, data={
'name': 'New Product'
})
self.assertOkJson(r)
def test_delete_product(self):
r = self.jdelete('/products/%s' % self.product.id)
self.assertStatusCode(r, 204)
|
mit
|
stanbellcom/webapp_politik
|
sites/all/modules/annotator/lib/store-plugin/pyenv/lib/python2.7/site-packages/werkzeug/testsuite/multipart/collect.py
|
78
|
1584
|
#!/usr/bin/env python
"""
Hacky helper application to collect form data.
"""
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response
def copy_stream(request):
from os import mkdir
from time import time
folder = 'request-%d' % time()
mkdir(folder)
environ = request.environ
f = file(folder + '/request.txt', 'wb+')
f.write(environ['wsgi.input'].read(int(environ['CONTENT_LENGTH'])))
f.flush()
f.seek(0)
environ['wsgi.input'] = f
request.stat_folder = folder
def stats(request):
copy_stream(request)
f1 = request.files['file1']
f2 = request.files['file2']
text = request.form['text']
f1.save(request.stat_folder + '/file1.bin')
f2.save(request.stat_folder + '/file2.bin')
file(request.stat_folder + '/text.txt', 'w').write(text.encode('utf-8'))
return Response('Done.')
def upload_file(request):
return Response('''
<h1>Upload File</h1>
<form action="" method="post" enctype="multipart/form-data">
<input type="file" name="file1"><br>
<input type="file" name="file2"><br>
<textarea name="text"></textarea><br>
<input type="submit" value="Send">
</form>
''', mimetype='text/html')
def application(environ, start_response):
request = Request(environ)
if request.method == 'POST':
response = stats(request)
else:
response = upload_file(request)
return response(environ, start_response)
if __name__ == '__main__':
run_simple('localhost', 5000, application, use_debugger=True)
|
gpl-2.0
|
hickey/amforth
|
tools/amforth-shell.py
|
2
|
56764
|
#!/usr/bin/python
#
# pySerial based upload & interpreter interaction module for amforth.
#
# Copyright 2011 Keith Amidon ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Patcher remarks:
# ================
#
# This uploader saves dictionary space and words clutter by substituting
# uC register names and application constants with numbers. The
# appl_defs.frt (Forth) file in the application's local directory
# provides the constant definitions. In appl_defs.frt put each constant
# on a line of its own. The first line, if it begins with a backslash
# comment, would be echoed to the screen when uploading the Forth
# code. It is recommended to place in appl_defs.frt global constant
# definitions which would affect compilation of the library and the
# project code. For example:
#
# \ Project Name
# $d5 constant CRC8MSB
# 10 constant max_number_of_users
#
# Invoke the shell with the argument --log log.frt to collect the lines
# which were uploaded to the AmForth system that received an " ok"
# response (log.frt is just a file-name example). This file can later be
# uploaded to another system using a tool simpler than the shell. Leave
# the shell by #exit to close log.frt properly.
#
# Invoke the shell with the argument --rtscts to enable serial port
# RTS/CTS hardware handshake connection.
#
# =====================================================================
# DOCUMENTATION
# =====================================================================
# This module module may be used as a script or imported for use as a
# part of a larger python program.
#
# Script Usage
# ------------
# When used as a script this module provides two main functions, the
# ability to reliably upload files to the amforth interpreter and an
# interpreter interaction mode with line editing, word completion and
# previous input history. For information on how to access these
# features when invoking the module as a script, execute it with the
# --help option and read the following sections on the interaction
# protocol and local directives.
#
#
# Interaction Protocol
# --------------------
# The amforth interaction protocol used by this module is to send a
# line to amforth character by character, reading the echos as quickly
# as possible. Once the entire line has been sent it then reads the
# response up until the " ok" prompt or a prompt that looks like an
# error response. The character by character handling of echos
# appears to eliminate unexpected device resets when compared to the
# line by line method used by previous tools, possibly by eliminating
# the possibility of serial tx overrun on the device.
#
# To further optimize interaction with the device lines are evaluated
# before sending and redundant whitespace is compressed out. Lines
# which are all whitespace or whitespace and comments are not sent and
# the next line is handled.
#
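# Editor's sketch of the per-character echo handshake described above (simplified
# from the send_line method further below; "conn" stands for the open pyserial
# connection):
#
#     for c in line + "\n":
#         conn.write(c)
#         while conn.read(1) != c:   # wait until the device echoes the character
#             pass
#
# The real implementation additionally tolerates tab-to-space echo translation and
# raises an error when no echo arrives before the timeout.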
#
# Local Directives
# ----------------
# A number of special directives are supported which instruct the
# script to do something and are handled locally without being sent to
# amforth. Directives may be specified within comments or outside
# comments as controlled by the "#directive" directive. They must be
# the only contents of a line or they will be ignored. The directives
# include:
#
# #install <file>
# Upload the named <file> before proceeding further.
#
# #include <file>
# Like #install but would skip if <file> was already uploaded
# during the shell session.
#
# #cd <dir>
# Change the current local directory to the location specified.
# During uploads, this directive affects the current file and
# files it includes. Once the current file is complete the old
# value will be restored.
#
# #directive <config>
# Change how directives are discovered. The valid values for
# <config> are:
# none : Stop looking for any directives
# commented : Only look for directives within comments
# Commented directives must be the first word of the
# comment. The remaining text in the comment is the
# argument provided to the directive. There must
# not be any other non-whitespace text other than
# the comment start and (if required) end characters
# and the directive and any directive argument on a
# commented directive line. If any other text is
# present on the line an error will be generated.
# uncommented : Only look for directives outside comments.
# Uncommented directives must be the first word of a
# line and extend to the end of the line. If a
# directive name exists in a subsequent word of a
# line it will be sent to the interpreter as a word
# like any other.
# all : Allow both commented and uncommented directives.
# This is the default.
# During uploads, this directive affects the current file and
# files it includes. Once the current file is complete the old
# value will be restored.
#
# #timeout <float>
# Change the timeout value to <float> seconds. Fractional
# values are supported. During uploads, this directive affects
# the current file and files it includes. Once the current file
# is complete the old value will be restored.
#
# #timeout-next <float>
# Change the timeout value for the next line sent to the
# interpreter to <float> seconds. Fractional values are
# supported. The timeout returns to its previous value after
# the next line is sent to the interpreter. If this directive
# is encountered as the very last line of an upload file it will
# have no effect.
#
# #error-on-output [<yes-or-no>]
# Controls whether an error is generated if unexpected output
# occurs during an upload. The default is yes. This directive
# can not be used in interactive mode as it would not have any
# effect. During uploads it affects the rest of the current file
# and any files it includes. The argument is optional. If not
# given it is assumed to be "yes".
#
# #ignore-error [<yes-or-no>]
# Ignore any error that occurs later in the current upload file
# or a file it includes. The argument is optional. If given
# the behavior is set as specified. If not given it is assumed
# to be "yes".
#
# #ignore-error-next [<yes-or-no>]
# Ignore any error that occurs on the next line. The argument
# is optional. If given the behavior is set as specified. If
# not given it is assumed to be "yes".
#
# #expect-output-next [<regexp>]
# Expect specific output on the next line. The argument is
# optional. If it is not specified a default regular expression
# of ".*" (match everything) is assumed. This overrides the
# #error-on-output directive. An error is raised if the output
# doesn't match the regular expression. It will be ignored if
# #ignore-error is yes. Use of this directive without an
# argument is the way to prevent an error on output when
# #error-on-output is yes
#
# #start-string-word <word>
# Add a word that starts a string. The string will end when a
# double quote character is read.
#
# #quote-char-word <word>
# Add a word that quotes the immediately next word
#
# #interact
# Start an interactive session before proceeding with file upload.
# This only makes sense during a file upload.
#
# #edit [<filename>]
# Edit a file. The filename is optional. If it is provided the
# named file will be edited. If it is not provided and the last
# upload ended in an error the file that had the error will be
# edited at the location of the error. If there was no previous
# upload or the last upload completed successfully but an #edit
# directive was previously issued with a filename, edit the file
# previously named. Finally, if none of these apply an error is
# printed. The editor used can be specified with the --editor
# option when starting the program or through the EDITOR
# environment variable.
#
# #update-words
# This directive is only available in an interactive session.
# It cause the interaction code to reload the list of words used
# for completion from the amforth interpreter. Typically it is
# not required as words are updated automatically when the
# session starts and any time a file is uploaded as a results of
# a #include directive. The case where it is required is when
# completion is needed for words defined interactively during
# the session.
#
# #update-cpu
# This directive is only available in an interactive session.
# It causes the interaction code to read the controller name
# from the device and tries to load a specific python module
# which contains names for registers and addresses. These names
# can be used in forth code and get replace with the corresponding
# numbers.
#
# #exit
# Exit an interactive session or the current upload immediately.
# If encountered during an upload, no further lines from the
# file will be processed.
#
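# Editor's sketch (not part of the original documentation): an upload file using
# the commented-directive form described above might look like
#
#     \ #timeout 30
#     \ #include somelib.frt
#     : answer ( -- n ) 42 ;
#
# where each directive is the first word of a backslash comment and "somelib.frt"
# is only a placeholder file name.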
#
# Programmatic Usage
# ------------------
# For programmatic usage, a single class named AMForth is provided.
# It can be instantiated with no arguments but typically a serial port
# device and port speed will be provided as the defaults are unlikely
# to be correct.
#
# Once an instance is obtained, and connected the high-level entry
# points are the "upload_file" and "interact" methods, the former
# uploading a file to the AMForth interpreter and the latter providing
# an interactive interpreter shell with command history and word
# completion. These methods provide progress information in various
# cases by calling the function stored in the "progress_callback"
# property with three arguments, the type of progress being reported,
# a line number if available (otherwise it is None) and a message with
# further information. The default progress callback prints this
# information to the screen in a terse format. Other programs may
# wish to replace this with their own progress presentation function.
#
# Low-level interaction with the AMForth interpreter would typically
# use the "send_line" and "read_response" methods. Before these can
# be used the serial connection must be established. The
# serial_connected property indicates whether a connection currently
# exists. A good way to obtain a connection and rule out errors in
# serial communication is to call "find_prompt" which ensures the
# existence of a serial connection and sends a newline to the AMForth
# interperter and watches for the echo. This is usually the best way
# of establishing a connection but the "serial_connect" method will
# open a connection without sending anything if that is required.
#
# Elimination of whitespace and discovery of directives (see below) is
# provided through the "preprocess_line" method and directives that
# have common implementations can be handled with the
# "handle_common_directives" method.
# TODO: - Update comments on most functions explaining what they do.
import argparse
import atexit
import copy
import glob
import os
import re
import readline
import serial
import StringIO
import subprocess
import sys
import traceback
class AMForthException(Exception):
pass
class Behaviors(object):
"""Simple class for storing configurable processing behaviors"""
def __init__(self):
self.working_directory = os.getcwd()
self.filename = None
self.timeout = 15.0
self.quote_char_words = ["[char]", "char"]
self.start_string_words = ['s"', '."', 'abort"']
self.error_on_output = True
self.ignore_errors = False
self.directive_uncommented = True
self.directive_commented = True
self.expected_output_regexp = None
@property
def directive_config(self):
"Get the current directive configuration"
if self.directive_uncommented:
if self.directive_commented:
return "all"
else:
return "uncommented"
else:
if self.directive_commented:
return "commented"
else:
return "none"
@directive_config.setter
def directive_config(self, value):
"Set the directive configuration"
if value == "none":
self.directive_uncommented = False
self.directive_commented = False
elif value == "all":
self.directive_uncommented = True
self.directive_commented = True
elif value == "uncommented":
self.directive_uncommented = True
self.directive_commented = False
elif value == "commented":
self.directive_uncommented = False
self.directive_commented = True
else:
raise AMForthException("Unknown directive config: %s" % value)
class BehaviorManager(object):
"""Class for determining currently configured behavior
This class manages the lifetime of behaviors established through
configuration options and directives to minimize the impact of
that support on the AMForth class. """
def __init__(self):
self.default_behavior = Behaviors()
self.clear()
def clear(self):
"Clear out accumulated behavior"
self._next_line_behavior = None
self._current_line_behavior = None
self._file_behaviors = []
@property
def current_behavior(self):
"""The behavior currently in effect"""
if self._current_line_behavior:
return self._current_line_behavior
elif self._file_behaviors:
return self._file_behaviors[0]
else:
return self.default_behavior
def advance_line(self):
"""Call when changing to the next line"""
self._current_line_behavior = self._next_line_behavior
self._next_line_behavior = None
def push_file(self, filename):
"""Call when starting processing a new nested file"""
behavior = copy.deepcopy(self.current_behavior)
behavior.filename = filename
self._file_behaviors.insert(0, behavior)
def pop_file(self):
"""Call when returning from a nested file"""
del(self._file_behaviors[0])
@property
def next_line_behavior(self):
"""The behavior to use for the next line"""
return self._next_line_behavior
@next_line_behavior.setter
def next_line_behavior(self, behavior):
self._next_line_behavior = behavior
@property
def current_file_behavior(self):
"""The behavior for the current file.
Will raise an exception if there is no file currently."""
return self._file_behaviors[0]
@current_file_behavior.setter
def current_file_behavior(self, behavior):
self._file_behaviors[0] = behavior
class AMForth(object):
"Class for interacting with the AMForth interpreter"
amforth_error_cre = re.compile(" \?\? -\d+ \d+ \r\n> $")
upload_directives = [
"#cd", "#install", "#include", "#directive", "#ignore-error",
"#ignore-error-next", "#error-on-output", "#expect-output-next",
"#string-start-word", "#quote-char-word",
"#timeout", "#timeout-next", "#interact", "#exit"
]
interact_directives = [
"#cd", "#edit", "#install", "#include", "#directive", "#ignore-error",
"#error-on-output", "#string-start-word", "#quote-char-word",
"#timeout", "#timeout-next", "#update-words", "#exit",
"#update-cpu", "#update-files"
]
# standard words are usually uppercase, but amforth needs
# them in lowercase.
stdwords = [
# *** Wordset BLOCK
"BLK","BLOCK","BUFFER","EVALUATE","FLUSH","LOAD","SAVE-BUFFERS",
"UPDATE",
# *** Wordset BLOCK-EXT
"EMPTY-BUFFERS","LIST","REFILL","SCR","THRU",
# *** Wordset CORE
"#S","*/MOD","+LOOP","/MOD","0<","0=","1+","1-","2!",
"2*","2/","2@","2DROP","2DUP","2OVER","2SWAP",">BODY",
">IN",">NUMBER",">R","?DUP","ABORT","ABORT\"","ABS",
"ACCEPT","ALIGN","ALIGNED","ALLOT","AND","BASE","BEGIN",
"BL","C!","C,","C@","CELL+","CELLS","CHAR","CHAR+",
"CHARS","CONSTANT","COUNT","CR","CREATE","DECIMAL",
"DEPTH","DO","DOES>","DROP","DUP","ELSE","EMIT","ENVIRONMENT?",
"EVALUATE","EXECUTE","EXIT","FILL","FIND","FM/MOD",
"HERE","HOLD","I","IF","IMMEDIATE","INVERT","J","KEY",
"LEAVE","LITERAL","LOOP","LSHIFT","M*","MAX","MIN",
"MOD","MOVE","NEGATE","OR","OVER","POSTPONE","QUIT",
"R>","R@","RECURSE","REPEAT","ROT","RSHIFT","S\"","S>D",
"SIGN","SM/REM","SOURCE","SPACE","SPACES","STATE","SWAP",
"THEN","TYPE","U.","U<","UM*","UM/MOD","UNLOOP","UNTIL",
"VARIABLE","WHILE","WORD","XOR","[CHAR]",
# *** Wordset CORE-EXT
".R","0<>",
"0>","2>R","2R>","2R@",":NONAME","?DO","AGAIN","C\"",
"CASE","COMPILE,","ENDCASE","ENDOF","ERASE","FALSE",
"HEX","MARKER","NIP","OF","PAD","PARSE","PICK","REFILL",
"RESTORE-INPUT","ROLL","SAVE-INPUT","SOURCE-ID","TO",
"TRUE","TUCK","U.R","U>","UNUSED","VALUE","WITHIN",
"[COMPILE]",
# *** Wordset CORE-EXT-obsolescent
"#TIB","CONVERT","EXPECT","QUERY","SPAN",
"TIB",
# *** Wordset DOUBLE
"2CONSTANT","2LITERAL","2VARIABLE","D+","D-",
"D.","D.R","D0<","D0=","D2*","D2/","D<","D=","D>S",
"DABS","DMAX","DMIN","DNEGATE","M*/","M+",
# *** Wordset DOUBLE-EXT
"2ROT","DU<",
# *** Wordset EXCEPTION
"CATCH","THROW",
# *** Wordset EXCEPTION-EXT
"ABORT","ABORT\"",
# *** Wordset FACILITY
"AT-XY","KEY?","PAGE",
# *** Wordset FACILITY-EXT
"EKEY","EKEY>CHAR","EKEY?","EMIT?","MS","TIME&DATE",
# *** Wordset FILE
"BIN","CLOSE-FILE","CREATE-FILE","DELETE-FILE","FILE-POSITION",
"FILE-SIZE","INCLUDE-FILE","INCLUDED","OPEN-FILE","R/O",
"R/W","READ-FILE","READ-LINE","REPOSITION-FILE","RESIZE-FILE",
"S\"","SOURCE-ID","W/O","WRITE-FILE","WRITE-LINE",
# *** Wordset FILE-EXT
"FILE-STATUS",
"FLUSH-FILE","REFILL","RENAME-FILE",
# *** Wordset FLOAT
">FLOAT","D>F",
"F!","F*","F+","F-","F/","F0<","F0=","F<","F>D","F@",
"FALIGN","FALIGNED","FCONSTANT","FDEPTH","FDROP","FDUP",
"FLITERAL","FLOAT+","FLOATS","FLOOR","FMAX","FMIN",
"FNEGATE","FOVER","FROT","FROUND","FSWAP","FVARIABLE",
"REPRESENT",
# *** Wordset FLOAT-EXT
"DF!","DF@","DFALIGN","DFALIGNED","DFLOAT+",
"DFLOATS","F**","F.","FABS","FACOS","FACOSH","FALOG",
"FASIN","FASINH","FATAN","FATAN2","FATANH","FCOS","FCOSH",
"FE.","FEXP","FEXPM1","FLN","FLNP1","FLOG","FS.","FSIN",
"FSINCOS","FSINH","FSQRT","FTAN","FTANH","F~","PRECISION",
"SET-PRECISION","SF!","SF@","SFALIGN","SFALIGNED","SFLOAT+",
"SFLOATS",
# *** Wordset LOCAL
"(LOCAL)","TO",
# *** Wordset LOCAL-EXT
"LOCALS|",
# *** Wordset MEMORY
"ALLOCATE","FREE",
"RESIZE",
# *** Wordset SEARCH
"DEFINITIONS","FIND","FORTH-WORDLIST","GET-CURRENT",
"GET-ORDER","SEARCH-WORDLIST","SET-CURRENT","SET-ORDER",
"WORDLIST",
# *** Wordset SEARCH-EXT
"ALSO","FORTH","ONLY","ORDER","PREVIOUS",
# *** Wordset STRING
"-TRAILING","/STRING","BLANK","CMOVE","CMOVE>","COMPARE",
"SEARCH","SLITERAL",
# *** Wordset TOOLS
".S","DUMP","SEE","WORDS",
# *** Wordset TOOLS-EXT
";CODE",
"AHEAD","ASSEMBLER","BYE","CODE","CS-PICK","CS-ROLL",
"EDITOR","STATE","[ELSE]","[IF]","[THEN]",
# *** Wordset TOOLS-EXT-obsolescent
"FORGET",
]
def __init__(self, serial_port="/dev/amforth", rtscts=False, speed=38400):
self.debug = False
self.max_line_length = 80
self.progress_callback = self.print_progress
self.editor = None
self._serial_port = serial_port
self._serial_rtscts = rtscts
self._serial_speed = speed
self._serialconn = None
self._readline_initialized = False
self._amforth_dp = None
self._filedirs = {}
self._search_path = []
self._uploaded = set()
self._amforth_words = []
self._amforth_regs = {}
self._amforth_cpu = ""
self._last_error = ()
self._last_edited_file = None
self._config = BehaviorManager()
if os.environ.has_key("AMFORTH_LIB"):
self._search_list = os.environ["AMFORTH_LIB"].split(":")
else:
self._search_list=["."]
# define application constants to substitute
try:
ad_file = open("appl_defs.frt")
ad_line = ad_file.readline()
ad_mat = re.match("^\\\\\s+(\S.*)\n", ad_line)
if ad_mat:
self.progress_callback("Information", None, "appl_defs: " + ad_mat.group(1))
ad_pat=re.compile("^\s*(\S+)\s+constant\s+(\S+)\s")
ad_def = {}
while ad_line:
ad_mat = ad_pat.match(ad_line)
if ad_mat:
ad_def[ad_mat.group(2)] = ad_mat.group(1)
ad_line = ad_file.readline()
except:
ad_def = {}
self.progress_callback("Information", None, "appl_defs: %d loaded" % len(ad_def))
self._appl_defs = ad_def
@property
def serial_port(self):
"Serial port device attached to AMForth"
return self._serial_port
@serial_port.setter
def serial_port(self, value):
"""Set the serial port device attached to AMForth
If the value provided is different than the current value any
existing serial connection will be closed and a new connection
opened."""
if self._serial_port != value:
self._serial_port = value
self.serial_reconnect()
@property
def serial_rtscts(self):
"RTS/CTS enable of serial connection to AMForth"
return self._serial_rtscts
@serial_rtscts.setter
def serial_rtscts(self, value):
if self._serial_rtscts != value:
self._serial_rtscts = value
self.serial_reconnect()
@property
def serial_speed(self):
"Speed of the serial connection to AMForth"
return self._serial_speed
@serial_speed.setter
def serial_speed(self, value):
if self._serial_speed != value:
self._serial_speed = value
self.serial_reconnect()
@property
def serial_connected(self):
"Boolean status for whether currently connected to AMForth"
return self._serialconn is not None
def main(self):
"Main function called when module is used as a script"
upload_files, interact = self.parse_arg()
try:
for fn in upload_files:
if fn == "-":
self.interact()
else:
self.upload_file(fn, install=True)
if interact:
self.interact()
except AMForthException:
return 1
except KeyboardInterrupt:
print "\nAborted with keyboard interrupt"
except Exception, e:
print "\n---- Unexpected exception ----"
traceback.print_exc()
return 1
finally:
self.serial_disconnect()
if self._log:
self._log.close()
return 0
def parse_arg(self):
"Argument parsing used when module is used as a script"
parser = argparse.ArgumentParser(description="Interact with AMForth",
epilog="""
The environment variable AMFORTH_LIB can be set to a colon (:) separated
list of directories that are recursively searched for file names. If not set,
the current working directory is used instead.
The script assumes it is located in the standard amforth installation under
the tools/ directory. It uses files from the core/devices directories for
additional definitions (e.g. register names).
"""
)
parser.add_argument("--timeout", "-t", action="store",
type=float, default=15.0,
help="Response timeout (seconds, float value)")
parser.add_argument("--port", "-p", action="store",
type=str, default=self.serial_port, help="Serial port name")
parser.add_argument("--rtscts", action="store_true",
default=self.serial_rtscts, help="Serial port RTS/CTS enable")
parser.add_argument("--speed", "-s", action="store",
type=int, default=self.serial_speed, help="Serial port speed")
parser.add_argument("--log", type=argparse.FileType('w'),
help="Uploaded Forth log-file")
parser.add_argument("--line-length", "-l", action="store",
type=int, default=self.max_line_length,
help="Maximum length of amforth input line")
parser.add_argument("--interact", "-i", action="store_true",
help="Enter interactive prompt after upload")
parser.add_argument("--directive", "-d", action="store",
default="all",
help="Local directive configuration (where found)")
parser.add_argument("--editor", action="store",
default = os.environ.get("EDITOR", None),
help="Editor to use for #edit directive")
parser.add_argument("--no-error-on-output", action="store_true",
help="Indicate an error if upload causes output")
parser.add_argument("--ignore-error", action="store_true",
help="Ignore errors during upload (not recommended)")
parser.add_argument("--debug-serial", action="store_true",
help="Output extra info about serial transfers in stderr")
parser.add_argument("files", nargs="*", help="may be found via the environment variable AMFORTH_LIB")
arg = parser.parse_args()
self.debug = arg.debug_serial
self.max_line_length = arg.line_length
self._serial_port = arg.port
self._serial_rtscts = arg.rtscts
self._serial_speed = arg.speed
self._log = arg.log
self.editor = arg.editor
behavior = self._config.current_behavior
behavior.error_on_output = not arg.no_error_on_output
behavior.directive_config = arg.directive
behavior.timeout = arg.timeout
behavior.ignore_errors = arg.ignore_error
return arg.files, (arg.interact or len(arg.files) == 0)
def serial_connect(self, port=None, rtscts=None, speed=None):
"""Connect to AMForth on a serial port
The port and speed argument are optional. If not specified
the current values set in the object are used. These will be
the defaults if the have not been changed. If either is
specified corresponding property of the instance will be
updated to the new value.
This is safe to call even if a connection already exists, as
any existing connection will be closed before the new
connection is made."""
if port != None:
self.serial_port = port
if rtscts != None:
self.serial_rtscts = rtscts
if speed != None:
self.serial_speed = speed
if self._serialconn:
self.serial_disconnect()
try:
timeout = self._config.current_behavior.timeout
self._serialconn = serial.Serial(self.serial_port,
self.serial_speed,
serial.EIGHTBITS,
serial.PARITY_NONE,
serial.STOPBITS_ONE,
timeout, False,
self.serial_rtscts,
None, False)
except serial.SerialException, e:
raise AMForthException("Serial port connect failure: %s" % str(e))
def serial_disconnect(self):
"""Disconnect the serial connection to AMForth
This is safe to call even if there is currently no connection."""
if self._serialconn:
self._serialconn.close()
self._serialconn = None
def serial_reconnect(self):
"""Reconnect the serial connection to AMForth
This is the same as calling serial_connect while there is an
existing connection. It is provided to make it clear when
the intent is to re-establish an existing connection (usually
to apply new settings) versus creating a new connection."""
self.serial_connect()
def find_prompt(self):
"Attempt to find a prompt by sending a newline and verifying echo"
if not self.serial_connected:
self.serial_connect()
# Use a short timeout to quickly detect if can't communicate
self._serialconn.timeout = 2.0
try:
try:
self.send_line("\n") # Get empty line echo to make sure ready
self.read_response() # Throw away the response.
except serial.SerialException, e:
self.progress_callback("Error", None, str(e))
raise AMForthException("Failed to get prompt: %s" % str(e))
finally:
# Restore the current timeout
self._serialconn.timeout = self._config.current_behavior.timeout
def upload_file(self, filename, install=False):
if not install and filename in self._uploaded:
return False
else:
self._uploaded.add(filename)
self._update_files()
if os.path.dirname(filename):
fpath=filename
self.progress_callback("Information", None, "using "+ filename+" verbatim")
else:
if not self._filedirs.has_key(filename):
self.progress_callback("Error", None, "file "+ filename+" not found in search path")
raise AMForthException("file " + filename + " not found in search path")
if len(self._filedirs[filename])!=1:
# oops: either more than one candidate file was found, or none at all
raise AMForthException("Wrong # of file occurrences: " + filename + " ("+str(len(self._filedirs[filename]))+")")
self.progress_callback("Information", None, "using "+ filename+" from"+ self._filedirs[filename][0])
fpath = os.path.join(self._filedirs[filename][0], filename)
self._config.push_file(fpath)
fdir=os.path.dirname(fpath)
print "**** " + self._config.current_behavior.working_directory
if os.path.isabs(fdir):
dirpath = os.path.normpath(fdir)
else:
oldpath = self._config.current_behavior.working_directory
dirpath = os.path.normpath(os.path.join(oldpath, fdir))
self._config.current_behavior.working_directory = dirpath
try:
try:
self.find_prompt()
except AMForthException, e:
self.progress_callback("Error", None, str(e))
raise
self._update_cpu()
self.progress_callback("File", None, fpath)
try:
with open(fpath, "r") as f:
self._send_file_contents(f)
except (OSError, IOError), e:
self.progress_callback("Error", None, str(e))
raise AMForthException("Unknown file: " + fpath)
self._last_error = ()
finally:
print "**** " + self._config.current_behavior.working_directory
self._config.pop_file()
self._serialconn.timeout = self._config.current_behavior.timeout
try:
os.chdir(self._config.current_behavior.working_directory)
except OSError, e:
errmsg = ("Failed to change to directory '%s': %s"
% (self._config.current_behavior.working_directory,
str(e)))
self.progress_callback("Error", None, errmsg)
raise AMForthException(errmsg)
return True
def _send_file_contents(self, f):
in_comment = False
lineno = 0
for full_line in f:
self._config.advance_line()
self._serialconn.timeout = self._config.current_behavior.timeout
try:
os.chdir(self._config.current_behavior.working_directory)
except OSError, e:
errmsg = ("Failed to change to directory '%s': %s"
% (self._config.current_behavior.working_directory,
str(e)))
self.progress_callback("Error", None, errmsg)
raise AMForthException(errmsg)
lineno += 1
if full_line and full_line[-1] == "\n":
full_line = full_line[:-1]
if full_line and full_line[-1] == "\r":
full_line = full_line[:-1]
line = full_line.strip()
if len(line) == 0:
if in_comment:
self.progress_callback("Comment", lineno, full_line)
else:
self.progress_callback("Whitespace", lineno, full_line)
continue
try:
(line, in_comment,
directive,
directive_arg) = self.preprocess_line(full_line, in_comment,
self.upload_directives)
except AMForthException, e:
self._record_error(lineno)
self.progress_callback("Error", lineno, full_line)
self.progress_callback("Error", None, str(e))
raise
if directive:
self.progress_callback("Directive", lineno, full_line)
if directive == "#exit":
break
elif directive == "#interact":
self.interact()
continue
self.handle_common_directives(directive, directive_arg)
continue
if len(line) == 0:
self.progress_callback("Comment", lineno, full_line)
continue
try:
self.send_line(line)
except AMForthException, e:
self._record_error(lineno)
self.progress_callback("Error", lineno, full_line)
self.progress_callback("Error", None, str(e))
raise
response = self.read_response()
self.progress_callback("Sent", lineno, full_line)
if response[-3:] == " ok":
if len(response) > 3:
for l in StringIO.StringIO(response[:-3]):
self.progress_callback("Output", lineno, l.rstrip())
r = self._config.current_behavior.expected_output_regexp
if r:
m = re.match(r, response[:-3], re.MULTILINE)
response_ok = m is not None
else:
response_ok = False
if not response_ok:
if self._config.current_behavior.error_on_output:
errmsg = "Unexpected output after line."
errmsg += " To allow, specify --no-error-on-output."
self.progress_callback("Error", lineno, errmsg)
if not self._config.current_behavior.ignore_errors:
self._record_error(lineno)
raise AMForthException(errmsg)
elif self._log:
self._log.write(line + "\n")
else:
self.progress_callback("Error", None, response)
if not self._config.current_behavior.ignore_errors:
self._record_error(lineno)
raise AMForthException("Error in line sent")
def preprocess_line(self, line, in_delim_comment=False, directives=[]):
# Compresses whitespace, including comments so send minimum
# data to atmega
result = []
comment_words = []
char_quote = False
in_string = False
in_line_comment = False
directive = None
directive_arg = []
words = self._split_space_or_tab(line)
for iw,w in enumerate(words):
if in_string:
try:
i = w.index('"')
except ValueError:
result[-1] += " " + w
continue
in_string = False
result[-1] += " " + w[:i+1]
result[-1] = result[-1][1:] # remove extra initial space
w = w[i+1:]
if in_delim_comment:
try:
i = w.index(")")
except ValueError:
pass
else:
in_delim_comment = False
w = w[i+1:]
if not w:
continue
if w in self._appl_defs:
w = self._appl_defs[w]
elif w in self._amforth_regs:
w = self._amforth_regs[w]
elif w.upper() in self.stdwords:
w = w.lower()
if char_quote:
result.append(w)
char_quote = False
continue
if w == "(":
if not in_delim_comment:
in_delim_comment = True
else:
raise AMForthException("Illegal nested comment")
continue
if not in_delim_comment and not in_line_comment:
if w == "\\" and (iw == 0 or words[iw-1] != "postpone"):
in_line_comment = True
continue
elif w in self._config.current_behavior.start_string_words:
in_string = True
result.append(w)
result.append('')
continue
if w in self._config.current_behavior.quote_char_words:
char_quote = True # no continue deliberately
if directive:
directive_arg.append(w)
else:
if (self._config.current_behavior.directive_uncommented
and not result
and w in directives):
directive = w
else:
result.append(w)
else:
if directive:
directive_arg.append(w)
else:
if (self._config.current_behavior.directive_commented
and not result
and not comment_words
and w in directives):
directive = w
else:
comment_words.append(w)
if directive and len(result):
raise AMForthException("Directive must not have other content: %s"
% " ".join(result))
return (" ".join(result), in_delim_comment,
directive, " ".join(directive_arg))
def _record_error(self, lineno):
fn = self._config.current_behavior.filename
if fn:
self._last_error = (fn, lineno)
def _split_space_or_tab(self, line):
result = [""]
for c in line:
if c == " " or c == "\t":
result.append("")
else:
result[-1] += c
return result
def handle_common_directives(self, directive, directive_arg):
if directive == "#include" or directive == "#install":
fn = directive_arg.strip()
if self.upload_file(fn, directive == "#install"):
resume_fn = self._config.current_behavior.filename
if resume_fn:
self.progress_callback("File", None, resume_fn + " (resumed)")
else:
self.progress_callback("Information", None, "already uploaded")
elif directive == "#cd":
dirname = directive_arg.strip()
if os.path.isabs(dirname):
dirpath = os.path.normpath(dirname)
else:
oldpath = self._config.current_behavior.working_directory
dirpath = os.path.normpath(os.path.join(oldpath, dirname))
self._config.current_behavior.working_directory = dirpath
elif directive == "#timeout":
try:
timeout = float(directive_arg)
except ValueError, e:
self.progress_callback("Error", None, "Invalid timeout")
return
self._config.current_file_behavior.timeout = timeout
elif directive == "#timeout-next":
try:
timeout = float(directive_arg)
except ValueError, e:
self.progress_callback("Error", None, "Invalid timeout")
return
behavior = copy.deepcopy(self._config.current_behavior)
behavior.timeout = timeout
self._config.next_line_behavior = behavior
elif directive == "#ignore-error":
v = self._yes_or_no_arg(directive_arg)
self._config.current_file_behavior.ignore_errors = v
elif directive == "#ignore-error-next":
v = self._yes_or_no_arg(directive_arg)
behavior = copy.deepcopy(self._config.current_behavior)
behavior.ignore_errors = v
self._config.next_line_behavior = behavior
elif directive == "#error-on-output":
v = self._yes_or_no_arg(directive_arg)
behavior = self._config.current_file_behavior
behavior.error_on_output = v
elif directive == "#expect-output-next":
regexp = directive_arg.strip()
if not regexp:
regexp = ".*"
behavior = copy.deepcopy(self._config.current_behavior)
behavior.expected_output_regexp = regexp
self._config.next_line_behavior = behavior
elif directive == "#start-string-word":
behavior = self._config.current_file_behavior
behavior.start_string_words.append(directive_arg.strip().split(" "))
elif directive == "#quote-char-word":
behavior = self._config.current_file_behavior
behavior.quote_char_words.append(directive_arg.strip().split(" "))
elif directive == "#directive":
behavior = self._config.current_file_behavior
behavior.directive_config = directive_arg.strip()
else:
errmsg = "Unknown directive: %s %s" % (directive, directive_arg)
raise AMForthException(errmsg)
def _yes_or_no_arg(self, directive_arg):
if not directive_arg:
return True
else:
if directive_arg.lower() == "yes":
return True
elif directive_arg.lower() == "no":
return False
else:
errmsg = "Invalid directive argument. Must be yes or no."
raise AMForthException(errmsg)
def send_line(self, line):
if len(line) > self.max_line_length - 1: # For newline
raise AMForthException("Input line > %d char"
% self.max_line_length)
if self.debug:
sys.stderr.write("|a( )" + repr(line)[1:-1] + "\n")
sys.stderr.write("|s( )")
for c in line + "\n":
if self.debug:
sys.stderr.write(repr(c)[1:-1]+"->")
sys.stderr.flush()
self._serialconn.write(c)
self._serialconn.flush()
r = self._serialconn.read(1) # Read echo of character we just sent
while r and (r != c or (c == '\t' and r != ' ')):
if self.debug:
sys.stderr.write(repr(r)[1:-1])
sys.stderr.flush()
r = self._serialconn.read(1)
if not r:
raise AMForthException("Input character not echoed.")
if self.debug:
sys.stderr.write(repr(r)[1:-1] + "|")
sys.stderr.flush()
if self.debug:
sys.stderr.write("\n")
def read_response(self):
if self.debug:
sys.stderr.write("|r( )")
response = ""
r = self._serialconn.read(1)
while r != "":
if self.debug:
sys.stderr.write(repr(r)[1:-1])
sys.stderr.flush()
response = response + r
if response[-3:] == " ok":
# Interactive prompt read and discarded while handling
# echo of next line sent.
break
elif self.amforth_error_cre.search(response) is not None:
response = response[:-3] # Don't return prompt in response
break
r = self._serialconn.read(1)
if not response:
response = "Timed out waiting for ok response"
if self.debug:
sys.stderr.write("\n")
return response
def print_progress(self, type, lineno, info):
if not lineno:
print "|%s=%s" % (type[:1], info)
else:
print "|%s|%5d|%s" % (type[:1], lineno, info)
def interact(self):
self.progress_callback("Interact", None,
"Entering amforth interactive interpreter")
# Use null filename "file" to capture interactive config
self._config.push_file(None)
try:
self.find_prompt()
except AMForthException, e:
self.progress_callback("Error", None, str(e))
self._config.pop_file()
raise
self._init_readline()
in_comment = False
while True:
try:
if self._amforth_cpu:
prompt="("+self._amforth_cpu+")> "
else:
prompt="> "
full_line = raw_input(prompt)
except EOFError, e:
print ""
break
self._config.advance_line()
self._serialconn.timeout = self._config.current_behavior.timeout
try:
os.chdir(self._config.current_behavior.working_directory)
except OSError, e:
errmsg = ("Failed to change to directory '%s': %s"
% (self._config.current_behavior.working_directory,
str(e)))
self.progress_callback("Error", None, errmsg)
raise AMForthException(errmsg)
(line, in_comment,
directive,
directive_arg) = self.preprocess_line(full_line, in_comment,
self.interact_directives)
try:
if directive:
self.progress_callback("Directive", None, full_line)
if directive == "#exit":
break
elif directive == "#update-words":
self._update_words()
continue
elif directive == "#update-cpu":
self._update_cpu()
continue
elif directive == "#update-files":
self._update_files()
continue
elif directive == "#edit":
if directive_arg:
self.edit_file(directive_arg.strip())
elif self._last_error:
self.edit_file(*self._last_error)
elif self._last_edited_file:
self.edit_file(self._last_edited_file)
else:
print "No file to edit"
continue
self.handle_common_directives(directive, directive_arg)
if directive == "#include" or directive == "#install":
self._update_words()
continue
if in_comment or not line:
continue
else:
self.send_line(line)
print self.read_response()
except AMForthException, e:
print "Error: " + str(e)
self._config.pop_file()
self._serialconn.timeout = self._config.current_behavior.timeout
try:
os.chdir(self._config.current_behavior.working_directory)
except OSError, e:
errmsg = ("Failed to change to directory '%s': %s"
% (self._config.current_behavior.working_directory,
str(e)))
self.progress_callback("Error", None, errmsg)
raise AMForthException(errmsg)
self.progress_callback("Interact", None,
"Leaving interactive interpreter")
def _init_readline(self):
if not self._readline_initialized:
readline.set_completer_delims(" ")
readline.set_completer(self._rlcompleter)
readline.parse_and_bind("tab: complete")
histfn = os.path.join(os.path.expanduser("~"),
".frt-interact.history")
try:
readline.read_history_file(histfn)
except IOError, e:
pass
self._update_words()
self._update_cpu()
self._update_files()
atexit.register(readline.write_history_file, histfn)
def _update_words(self):
# get all words that are available in the search order
self.send_line("base @ decimal dp u. base !")
dp = self.read_response()
if dp[-3:] != " ok":
return # Something went wrong, just silently ignore
dp = int(dp[:-3])
if self._amforth_dp != dp:
self._amforth_dp = dp
self.send_line("words")
words = self.read_response()
if words[-3:] != " ok":
return # Something went wrong, just silently ignore
self._amforth_words = words.split(" ") + self.interact_directives
def _update_cpu(self):
self.progress_callback("Information", None, "getting MCU name..")
self.send_line("s\" cpu\" environment search-wordlist drop execute itype")
words = self.read_response()
if words[-3:] != " ok":
return # Something went wrong, just silently ignore
mcudef = words[:-3].lower()
self._amforth_regs = {}
sys.path.insert(1,os.path.join(os.path.dirname(sys.argv[0]),"..", "core", "devices",mcudef))
try:
from device import MCUREGS
self._amforth_regs=MCUREGS
self._amforth_cpu = words[:-3]
self.progress_callback("Information", None, "successfully loaded register definitions for " + mcudef)
except:
self.progress_callback("Warning", None, "failed loading register definitions for " + mcudef + " .. continuing")
def _update_files(self):
self.progress_callback("Information", None, "getting filenames on the host")
self._filedirs = {}
for p in self._search_list:
self.progress_callback("Information", None, " Reading "+p)
for root, dirs, files in os.walk(p):
for f in files:
fpath=os.path.realpath(os.path.join(root, f))
fpathdir=os.path.dirname(fpath)
if self._filedirs.has_key(f):
# check for duplicates
for d in self._filedirs[f]:
if d==fpathdir:
fpath=None
if fpath: self._filedirs[f].append(fpathdir)
else:
self._filedirs[f]=[fpathdir]
def _rlcompleter(self, text, state):
if state == 0:
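            # readline calls the completer repeatedly with state 0, 1, 2, ...;
            # build the candidate list once on state 0 and hand back one match
            # per call until None signals that the list is exhausted.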
line_words = readline.get_line_buffer().split(" ")
if line_words and line_words[-1] == text:
line_words = line_words[:-1]
while line_words and line_words[-1] == "":
line_words = line_words[:-1]
if line_words:
if line_words[-1] in ["#install", "#include", "#edit"]:
self._rl_matches = [f for f in self._filedirs.keys()
if f.startswith(text)]
elif line_words[-1] == "#cd":
fnames = glob.glob(text + '*')
self._rl_matches = [f + "/" for f in fnames
if os.path.isdir(f)]
elif line_words[-1] == "#directive":
self._rl_matches = [w for w in ("all ", "uncommented ",
"commented ", "none ")
if w.startswith(text)]
elif line_words[-1] in ["#error-on-output",
"#ignore-error", "#ignore-error-next"]:
self._rl_matches = [w for w in ["yes", "no"]
if w.startswith(text)]
elif line_words[-1] in ["#exit", "#update-words",
"#timeout", "#timeout-next"]:
self._rl_matches = []
else:
self._rl_matches = [w + " " for w in self._amforth_words+self._amforth_regs.keys()
if not text or w.startswith(text)]
else:
self._rl_matches = [w + " " for w in self._amforth_words+self._amforth_regs.keys()
if not text or w.startswith(text)]
if self._rl_matches:
return self._rl_matches[0]
else:
return None
else:
if state < len(self._rl_matches):
return self._rl_matches[state]
else:
return None
def edit_file(self, filename, lineno=0):
if self.editor:
# Have to construct command line differently for different
# editors to be able to move to specific line...
exename = os.path.basename(self.editor)
if exename in ["emacs", "emacsclient", "nano"]:
cmd = [self.editor, "+" + str(lineno), filename]
elif exename in ["vi", "vim"]:
cmd = [self.editor, filename, "+" + str(lineno)]
elif exename == "mcedit":
cmd = [self.editor, " " + filename, ":" + str(lineno)]
elif exename == "gedit":
cmd = [self.editor, "-b", filename, "+" + str(lineno)]
elif exename == "pn.exe":
cmd = [self.editor, " --line", " "+str(lineno)+" ", filename]
else:
cmd = [self.editor, filename]
try:
subprocess.call(cmd)
self._last_edited_file = filename
except OSError, e:
raise AMForthException("Could not start editor: "+self.editor)
else:
raise AMForthException("No editor specified. Use --editor or EDITOR environment variable")
if __name__ == "__main__":
sys.exit(AMForth().main())
|
gpl-2.0
|
Benoss/django-elasticsearch-debug-toolbar
|
elastic_panel/test/test_toolbar.py
|
1
|
1590
|
# -*- coding: utf-8 -*-
import unittest
from django.conf import settings
settings.configure()
from debug_toolbar.toolbar import DebugToolbar
from django.http import HttpResponse
from django.test import RequestFactory
from elasticsearch.connection import Connection
from elastic_panel import panel
class ImportTest(unittest.TestCase):
def test_input(self):
panel.ElasticQueryInfo("GET", "asdasd", "asdasd", "{}", 200, "adssad", 1)
panel.ElasticQueryInfo("GET", "asdasd", "asdasd", "", 200, "adssad", 1)
panel.ElasticQueryInfo("GET", "asdasd", "asdasd", None, 200, "adssad", 1)
panel.ElasticQueryInfo("GET", "asdasd", "asdasd", "{'asddsa': 'é'}", 200, "adssad", 1)
panel.ElasticQueryInfo("GET", "asdasd", "asdasd", b"{'asddsa': 'asddasds'}", 200, "adssad", 1)
class PanelTests(unittest.TestCase):
def setUp(self):
self.get_response = lambda request: HttpResponse()
self.request = RequestFactory().get("/")
self.toolbar = DebugToolbar(self.request, self.get_response)
self.panel = panel.ElasticDebugPanel(self.toolbar, self.get_response)
def test_recording(self, *args):
response = self.panel.process_request(self.request)
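        # Simulate an Elasticsearch request being logged; the panel is assumed
        # to instrument Connection.log_request_success and record the call.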
Connection().log_request_success("GET", "asdasd", "asdasd", "{}", 200, "adssad", 1)
self.assertIsNotNone(response)
self.panel.generate_stats(self.request, response)
stats = self.panel.get_stats()
self.assertIn("records", stats)
self.assertEqual(len(stats["records"]), 1)
if __name__ == "__main__":
unittest.main()
|
mit
|
dr-venkman/TizenRT
|
os/drivers/sercomm/loadwriter.py
|
13
|
1050
|
#!/usr/bin/python
###########################################################################
#
# Copyright 2016 Samsung Electronics All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
###########################################################################
from socket import *
import time
SOCKET_NAME = '/tmp/osmocom_loader'
s = socket(AF_UNIX, SOCK_STREAM)
s.connect(SOCKET_NAME)
while 1:
try:
x = raw_input(">")
y = len(x) + 1
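        # Frame the command for the loader socket: a 2-byte big-endian length
        # prefix (counting the trailing newline) followed by the line itself.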
s.send(chr(y>>8) + chr(y&255) + x + "\n")
except:
print ''
break
s.close()
|
apache-2.0
|
asmeurer/urllib3
|
test/test_retry.py
|
1
|
5845
|
import unittest
from urllib3.packages.six.moves import xrange
from urllib3.util.retry import Retry
from urllib3.exceptions import (
ConnectTimeoutError,
ReadTimeoutError,
MaxRetryError
)
class RetryTest(unittest.TestCase):
def test_string(self):
""" Retry string representation looks the way we expect """
retry = Retry()
self.assertEqual(str(retry), 'Retry(total=10, connect=None, read=None, redirect=None)')
for _ in range(3):
retry = retry.increment()
self.assertEqual(str(retry), 'Retry(total=7, connect=None, read=None, redirect=None)')
def test_retry_both_specified(self):
"""Total can win if it's lower than the connect value"""
error = ConnectTimeoutError()
retry = Retry(connect=3, total=2)
retry = retry.increment(error=error)
retry = retry.increment(error=error)
try:
retry.increment(error=error)
self.fail("Failed to raise error.")
except MaxRetryError as e:
self.assertEqual(e.reason, error)
def test_retry_higher_total_loses(self):
""" A lower connect timeout than the total is honored """
error = ConnectTimeoutError()
retry = Retry(connect=2, total=3)
retry = retry.increment(error=error)
retry = retry.increment(error=error)
self.assertRaises(MaxRetryError, retry.increment, error=error)
def test_retry_higher_total_loses_vs_read(self):
""" A lower read timeout than the total is honored """
error = ReadTimeoutError(None, "/", "read timed out")
retry = Retry(read=2, total=3)
retry = retry.increment(error=error)
retry = retry.increment(error=error)
self.assertRaises(MaxRetryError, retry.increment, error=error)
def test_retry_total_none(self):
""" if Total is none, connect error should take precedence """
error = ConnectTimeoutError()
retry = Retry(connect=2, total=None)
retry = retry.increment(error=error)
retry = retry.increment(error=error)
try:
retry.increment(error=error)
self.fail("Failed to raise error.")
except MaxRetryError as e:
self.assertEqual(e.reason, error)
error = ReadTimeoutError(None, "/", "read timed out")
retry = Retry(connect=2, total=None)
retry = retry.increment(error=error)
retry = retry.increment(error=error)
retry = retry.increment(error=error)
self.assertFalse(retry.is_exhausted())
def test_retry_default(self):
""" If no value is specified, should retry connects 3 times """
retry = Retry()
self.assertEqual(retry.total, 10)
self.assertEqual(retry.connect, None)
self.assertEqual(retry.read, None)
self.assertEqual(retry.redirect, None)
error = ConnectTimeoutError()
retry = Retry(connect=1)
retry = retry.increment(error=error)
self.assertRaises(MaxRetryError, retry.increment, error=error)
retry = Retry(connect=1)
retry = retry.increment(error=error)
self.assertFalse(retry.is_exhausted())
self.assertTrue(Retry(0).raise_on_redirect)
self.assertFalse(Retry(False).raise_on_redirect)
def test_retry_read_zero(self):
""" No second chances on read timeouts, by default """
error = ReadTimeoutError(None, "/", "read timed out")
retry = Retry(read=0)
try:
retry.increment(error=error)
self.fail("Failed to raise error.")
except MaxRetryError as e:
self.assertEqual(e.reason, error)
def test_backoff(self):
""" Backoff is computed correctly """
max_backoff = Retry.BACKOFF_MAX
retry = Retry(total=100, backoff_factor=0.2)
self.assertEqual(retry.get_backoff_time(), 0) # First request
retry = retry.increment()
self.assertEqual(retry.get_backoff_time(), 0) # First retry
retry = retry.increment()
self.assertEqual(retry.backoff_factor, 0.2)
self.assertEqual(retry.total, 98)
self.assertEqual(retry.get_backoff_time(), 0.4) # Start backoff
retry = retry.increment()
self.assertEqual(retry.get_backoff_time(), 0.8)
retry = retry.increment()
self.assertEqual(retry.get_backoff_time(), 1.6)
for i in xrange(10):
retry = retry.increment()
self.assertEqual(retry.get_backoff_time(), max_backoff)
def test_zero_backoff(self):
retry = Retry()
self.assertEqual(retry.get_backoff_time(), 0)
retry = retry.increment()
retry = retry.increment()
self.assertEqual(retry.get_backoff_time(), 0)
def test_sleep(self):
# sleep a very small amount of time so our code coverage is happy
retry = Retry(backoff_factor=0.0001)
retry = retry.increment()
retry = retry.increment()
retry.sleep()
def test_status_forcelist(self):
retry = Retry(status_forcelist=xrange(500,600))
self.assertFalse(retry.is_forced_retry('GET', status_code=200))
self.assertFalse(retry.is_forced_retry('GET', status_code=400))
self.assertTrue(retry.is_forced_retry('GET', status_code=500))
retry = Retry(total=1, status_forcelist=[418])
self.assertFalse(retry.is_forced_retry('GET', status_code=400))
self.assertTrue(retry.is_forced_retry('GET', status_code=418))
def test_exhausted(self):
self.assertFalse(Retry(0).is_exhausted())
self.assertTrue(Retry(-1).is_exhausted())
self.assertEqual(Retry(1).increment().total, 0)
def test_disabled(self):
self.assertRaises(MaxRetryError, Retry(-1).increment)
self.assertRaises(MaxRetryError, Retry(0).increment)
|
mit
|
MiroK/dolfin
|
test/unit/python/fem/test_solving.py
|
3
|
2436
|
#!/usr/bin/env py.test
"""Unit tests for the solve function"""
# Copyright (C) 2011 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
import pytest
from dolfin import *
from dolfin_utils.test import *
@use_gc_barrier
def test_bcs():
"Check that the bcs argument is picked up"
mesh = UnitSquareMesh(4, 4)
V = FunctionSpace(mesh, "Lagrange", 1)
u = TrialFunction(V)
v = TestFunction(V)
f = Constant(100.0)
a = dot(grad(u), grad(v))*dx + u*v*dx
L = f*v*dx
bc = DirichletBC(V, 0.0, DomainBoundary())
# Single bc argument
u1 = Function(V)
solve(a == L, u1, bc)
# List of bcs
u2 = Function(V)
solve(a == L, u2, [bc])
# Single bc keyword argument
u3 = Function(V)
solve(a == L, u3, bcs=bc)
# List of bcs keyword argument
u4 = Function(V)
solve(a == L, u4, bcs=[bc])
# Check all solutions
assert round(u1.vector().norm("l2") - 14.9362601686, 10) == 0
assert round(u2.vector().norm("l2") - 14.9362601686, 10) == 0
assert round(u3.vector().norm("l2") - 14.9362601686, 10) == 0
assert round(u4.vector().norm("l2") - 14.9362601686, 10) == 0
@use_gc_barrier
def test_calling():
"Test that unappropriate arguments are not allowed"
mesh = UnitSquareMesh(4, 4)
V = FunctionSpace(mesh, "Lagrange", 1)
u = TrialFunction(V)
v = TestFunction(V)
f = Constant(100.0)
a = dot(grad(u), grad(v))*dx + u*v*dx
L = f*v*dx
bc = DirichletBC(V, 0.0, DomainBoundary())
kwargs = {"solver_parameters":{"linear_solver": "lu"},
"form_compiler_parameters":{"optimize": True}}
A = assemble(a)
b = assemble(L)
x = Vector()
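    # solver_parameters/form_compiler_parameters belong to the variational
    # form of solve(), so passing them to solve(A, x, b) is expected to raise.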
with pytest.raises(RuntimeError):
solve(A, x, b, **kwargs)
# FIXME: Include more tests for this versatile function
|
gpl-3.0
|
sarahgrogan/scikit-learn
|
sklearn/feature_selection/base.py
|
70
|
4221
|
# -*- coding: utf-8 -*-
"""Generic feature selection mixin"""
# Authors: G. Varoquaux, A. Gramfort, L. Buitinck, J. Nothman
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from warnings import warn
import numpy as np
from scipy.sparse import issparse, csc_matrix
from ..base import TransformerMixin
from ..utils import check_array, safe_mask
from ..externals import six
class SelectorMixin(six.with_metaclass(ABCMeta, TransformerMixin)):
"""
    Transformer mixin that performs feature selection given a support mask
This mixin provides a feature selector implementation with `transform` and
`inverse_transform` functionality given an implementation of
`_get_support_mask`.
"""
def get_support(self, indices=False):
"""
Get a mask, or integer index, of the features selected
Parameters
----------
indices : boolean (default False)
If True, the return value will be an array of integers, rather
than a boolean mask.
Returns
-------
support : array
An index that selects the retained features from a feature vector.
If `indices` is False, this is a boolean array of shape
[# input features], in which an element is True iff its
corresponding feature is selected for retention. If `indices` is
True, this is an integer array of shape [# output features] whose
values are indices into the input feature vector.
"""
mask = self._get_support_mask()
return mask if not indices else np.where(mask)[0]
@abstractmethod
def _get_support_mask(self):
"""
Get the boolean mask indicating which features are selected
Returns
-------
support : boolean array of shape [# input features]
An element is True iff its corresponding feature is selected for
retention.
"""
def transform(self, X):
"""Reduce X to the selected features.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
X_r : array of shape [n_samples, n_selected_features]
The input samples with only the selected features.
"""
X = check_array(X, accept_sparse='csr')
mask = self.get_support()
if not mask.any():
warn("No features were selected: either the data is"
" too noisy or the selection test too strict.",
UserWarning)
return np.empty(0).reshape((X.shape[0], 0))
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X, accept_sparse='csr')[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""
Reverse the transformation operation
Parameters
----------
X : array of shape [n_samples, n_selected_features]
The input samples.
Returns
-------
X_r : array of shape [n_samples, n_original_features]
`X` with columns of zeros inserted where features would have
been removed by `transform`.
"""
if issparse(X):
X = X.tocsc()
# insert additional entries in indptr:
# e.g. if transform changed indptr from [0 2 6 7] to [0 2 3]
# col_nonzeros here will be [2 0 1] so indptr becomes [0 2 2 3]
it = self.inverse_transform(np.diff(X.indptr).reshape(1, -1))
col_nonzeros = it.ravel()
indptr = np.concatenate([[0], np.cumsum(col_nonzeros)])
Xt = csc_matrix((X.data, X.indices, indptr),
shape=(X.shape[0], len(indptr) - 1), dtype=X.dtype)
return Xt
support = self.get_support()
X = check_array(X)
if support.sum() != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size), dtype=X.dtype)
Xt[:, support] = X
return Xt
|
bsd-3-clause
|
chauhanhardik/populo_2
|
lms/djangoapps/verify_student/migrations/0009_auto__change_softwaresecurephotoverification_window_id_default_none.py
|
84
|
9796
|
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    def forwards(self, orm):
# Changing field 'SoftwareSecurePhotoVerification.window'. Setting its default value to None
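        # The raw ALTER TABLE below is issued only on MySQL; other backends are
        # assumed not to need it for this default change.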
if db.backend_name == 'mysql':
db.execute('ALTER TABLE verify_student_softwaresecurephotoverification CHANGE `window_id` `window_id` int(11) DEFAULT NULL;')
def backwards(self, orm):
# Changing field 'SoftwareSecurePhotoVerification.window'
db.alter_column('verify_student_softwaresecurephotoverification', 'window_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['reverification.MidcourseReverificationWindow']))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'reverification.midcoursereverificationwindow': {
'Meta': {'object_name': 'MidcourseReverificationWindow'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'verify_student.incoursereverificationconfiguration': {
'Meta': {'ordering': "('-change_date',)", 'object_name': 'InCourseReverificationConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'verify_student.skippedreverification': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'SkippedReverification'},
'checkpoint': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'skipped_checkpoint'", 'to': "orm['verify_student.VerificationCheckpoint']"}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'verify_student.softwaresecurephotoverification': {
'Meta': {'ordering': "['-created_at']", 'object_name': 'SoftwareSecurePhotoVerification'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'display': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'error_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'error_msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'face_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'photo_id_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'photo_id_key': ('django.db.models.fields.TextField', [], {'max_length': '1024'}),
'receipt_id': ('django.db.models.fields.CharField', [], {'default': "'42ae367f-f6eb-456b-84c8-a3fd2baf4208'", 'max_length': '255', 'db_index': 'True'}),
'reviewing_service': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'reviewing_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'photo_verifications_reviewed'", 'null': 'True', 'to': "orm['auth.User']"}),
'status': ('model_utils.fields.StatusField', [], {'default': "'created'", 'max_length': '100', u'no_check_for_status': 'True'}),
'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
'submitted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'window': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['reverification.MidcourseReverificationWindow']"})
},
'verify_student.verificationcheckpoint': {
'Meta': {'unique_together': "(('course_id', 'checkpoint_location'),)", 'object_name': 'VerificationCheckpoint'},
'checkpoint_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'photo_verification': ('django.db.models.fields.related.ManyToManyField', [], {'null': 'True', 'to': "orm['verify_student.SoftwareSecurePhotoVerification']"})
},
'verify_student.verificationstatus': {
'Meta': {'object_name': 'VerificationStatus'},
'checkpoint': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'checkpoint_status'", 'to': "orm['verify_student.VerificationCheckpoint']"}),
'error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['verify_student']
|
agpl-3.0
|
koobonil/Boss2D
|
Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/contrib/learn/python/learn/utils/__init__.py
|
112
|
1300
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Learn Utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.utils.export import export_estimator
from tensorflow.contrib.learn.python.learn.utils.input_fn_utils import build_default_serving_input_fn
from tensorflow.contrib.learn.python.learn.utils.input_fn_utils import build_parsing_serving_input_fn
from tensorflow.contrib.learn.python.learn.utils.input_fn_utils import InputFnOps
from tensorflow.contrib.learn.python.learn.utils.saved_model_export_utils import make_export_strategy
|
mit
|
hansey/youtube-dl
|
youtube_dl/downloader/common.py
|
95
|
13848
|
from __future__ import division, unicode_literals
import os
import re
import sys
import time
from ..compat import compat_str
from ..utils import (
encodeFilename,
decodeArgument,
format_bytes,
timeconvert,
)
class FileDownloader(object):
"""File Downloader class.
    File downloader objects are the ones responsible for downloading the
actual video file and writing it to disk.
File downloaders accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of
options instead.
Available options:
verbose: Print additional info to stdout.
quiet: Do not print messages to stdout.
ratelimit: Download speed limit, in bytes/sec.
retries: Number of times to retry for HTTP error 5xx
buffersize: Size of download buffer in bytes.
noresizebuffer: Do not automatically resize the download buffer.
continuedl: Try to continue downloads if possible.
noprogress: Do not print the progress bar.
logtostderr: Log messages to stderr instead of stdout.
consoletitle: Display progress in console window's titlebar.
nopart: Do not use temporary .part files.
updatetime: Use the Last-modified header to set output file timestamps.
test: Download only first bytes to test the downloader.
min_filesize: Skip files smaller than this size
max_filesize: Skip files larger than this size
xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
                        (experimental)
external_downloader_args: A list of additional command-line arguments for the
external downloader.
Subclasses of this one must re-define the real_download method.
"""
_TEST_FILE_SIZE = 10241
params = None
def __init__(self, ydl, params):
"""Create a FileDownloader object with the given options."""
self.ydl = ydl
self._progress_hooks = []
self.params = params
self.add_progress_hook(self.report_progress)
@staticmethod
def format_seconds(seconds):
(mins, secs) = divmod(seconds, 60)
(hours, mins) = divmod(mins, 60)
if hours > 99:
return '--:--:--'
if hours == 0:
return '%02d:%02d' % (mins, secs)
else:
return '%02d:%02d:%02d' % (hours, mins, secs)
@staticmethod
def calc_percent(byte_counter, data_len):
if data_len is None:
return None
return float(byte_counter) / float(data_len) * 100.0
@staticmethod
def format_percent(percent):
if percent is None:
return '---.-%'
return '%6s' % ('%3.1f%%' % percent)
@staticmethod
def calc_eta(start, now, total, current):
if total is None:
return None
if now is None:
now = time.time()
dif = now - start
if current == 0 or dif < 0.001: # One millisecond
return None
rate = float(current) / dif
return int((float(total) - float(current)) / rate)
@staticmethod
def format_eta(eta):
if eta is None:
return '--:--'
return FileDownloader.format_seconds(eta)
@staticmethod
def calc_speed(start, now, bytes):
dif = now - start
if bytes == 0 or dif < 0.001: # One millisecond
return None
return float(bytes) / dif
@staticmethod
def format_speed(speed):
if speed is None:
return '%10s' % '---b/s'
return '%10s' % ('%s/s' % format_bytes(speed))
@staticmethod
def best_block_size(elapsed_time, bytes):
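        # Adapt the next block size to the measured throughput: the new size is
        # the bytes-per-second rate, clamped between half the previous block
        # (at least 1 byte) and 4 MiB.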
new_min = max(bytes / 2.0, 1.0)
new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
if elapsed_time < 0.001:
return int(new_max)
rate = bytes / elapsed_time
if rate > new_max:
return int(new_max)
if rate < new_min:
return int(new_min)
return int(rate)
@staticmethod
def parse_bytes(bytestr):
"""Parse a string indicating a byte quantity into an integer."""
matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
if matchobj is None:
return None
number = float(matchobj.group(1))
multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
return int(round(number * multiplier))
def to_screen(self, *args, **kargs):
self.ydl.to_screen(*args, **kargs)
def to_stderr(self, message):
self.ydl.to_screen(message)
def to_console_title(self, message):
self.ydl.to_console_title(message)
def trouble(self, *args, **kargs):
self.ydl.trouble(*args, **kargs)
def report_warning(self, *args, **kargs):
self.ydl.report_warning(*args, **kargs)
def report_error(self, *args, **kargs):
self.ydl.report_error(*args, **kargs)
def slow_down(self, start_time, now, byte_counter):
"""Sleep if the download speed is over the rate limit."""
rate_limit = self.params.get('ratelimit', None)
if rate_limit is None or byte_counter == 0:
return
if now is None:
now = time.time()
elapsed = now - start_time
if elapsed <= 0.0:
return
speed = float(byte_counter) / elapsed
if speed > rate_limit:
time.sleep(max((byte_counter // rate_limit) - elapsed, 0))
def temp_name(self, filename):
"""Returns a temporary filename for the given filename."""
if self.params.get('nopart', False) or filename == '-' or \
(os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
return filename
return filename + '.part'
def undo_temp_name(self, filename):
if filename.endswith('.part'):
return filename[:-len('.part')]
return filename
def try_rename(self, old_filename, new_filename):
try:
if old_filename == new_filename:
return
os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
except (IOError, OSError) as err:
self.report_error('unable to rename file: %s' % compat_str(err))
def try_utime(self, filename, last_modified_hdr):
"""Try to set the last-modified time of the given file."""
if last_modified_hdr is None:
return
if not os.path.isfile(encodeFilename(filename)):
return
timestr = last_modified_hdr
if timestr is None:
return
filetime = timeconvert(timestr)
if filetime is None:
return filetime
# Ignore obviously invalid dates
if filetime == 0:
return
try:
os.utime(filename, (time.time(), filetime))
except Exception:
pass
return filetime
def report_destination(self, filename):
"""Report destination filename."""
self.to_screen('[download] Destination: ' + filename)
def _report_progress_status(self, msg, is_last_line=False):
fullmsg = '[download] ' + msg
if self.params.get('progress_with_newline', False):
self.to_screen(fullmsg)
else:
if os.name == 'nt':
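                # Windows consoles are assumed not to honour the ANSI
                # erase-to-end-of-line escape, so pad with spaces to overwrite
                # any remainder of a longer previous status line.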
prev_len = getattr(self, '_report_progress_prev_line_length',
0)
if prev_len > len(fullmsg):
fullmsg += ' ' * (prev_len - len(fullmsg))
self._report_progress_prev_line_length = len(fullmsg)
clear_line = '\r'
else:
clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
self.to_console_title('youtube-dl ' + msg)
def report_progress(self, s):
if s['status'] == 'finished':
if self.params.get('noprogress', False):
self.to_screen('[download] Download completed')
else:
s['_total_bytes_str'] = format_bytes(s['total_bytes'])
if s.get('elapsed') is not None:
s['_elapsed_str'] = self.format_seconds(s['elapsed'])
msg_template = '100%% of %(_total_bytes_str)s in %(_elapsed_str)s'
else:
msg_template = '100%% of %(_total_bytes_str)s'
self._report_progress_status(
msg_template % s, is_last_line=True)
if self.params.get('noprogress'):
return
if s['status'] != 'downloading':
return
if s.get('eta') is not None:
s['_eta_str'] = self.format_eta(s['eta'])
else:
s['_eta_str'] = 'Unknown ETA'
if s.get('total_bytes') and s.get('downloaded_bytes') is not None:
s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes'])
elif s.get('total_bytes_estimate') and s.get('downloaded_bytes') is not None:
s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes_estimate'])
else:
if s.get('downloaded_bytes') == 0:
s['_percent_str'] = self.format_percent(0)
else:
s['_percent_str'] = 'Unknown %'
if s.get('speed') is not None:
s['_speed_str'] = self.format_speed(s['speed'])
else:
s['_speed_str'] = 'Unknown speed'
if s.get('total_bytes') is not None:
s['_total_bytes_str'] = format_bytes(s['total_bytes'])
msg_template = '%(_percent_str)s of %(_total_bytes_str)s at %(_speed_str)s ETA %(_eta_str)s'
elif s.get('total_bytes_estimate') is not None:
s['_total_bytes_estimate_str'] = format_bytes(s['total_bytes_estimate'])
msg_template = '%(_percent_str)s of ~%(_total_bytes_estimate_str)s at %(_speed_str)s ETA %(_eta_str)s'
else:
if s.get('downloaded_bytes') is not None:
s['_downloaded_bytes_str'] = format_bytes(s['downloaded_bytes'])
if s.get('elapsed'):
s['_elapsed_str'] = self.format_seconds(s['elapsed'])
msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s (%(_elapsed_str)s)'
else:
msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s'
else:
                msg_template = '%(_percent_str)s at %(_speed_str)s ETA %(_eta_str)s'
self._report_progress_status(msg_template % s)
def report_resuming_byte(self, resume_len):
"""Report attempt to resume at given byte."""
self.to_screen('[download] Resuming download at byte %s' % resume_len)
def report_retry(self, count, retries):
"""Report retry in case of HTTP error 5xx"""
self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded."""
try:
self.to_screen('[download] %s has already been downloaded' % file_name)
except UnicodeEncodeError:
self.to_screen('[download] The file has already been downloaded')
def report_unable_to_resume(self):
"""Report it was impossible to resume download."""
self.to_screen('[download] Unable to resume')
def download(self, filename, info_dict):
"""Download to a filename using the info from info_dict
Return True on success and False otherwise
"""
nooverwrites_and_exists = (
self.params.get('nooverwrites', False) and
os.path.exists(encodeFilename(filename))
)
continuedl_and_exists = (
self.params.get('continuedl', True) and
os.path.isfile(encodeFilename(filename)) and
not self.params.get('nopart', False)
)
# Check file already present
if filename != '-' and nooverwrites_and_exists or continuedl_and_exists:
self.report_file_already_downloaded(filename)
self._hook_progress({
'filename': filename,
'status': 'finished',
'total_bytes': os.path.getsize(encodeFilename(filename)),
})
return True
sleep_interval = self.params.get('sleep_interval')
if sleep_interval:
self.to_screen('[download] Sleeping %s seconds...' % sleep_interval)
time.sleep(sleep_interval)
return self.real_download(filename, info_dict)
def real_download(self, filename, info_dict):
"""Real download process. Redefine in subclasses."""
raise NotImplementedError('This method must be implemented by subclasses')
def _hook_progress(self, status):
for ph in self._progress_hooks:
ph(status)
def add_progress_hook(self, ph):
        # See YoutubeDL.py (search for progress_hooks) for a description of
# this interface
self._progress_hooks.append(ph)
def _debug_cmd(self, args, exe=None):
if not self.params.get('verbose', False):
return
str_args = [decodeArgument(a) for a in args]
if exe is None:
exe = os.path.basename(str_args[0])
try:
import pipes
shell_quote = lambda args: ' '.join(map(pipes.quote, str_args))
except ImportError:
shell_quote = repr
self.to_screen('[debug] %s command line: %s' % (
exe, shell_quote(str_args)))
|
unlicense
|
pombredanne/AnotherPyGraphvizAgain
|
AnotherPyGraphvizAgain/tests/Framework.py
|
1
|
1641
|
# -*- coding: utf-8 -*-
# Copyright 2013 Vincent Jacques
# [email protected]
# This file is part of AnotherPyGraphvizAgain. http://jacquev6.github.com/AnotherPyGraphvizAgain
# AnotherPyGraphvizAgain is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# AnotherPyGraphvizAgain is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with AnotherPyGraphvizAgain. If not, see <http://www.gnu.org/licenses/>.
import unittest
import os.path
import traceback
def ConstructionTestCase(Graph):
class C(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.g = Graph("First graph")
def expect(self, dotString):
testName = None
for (_, _, functionName, _) in traceback.extract_stack():
if functionName.startswith("test"):
testName = self.__class__.__name__ + "." + functionName
break
self.assertIsNot(testName, None)
# self.g.drawTo(os.path.join("AnotherPyGraphvizAgain", "tests", "drawings", testName + ".png"))
self.assertEqual(self.g.dotString(), dotString)
return C
|
gpl-3.0
|
iRGBit/QGIS
|
python/plugins/db_manager/sqledit.py
|
18
|
6399
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ScriptEdit.py
---------------------
Date : February 2014
Copyright : (C) 2014 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'February 2014'
__copyright__ = '(C) 2014, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import Qt, QSettings
from PyQt4.QtGui import QColor, QFont, QShortcut, QKeySequence
from PyQt4.Qsci import QsciScintilla, QsciLexerSQL
class SqlEdit(QsciScintilla):
LEXER_PYTHON = 0
LEXER_R = 1
def __init__(self, parent=None):
QsciScintilla.__init__(self, parent)
self.mylexer = None
self.api = None
self.setCommonOptions()
self.initShortcuts()
def setCommonOptions(self):
# Enable non-ASCII characters
self.setUtf8(True)
# Default font
font = QFont()
font.setFamily('Courier')
font.setFixedPitch(True)
font.setPointSize(10)
self.setFont(font)
self.setMarginsFont(font)
self.setBraceMatching(QsciScintilla.SloppyBraceMatch)
self.setWrapMode(QsciScintilla.WrapWord)
self.setWrapVisualFlags(QsciScintilla.WrapFlagByText,
QsciScintilla.WrapFlagNone, 4)
self.setSelectionForegroundColor(QColor('#2e3436'))
self.setSelectionBackgroundColor(QColor('#babdb6'))
# Show line numbers
self.setMarginWidth(1, '000')
self.setMarginLineNumbers(1, True)
self.setMarginsForegroundColor(QColor('#2e3436'))
self.setMarginsBackgroundColor(QColor('#babdb6'))
# Highlight current line
self.setCaretLineVisible(True)
self.setCaretLineBackgroundColor(QColor('#d3d7cf'))
# Folding
self.setFolding(QsciScintilla.BoxedTreeFoldStyle)
self.setFoldMarginColors(QColor('#d3d7cf'), QColor('#d3d7cf'))
# Mark column 80 with vertical line
self.setEdgeMode(QsciScintilla.EdgeLine)
self.setEdgeColumn(80)
self.setEdgeColor(QColor('#eeeeec'))
# Indentation
self.setAutoIndent(True)
self.setIndentationsUseTabs(False)
self.setIndentationWidth(4)
self.setTabIndents(True)
self.setBackspaceUnindents(True)
self.setTabWidth(4)
        # Autocompletion
self.setAutoCompletionThreshold(2)
self.setAutoCompletionSource(QsciScintilla.AcsAPIs)
self.setAutoCompletionCaseSensitivity(False)
# Load font from Python console settings
settings = QSettings()
fontName = settings.value('pythonConsole/fontfamilytext', 'Monospace')
fontSize = int(settings.value('pythonConsole/fontsize', 10))
self.defaultFont = QFont(fontName)
self.defaultFont.setFixedPitch(True)
self.defaultFont.setPointSize(fontSize)
self.defaultFont.setStyleHint(QFont.TypeWriter)
self.defaultFont.setStretch(QFont.SemiCondensed)
self.defaultFont.setLetterSpacing(QFont.PercentageSpacing, 87.0)
self.defaultFont.setBold(False)
self.boldFont = QFont(self.defaultFont)
self.boldFont.setBold(True)
self.italicFont = QFont(self.defaultFont)
self.italicFont.setItalic(True)
self.setFont(self.defaultFont)
self.setMarginsFont(self.defaultFont)
self.initLexer()
def initShortcuts(self):
(ctrl, shift) = (self.SCMOD_CTRL << 16, self.SCMOD_SHIFT << 16)
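        # Scintilla key commands encode the modifier in the upper 16 bits of
        # the key code, hence the shifts above.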
# Disable some shortcuts
self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('D') + ctrl)
self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L') + ctrl)
self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L') + ctrl
+ shift)
self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('T') + ctrl)
# self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord("Z") + ctrl)
#self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord("Y") + ctrl)
# Use Ctrl+Space for autocompletion
self.shortcutAutocomplete = QShortcut(QKeySequence(Qt.CTRL
+ Qt.Key_Space), self)
self.shortcutAutocomplete.setContext(Qt.WidgetShortcut)
self.shortcutAutocomplete.activated.connect(self.autoComplete)
def autoComplete(self):
self.autoCompleteFromAll()
def initLexer(self):
self.mylexer = QsciLexerSQL()
colorDefault = QColor('#2e3436')
colorComment = QColor('#c00')
colorCommentBlock = QColor('#3465a4')
colorNumber = QColor('#4e9a06')
colorType = QColor('#4e9a06')
colorKeyword = QColor('#204a87')
colorString = QColor('#ce5c00')
self.mylexer.setDefaultFont(self.defaultFont)
self.mylexer.setDefaultColor(colorDefault)
self.mylexer.setColor(colorComment, 1)
self.mylexer.setColor(colorNumber, 2)
self.mylexer.setColor(colorString, 3)
self.mylexer.setColor(colorString, 4)
self.mylexer.setColor(colorKeyword, 5)
self.mylexer.setColor(colorString, 6)
self.mylexer.setColor(colorString, 7)
self.mylexer.setColor(colorType, 8)
self.mylexer.setColor(colorCommentBlock, 12)
self.mylexer.setColor(colorString, 15)
self.mylexer.setFont(self.italicFont, 1)
self.mylexer.setFont(self.boldFont, 5)
self.mylexer.setFont(self.boldFont, 8)
self.mylexer.setFont(self.italicFont, 12)
self.setLexer(self.mylexer)
def lexer(self):
return self.mylexer
|
gpl-2.0
|