repo_name | path | copies | size | content | license
---|---|---|---|---|---|
koehlermichael/olympia | apps/devhub/urls.py | 11 | 9343 | from django.conf.urls import include, patterns, url
from django.shortcuts import redirect
from lib.misc.urlconf_decorator import decorate
from addons.urls import ADDON_ID
from amo.decorators import write
from amo.utils import partial
from . import views
PACKAGE_NAME = '(?P<package_name>[_\w]+)'
# These will all start with /addon/<addon_id>/submit/
submit_patterns = patterns(
'',
url('^$', lambda r, addon_id: redirect('devhub.submit.7', addon_id)),
url('^3$', views.submit_describe, name='devhub.submit.3'),
url('^4$', views.submit_media, name='devhub.submit.4'),
url('^5$', views.submit_license, name='devhub.submit.5'),
url('^6$', views.submit_select_review, name='devhub.submit.6'),
url('^7$', views.submit_done, name='devhub.submit.7'),
url('^bump$', views.submit_bump, name='devhub.submit.bump'),
)
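# For illustration (hypothetical addon_id 1234): once these are mounted under
# '^addon/%s/submit/' via detail_patterns below, the '^3$' entry maps
# .../addon/1234/submit/3 to views.submit_describe, and
# reverse('devhub.submit.3', args=['1234']) rebuilds that path relative to
# wherever this urlconf is mounted.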
# These will all start with /theme/<slug>/
theme_detail_patterns = patterns(
'',
url('^$', lambda r, addon_id: redirect('devhub.themes.edit', addon_id, permanent=True)),
url('^delete$', views.delete, name='devhub.themes.delete'),
# Upload url here to satisfy CSRF.
url('^edit/upload/'
'(?P<upload_type>persona_header|persona_footer)$',
views.ajax_upload_image, name='devhub.personas.reupload_persona'),
url('^edit$', views.edit_theme, name='devhub.themes.edit'),
url('^rmlocale$', views.remove_locale, name='devhub.themes.remove-locale'),
)
# These will all start with /addon/<addon_id>/
detail_patterns = patterns(
'',
# Redirect to the edit page from the base.
url('^$', lambda r, addon_id: redirect('devhub.addons.edit', addon_id,
permanent=True)),
url('^edit$', views.edit, name='devhub.addons.edit'),
url('^delete$', views.delete, name='devhub.addons.delete'),
url('^disable$', views.disable, name='devhub.addons.disable'),
url('^unlist$', views.unlist, name='devhub.addons.unlist'),
url('^enable$', views.enable, name='devhub.addons.enable'),
url('^cancel$', views.cancel, name='devhub.addons.cancel'),
url('^ownership$', views.ownership, name='devhub.addons.owner'),
url('^admin$', views.admin, name='devhub.addons.admin'),
url('^payments$', views.payments, name='devhub.addons.payments'),
url('^payments/disable$', views.disable_payments,
name='devhub.addons.payments.disable'),
url('^profile$', views.profile, name='devhub.addons.profile'),
url('^profile/remove$', views.remove_profile,
name='devhub.addons.profile.remove'),
url('^edit_(?P<section>[^/]+)(?:/(?P<editable>[^/]+))?$',
views.addons_section, name='devhub.addons.section'),
url('^upload_preview$', views.upload_image, {'upload_type': 'preview'},
name='devhub.addons.upload_preview'),
url('^upload_icon$', views.upload_image, {'upload_type': 'icon'},
name='devhub.addons.upload_icon'),
url('^upload$', views.upload_for_addon, name='devhub.upload_for_addon'),
url('^upload/(?P<uuid>[^/]+)$', views.upload_detail_for_addon,
name='devhub.upload_detail_for_addon'),
url('^versions$', views.version_list, name='devhub.addons.versions'),
url('^versions/delete$', views.version_delete,
name='devhub.versions.delete'),
url('^versions/add$', views.version_add, name='devhub.versions.add'),
url('^versions/stats$', views.version_stats,
name='devhub.versions.stats'),
url('^versions/(?P<version_id>\d+)$', views.version_edit,
name='devhub.versions.edit'),
url('^versions/(?P<version_id>\d+)/add$', views.version_add_file,
name='devhub.versions.add_file'),
url('^versions/(?P<version>[^/]+)$', views.version_bounce),
url('^file/(?P<file_id>[^/]+)/validation$', views.file_validation,
name='devhub.file_validation'),
url('^file/(?P<file_id>[^/]+)/validation.json$',
views.json_file_validation,
name='devhub.json_file_validation'),
url('^file/(?P<file_id>[^/]+)/validation/annotate$',
views.annotate_file_validation,
name='devhub.annotate_file_validation'),
url('^validation-result/(?P<result_id>\d+)$',
views.bulk_compat_result,
name='devhub.bulk_compat_result'),
url('^validation-result/(?P<result_id>\d+).json$',
views.json_bulk_compat_result,
name='devhub.json_bulk_compat_result'),
url('^submit/', include(submit_patterns)),
url('^submit/resume$', views.submit_resume, name='devhub.submit.resume'),
url('^request-review/(?P<status>[%s])$'
% ''.join(map(str, views.REQUEST_REVIEW)),
views.request_review, name='devhub.request-review'),
url('^rmlocale$', views.remove_locale, name='devhub.addons.remove-locale'),
)
# These will all start with /ajax/addon/<addon_id>/
ajax_patterns = patterns(
'',
url('^dependencies$', views.ajax_dependencies,
name='devhub.ajax.dependencies'),
url('^versions/compatibility/status$',
views.ajax_compat_status, name='devhub.ajax.compat.status'),
url('^versions/compatibility/error$',
views.ajax_compat_error, name='devhub.ajax.compat.error'),
url('^versions/(?P<version_id>\d+)/compatibility$',
views.ajax_compat_update, name='devhub.ajax.compat.update'),
url('^image/status$', views.image_status, name='devhub.ajax.image.status'),
# Performance testing
url(r'^performance/file/(?P<file_id>\d+)/start-tests.json$',
views.file_perf_tests_start, name='devhub.file_perf_tests_start'),
)
redirect_patterns = patterns(
'',
('^addon/edit/(\d+)',
lambda r, id: redirect('devhub.addons.edit', id, permanent=True)),
('^addon/status/(\d+)',
lambda r, id: redirect('devhub.addons.versions', id, permanent=True)),
('^versions/(\d+)',
lambda r, id: redirect('devhub.addons.versions', id, permanent=True)),
)
urlpatterns = decorate(write, patterns(
'',
url('^$', views.index, name='devhub.index'),
url('', include(redirect_patterns)),
# Redirect people who have /addons/ instead of /addon/.
('^addons/\d+/.*',
lambda r: redirect(r.path.replace('addons', 'addon', 1))),
# Add-on submission
url('^addon/submit/$',
lambda r: redirect('devhub.submit.1', permanent=True)),
url('^addon/submit/1$', views.submit, name='devhub.submit.1'),
url('^addon/submit/2$', views.submit_addon, name='devhub.submit.2'),
# Standalone validator:
url('^addon/validate/?$', views.validate_addon,
name='devhub.validate_addon'),
# Standalone compatibility checker:
url('^addon/check-compatibility$', views.check_addon_compatibility,
name='devhub.check_addon_compatibility'),
url(r'^addon/check-compatibility/application_versions\.json$',
views.compat_application_versions,
name='devhub.compat_application_versions'),
# Redirect to /addons/ at the base.
url('^addon$', lambda r: redirect('devhub.addons', permanent=True)),
url('^addons$', views.dashboard, name='devhub.addons'),
url('^themes$', views.dashboard, name='devhub.themes',
kwargs={'theme': True}),
url('^feed$', views.feed, name='devhub.feed_all'),
# TODO: not necessary when devhub homepage is moved out of remora
url('^feed/all$', lambda r: redirect('devhub.feed_all', permanent=True)),
url('^feed/%s$' % ADDON_ID, views.feed, name='devhub.feed'),
url('^upload$', views.upload, name='devhub.upload'),
url('^upload/sideload$', partial(views.upload, is_listed=False),
name='devhub.upload_sideload'),
url('^upload/unlisted$',
partial(views.upload, is_listed=False, automated=True),
name='devhub.upload_unlisted'),
url('^upload/([^/]+)(?:/([^/]+))?$', views.upload_detail,
name='devhub.upload_detail'),
url('^standalone-upload$',
partial(views.upload, is_standalone=True),
name='devhub.standalone_upload'),
url('^standalone-upload-unlisted$',
partial(views.upload, is_standalone=True, is_listed=False,
automated=True),
name='devhub.standalone_upload_unlisted'),
url('^standalone-upload-sideload$',
partial(views.upload, is_standalone=True, is_listed=False),
name='devhub.standalone_upload_sideload'),
url('^standalone-upload/([^/]+)$', views.standalone_upload_detail,
name='devhub.standalone_upload_detail'),
url('^upload-manifest$', views.upload_manifest,
name='devhub.upload_manifest'),
# URLs for a single add-on.
url('^addon/%s/' % ADDON_ID, include(detail_patterns)),
url('^ajax/addon/%s/' % ADDON_ID, include(ajax_patterns)),
# Themes submission.
url('^theme/submit/?$', views.submit_theme, name='devhub.themes.submit'),
url('^theme/%s/submit/done$' % ADDON_ID, views.submit_theme_done,
name='devhub.themes.submit.done'),
url('^theme/submit/upload/'
'(?P<upload_type>persona_header|persona_footer)$',
views.ajax_upload_image, name='devhub.personas.upload_persona'),
url('^theme/%s/' % ADDON_ID, include(theme_detail_patterns)),
# Add-on SDK page
url('builder$', lambda r: redirect(views.MDN_BASE)),
# Developer docs
url('docs/(?P<doc_name>[-_\w]+(?:/[-_\w]+)?)?$',
views.docs, name='devhub.docs'),
# Search
url(r'^search$', views.search, name='devhub.search'),
))
| bsd-3-clause |
UNINETT/nav | python/nav/mibs/alcatel_ind1_port_mib.py | 2 | 3059 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Pär Stolpe, Linköpings universitet
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License version 3 as published by the Free
# Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""A class for getting DDM values for ALE equipment
"""
from twisted.internet import defer
from twisted.internet.defer import returnValue
from nav.smidumps import get_mib
from nav.mibs.mibretriever import MibRetriever
from nav.models.manage import Sensor
COLUMNS = {
"ddmPortTemperature": {
"unit_of_measurement": Sensor.UNIT_CELSIUS,
"precision": 3,
"scale": Sensor.SCALE_MILLI,
"name": "{ifc} Module Temperature",
"description": "{ifc} Module Temperature",
},
"ddmPortTxBiasCurrent": {
"unit_of_measurement": Sensor.UNIT_AMPERES,
"precision": 3,
"scale": Sensor.SCALE_MILLI,
"name": "{ifc} TX Laser Bias Current",
"description": "{ifc} TX Laser Bias Current",
},
"ddmPortTxOutputPower": {
"unit_of_measurement": Sensor.UNIT_DBM,
"precision": 3,
"scale": Sensor.SCALE_MILLI,
"name": "{ifc} TX Laser Output Power",
"description": "{ifc} TX Laser Output Power",
},
"ddmPortRxOpticalPower": {
"unit_of_measurement": Sensor.UNIT_DBM,
"precision": 3,
"scale": Sensor.SCALE_MILLI,
"name": "{ifc} RX Laser Input Power",
"description": "{ifc} RX Laser Input Power",
}
}
class AlcatelInd1PortMib(MibRetriever):
"""MibRetriever for Alcatel Port Sensors"""
mib = get_mib('ALCATEL-IND1-PORT-MIB')
@defer.inlineCallbacks
def get_all_sensors(self):
"""Discovers and returns all eligible dom sensors from this
device.
"""
sensors = []
for column, config in COLUMNS.items():
sensors += yield self.handle_column(column, config)
returnValue(sensors)
@defer.inlineCallbacks
def handle_column(self, column, config):
"""Returns the sensors of the given type"""
result = []
value_oid = self.nodes[column].oid
rows = yield self.retrieve_column(column)
for row in rows:
sensor = dict(
oid=str(value_oid + row),
scale=None,
mib=self.get_module_name(),
internal_name="{ifc}." + column,
ifindex=row[-2],
)
print("SENSOR:")
print(sensor)
sensor.update(config)
result.append(sensor)
returnValue(result)
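# Illustrative only (OID and row values invented): for a ddmPortTemperature
# row indexed (..., 1001, 1) this yields roughly
# {'oid': '<column OID>.1001.1', 'mib': 'ALCATEL-IND1-PORT-MIB',
#  'internal_name': '{ifc}.ddmPortTemperature', 'ifindex': 1001,
#  'unit_of_measurement': Sensor.UNIT_CELSIUS, 'precision': 3,
#  'scale': Sensor.SCALE_MILLI, 'name': '{ifc} Module Temperature', ...}
# i.e. sensor.update(config) layers the static COLUMNS metadata over the
# per-row oid/ifindex fields (note it overwrites the scale=None default).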
| gpl-2.0 |
ceibal-tatu/sugar-toolkit | examples/toolbar.py | 2 | 1296 | import gtk
from sugar.graphics.toolbutton import ToolButton
from sugar.graphics.toolbarbox import ToolbarBox, ToolbarButton
from sugar.graphics import style
window = gtk.Window()
box = gtk.VBox()
window.add(box)
toolbar = ToolbarBox()
box.pack_start(toolbar, False)
toolbarbutton_1 = ToolbarButton(
page=gtk.Button('sub-widget #1'),
icon_name='computer-xo')
toolbar.toolbar.insert(toolbarbutton_1, -1)
toolbarbutton_2 = ToolbarButton(
page=gtk.Button('sub-widget #2'),
icon_name='button_cancel',
tooltip='with custom palette instead of sub-widget')
toolbar.toolbar.insert(toolbarbutton_2, -1)
toolbar.toolbar.insert(gtk.SeparatorToolItem(), -1)
def del_cb(widget):
toolbar.toolbar.remove(toolbarbutton_3)
del_b = gtk.Button('delete sub-widget #3')
del_b.connect('clicked', del_cb)
toolbarbutton_3 = ToolbarButton(
page=del_b,
icon_name='activity-journal')
toolbar.toolbar.insert(toolbarbutton_3, -1)
subbar = gtk.Toolbar()
subbutton = ToolButton(
icon_name='document-send',
tooltip='document-send')
subbar.insert(subbutton, -1)
subbar.show_all()
toolbarbutton_4 = ToolbarButton(
page=subbar,
icon_name='document-save')
toolbar.toolbar.insert(toolbarbutton_4, -1)
window.show_all()
gtk.main()
| lgpl-2.1 |
weiwei02/Technical--Documentation | python/src/algrothm/sort/HeapSort.py | 1 | 5210 | #!/usr/bin/env python3
"""选择排序—堆排序(Heap Sort) 时间复杂度 O(nlogn)
堆排序是一种树形选择排序,是对直接选择排序的有效改进。
基本思想:
堆的定义如下:具有n个元素的序列(k1,k2,...,kn),当且仅当满足
最小堆:Ki <= K2i and Ki <= K(2i+1)
最大堆:Ki >= K2i and Ki >= K(2i+1)
时称之为堆。由堆的定义可以看出,堆顶元素(即第一个元素)必为最小项(小顶堆)。
若以一维数组存储一个堆,则堆对应一棵完全二叉树,且所有非叶结点的值均不大于(或不小于)其子女的值,根结点(堆顶元素)的值是最小(或最大)的。如:
(a)大顶堆序列:(96, 83,27,38,11,09)
(b) 小顶堆序列:(12,36,24,85,47,30,53,91)
初始时把要排序的n个数的序列看作是一棵顺序存储的二叉树(一维数组存储二叉树),调整它们的存储序,使之成为一个堆,将堆顶元素输出,得到n 个元素中最小(或最大)的元素,这时堆的根节点的数最小(或者最大)。然后对前面(n-1)个元素重新调整使之成为堆,输出堆顶元素,得到n 个元素中次小(或次大)的元素。依此类推,直到只有两个节点的堆,并对它们作交换,最后得到有n个节点的有序序列。称这个过程为堆排序。
因此,实现堆排序需解决两个问题:
1. 如何将n 个待排序的数建成堆;
2. 输出堆顶元素后,怎样调整剩余n-1 个元素,使其成为一个新堆。
首先讨论第二个问题:输出堆顶元素后,对剩余n-1元素重新建成堆的调整过程。
调整小顶堆的方法:
1)设有m 个元素的堆,输出堆顶元素后,剩下m-1 个元素。将堆底元素送入堆顶((最后一个元素与堆顶进行交换),堆被破坏,其原因仅是根结点不满足堆的性质。
2)将根结点与左、右子树中较小元素的进行交换。
3)若与左子树交换:如果左子树堆被破坏,即左子树的根结点不满足堆的性质,则重复方法 (2).
4)若与右子树交换,如果右子树堆被破坏,即右子树的根结点不满足堆的性质。则重复方法 (2).
5)继续对不满足堆性质的子树进行上述交换操作,直到叶子结点,堆被建成。
这个自根结点到叶子结点的调整过程为筛选。
再讨论对n 个元素初始建堆的过程。
建堆方法:对初始序列建堆的过程,就是一个反复进行筛选的过程。
1)n 个结点的完全二叉树,则最后一个结点是第n/2个结点的子树。
2)筛选从第n/2个结点为根的子树开始,该子树成为堆。
3)之后向前依次对各结点为根的子树进行筛选,使之成为堆,直到根结点。
算法的实现:
从算法描述来看,堆排序需要两个过程,一是建立堆,二是堆顶与堆的最后一个元素交换位置。所以堆排序有两个函数组成。一是建堆的渗透函数,二是反复调用渗透函数实现排序的函数。
:author Wang Weiwei <email>[email protected] / [email protected]</email>
:sine 2017/9/2
:version 1.0
"""
def adjust_heap(arr, top, length):
"""
Sift arr[top] down to restore the max-heap property, i.e. rebuild the subtree rooted at element `top`
:param arr: the array being adjusted
:param top: index of the subtree root
:param length: size of the heap region (only indices below `length` belong to the heap)
:return: void
"""
temp = arr[top]
# index of the left child (the array is 0-based, so the children of i are 2*i+1 and 2*i+2)
child = top * 2 + 1
# the caller bounds `length`, so only arr[0:length] is treated as the heap
while child < length:
if child < length - 1 and arr[child] < arr[child + 1]:
child += 1
if arr[child] > arr[top]:
# the larger child is greater than the parent, so move the child up
arr[top] = arr[child]
arr[child] = temp
top = child
child = top * 2 + 1
else:
break
def built_heap(arr):
"""Build the initial heap, i.e. sift down every node that has children, one by one"""
# position of the last node that has children
end = (len(arr) - 1) // 2
while end >= 0:
adjust_heap(arr, end, len(arr))
end -= 1
def heap_sort(arr):
"""
Heap sort
Steps:
1. Build a max-heap.
2. Swap the root of the max-heap with the last element of the array (breaking the heap property), move the tail pointer one position forward, then rebuild the heap.
3. Repeat step 2 until no elements remain in the heap.
4. The array is now sorted in ascending order.
:param arr:
:return:
"""
built_heap(arr)
# size of the remaining heap region
end = len(arr) - 1
while end > 0:
temp = arr[end]
arr[end] = arr[0]
arr[0] = temp
adjust_heap(arr, 0, end)
end -= 1
if __name__ == '__main__':
arr1 = [1, 5, 2, 4, 0, 14, 42, 65, 27, 22]
print('original:', arr1)
heap_sort(arr1)
print('sorted: ', arr1)
| apache-2.0 |
Distrotech/yum-utils | plugins/tmprepo/tmprepo.py | 5 | 10208 | #!/usr/bin/python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright Red Hat Inc. 2007, 2008
#
# Author: James Antill <[email protected]>
# Examples:
#
# yum --tmprepo=http://example.com/foo/bar.repo ...
from yum.plugins import TYPE_INTERACTIVE
import logging # for commands
import urlgrabber.grabber
import tempfile
import os
import shutil
import time
requires_api_version = '2.5'
plugin_type = (TYPE_INTERACTIVE,)
def make_validate(log, pkgs_gpgcheck, repo_gpgcheck):
def tvalidate(repo):
if pkgs_gpgcheck or repo_gpgcheck:
# Don't ever allow them to set gpgcheck='false'
if pkgs_gpgcheck and not repo.gpgcheck:
log.warn("Repo %s tried to set gpgcheck=false" % repo)
return False
if (repo_gpgcheck and hasattr(repo, 'repo_gpgcheck') and
not repo.repo_gpgcheck):
log.warn("Repo %s tried to set repo_gpgcheck=false" % repo)
return False
# Don't allow them to set gpgkey=anything
for key in repo.gpgkey:
if not key.startswith('file:/'):
log.warn("Repo %s tried to set gpgkey to %s" %
(repo, repo.gpgkey))
return False
return True
return tvalidate
dnames = []
class AutoCleanupDir:
"""
Given a directory ... let it exist until "the end", and then clean it up.
"""
def __init__(self, dname):
self.dname = dname
# Want it to live until python shutdown
dnames.append(self)
# Can't use __del__ as python doesn't dtrt. on exit
def cleanup(self):
shutil.rmtree(self.dname, ignore_errors=True)
def close_hook(conduit):
global dnames
for dname in dnames:
dname.cleanup()
dnames = []
def add_dir_repo(base, trepo, cleanup):
# Let people do it via local directories ... we don't want this for
# HTTP because people _should_ just generate .repo files, but for
# local CDs of pkgs etc. we'll be nice.
trepo_path = trepo[len("file:"):]
trepo_data = tempfile.mkdtemp()
if cleanup:
AutoCleanupDir(trepo_data)
else:
os.chmod(trepo_data, 0755)
trepo_name = os.path.basename(os.path.dirname(trepo_path))
tmp_fname = "%s/tmp-%s.repo" % (trepo_data, trepo_name)
repoid = "T-%0.4s-%x" % (trepo_name, int(time.time()))
tpc = 'true'
if not lpgpgcheck:
tpc = 'false'
trc = 'true'
if not lrgpgcheck:
trc = 'false'
open(tmp_fname, "wb").write("""\
[%(repoid)s]
name=Tmp. repo. for %(path)s
baseurl=file:%(dname)s
enabled=1
gpgcheck=%(pkgs_gpgcheck)s
repo_gpgcheck=%(repo_gpgcheck)s
metadata_expire=0
# Make cost smaller, as we know it's "local" ... if this isn't good just create
# your own .repo file. ... then you won't need to createrepo each run either.
cost=500
""" % {'path' : trepo_path,
'repoid' : repoid,
'dname' : trepo_data,
'pkgs_gpgcheck' : tpc,
'repo_gpgcheck' : trc,
})
if cleanup:
print "Creating tmp. repodata for:", trepo_path
else:
print "Creating saved repodata for:", trepo_path
print " * Result is saved here :", tmp_fname
os.spawnlp(os.P_WAIT, "createrepo",
"createrepo", "--database", "--baseurl", trepo,
"--outputdir", trepo_data, trepo_path)
AutoCleanupDir("%s/%s" % (base.conf.cachedir, repoid))
return tmp_fname
def add_repomd_repo(base, repomd):
# Let people do it by pointing at the repomd.xml file ... smeg it
trepo_data = tempfile.mkdtemp()
AutoCleanupDir(trepo_data)
trepo_name = os.path.basename(os.path.dirname(os.path.dirname(repomd)))
tmp_fname = "%s/tmp-%s.repo" % (trepo_data, trepo_name)
repoid = "T-%0.4s-%x" % (trepo_name, int(time.time()))
pgpgcheck, rgpgcheck = rpgpgcheck, rrgpgcheck
if repomd.startswith("file:"):
pgpgcheck, rgpgcheck = lpgpgcheck, lrgpgcheck
tpc = 'true'
if not pgpgcheck:
tpc = 'false'
trc = 'true'
if not rgpgcheck:
trc = 'false'
open(tmp_fname, "wb").write("""\
[%(repoid)s]
name=Tmp. repo. for %(path)s
baseurl=%(dname)s
enabled=1
gpgcheck=%(pkgs_gpgcheck)s
repo_gpgcheck=%(repo_gpgcheck)s
metadata_expire=0
""" % {'path' : repomd,
'repoid' : repoid,
'dname' : repomd[:-len("repodata/repomd.xml")],
'pkgs_gpgcheck' : tpc,
'repo_gpgcheck' : trc,
})
print "Creating tmp. repo for:", repomd
AutoCleanupDir("%s/%s" % (base.conf.cachedir, repoid))
return tmp_fname
# Note that mirrorlist also includes metalink, due to the anaconda hack.
def add_mirrorlist_repo(base, mirrorlist):
# Let people do it by pointing at a mirrorlist (or metalink) URL
trepo_data = tempfile.mkdtemp()
AutoCleanupDir(trepo_data)
trepo_name = os.path.basename(mirrorlist)
tmp_fname = "%s/tmp-%s.repo" % (trepo_data, trepo_name)
repoid = "T-%4.4s-%x" % (trepo_name, int(time.time()))
tpc = 'true'
if not rpgpgcheck:
tpc = 'false'
trc = 'true'
if not rrgpgcheck:
trc = 'false'
open(tmp_fname, "wb").write("""\
[%(repoid)s]
name=Tmp. repo. for %(path)s
mirrorlist=%(dname)s
enabled=1
gpgcheck=true
metadata_expire=0
""" % {'path' : mirrorlist,
'repoid' : repoid,
'dname' : mirrorlist,
'pkgs_gpgcheck' : tpc,
'repo_gpgcheck' : trc,
})
print "Creating tmp. repo for:", mirrorlist
AutoCleanupDir("%s/%s" % (base.conf.cachedir, repoid))
return tmp_fname
def add_repos(base, log, tmp_repos, tvalidate, tlocvalidate, cleanup_dir_temp,
nogpgcheck):
""" Add temporary repos to yum. """
# Don't use self._splitArg()? ... or require URLs without commas?
for trepo in tmp_repos:
if trepo.startswith("~/"):
trepo = "%s%s" % (os.environ['HOME'], trepo[1:])
if trepo.startswith("/"):
trepo = "file:%s" % trepo
validate = tvalidate
if trepo.startswith("file:"):
validate = tlocvalidate
if trepo.startswith("file:") and trepo.endswith("/"):
if not os.path.isdir(trepo[len("file:"):]):
log.warn("Failed to find directory " + trepo[len("file:"):])
continue
fname = add_dir_repo(base, trepo, cleanup_dir_temp)
elif trepo.endswith("repodata/repomd.xml"):
fname = add_repomd_repo(base, trepo)
elif trepo.endswith(".repo"):
grab = urlgrabber.grabber.URLGrabber()
# Need to keep alive until fname is used
gc_keep = tempfile.NamedTemporaryFile()
fname = gc_keep.name
try:
fname = grab.urlgrab(trepo, fname)
except urlgrabber.grabber.URLGrabError, e:
log.warn("Failed to retrieve " + trepo)
continue
else:
fname = add_mirrorlist_repo(base, trepo)
base.getReposFromConfigFile(fname, validate=validate)
if nogpgcheck:
for repo in base.repos.listEnabled():
repo.gpgcheck = False
repo.repo_gpgcheck = False
# Just do it all again...
base.setupProgressCallbacks()
rpgpgcheck = True # Remote
rrgpgcheck = False # Remote
lpgpgcheck = True
lrgpgcheck = False
def_tmp_repos_cleanup = False
def config_hook(conduit):
'''
Yum Plugin Config Hook:
Add the --tmprepo option.
'''
global rpgpgcheck
global rrgpgcheck
global lpgpgcheck
global lrgpgcheck
global def_tmp_repos_cleanup
parser = conduit.getOptParser()
if not parser:
return
if hasattr(parser, 'plugin_option_group'):
parser = parser.plugin_option_group
parser.add_option("--tmprepo", action='append',
type='string', dest='tmp_repos', default=[],
help="enable one or more repositories from URLs",
metavar='[url]')
parser.add_option("--tmprepo-keep-created", action='store_true',
dest='tmp_repos_cleanup', default=False,
help="keep created direcotry based tmp. repos.")
# We don't default to repository checks for repo files, because no one does
# that signing.
rpgpgcheck = conduit.confBool('main', 'pkgs_gpgcheck', default=True)
rrgpgcheck = conduit.confBool('main', 'repo_gpgcheck', default=False)
lpgpgcheck = conduit.confBool('main', 'pkgs_local_gpgcheck',
default=rpgpgcheck)
lrgpgcheck = conduit.confBool('main', 'repo_local_gpgcheck',
default=False)
def_tmp_repos_cleanup = conduit.confBool('main', 'cleanup', default=False)
_tmprepo_done = False
def prereposetup_hook(conduit):
'''
Process the tmp repos from --tmprepos.
'''
# Stupid group commands doing the explicit setup stuff...
global _tmprepo_done
if _tmprepo_done: return
_tmprepo_done = True
opts, args = conduit.getCmdLine()
if not opts.tmp_repos:
return
if hasattr(conduit, 'registerPackageName'):
conduit.registerPackageName("yum-plugin-tmprepo")
log = logging.getLogger("yum.verbose.main")
add_repos(conduit._base, log, opts.tmp_repos,
make_validate(log, rpgpgcheck, rrgpgcheck),
make_validate(log, lpgpgcheck, lrgpgcheck),
not (opts.tmp_repos_cleanup or def_tmp_repos_cleanup),
opts.nogpgcheck)
| gpl-2.0 |
monikagrabowska/osf.io | addons/github/apps.py | 4 | 1558 | import os
from addons.base.apps import BaseAddonAppConfig
from addons.github.views import github_hgrid_data
HERE = os.path.dirname(os.path.abspath(__file__))
NODE_SETTINGS_TEMPLATE = os.path.join(
HERE,
'templates',
'github_node_settings.mako',
)
class GitHubAddonConfig(BaseAddonAppConfig):
name = 'addons.github'
label = 'addons_github'
full_name = 'GitHub'
short_name = 'github'
configs = ['accounts', 'node']
categories = ['storage']
owners = ['user', 'node']
has_hgrid_files = True
max_file_size = 100 # MB
node_settings_template = NODE_SETTINGS_TEMPLATE
@property
def get_hgrid_data(self):
return github_hgrid_data
FILE_ADDED = 'github_file_added'
FILE_REMOVED = 'github_file_removed'
FILE_UPDATED = 'github_file_updated'
FOLDER_CREATED = 'github_folder_created'
NODE_AUTHORIZED = 'github_node_authorized'
NODE_DEAUTHORIZED = 'github_node_deauthorized'
NODE_DEAUTHORIZED_NO_USER = 'github_node_deauthorized_no_user'
REPO_LINKED = 'github_repo_linked'
actions = (
FILE_ADDED,
FILE_REMOVED,
FILE_UPDATED,
FOLDER_CREATED,
NODE_AUTHORIZED,
NODE_DEAUTHORIZED,
NODE_DEAUTHORIZED_NO_USER,
REPO_LINKED)
@property
def routes(self):
from . import routes
return [routes.api_routes]
@property
def user_settings(self):
return self.get_model('UserSettings')
@property
def node_settings(self):
return self.get_model('NodeSettings')
| apache-2.0 |
xuegang/gpdb | gpMgmt/bin/gppylib/db/test/test_dbconn.py | 54 | 2573 | #!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
""" Unittesting for dbconn module
"""
import unittest
from gppylib.db.dbconn import *
class TestDbURL(unittest.TestCase):
"""UnitTest class for DbURL class"""
def setUp(self):
self._environ = dict(os.environ)
def tearDown(self):
os.environ = self._environ
def testConstructorHost(self):
if 'PGHOST' in os.environ:
del os.environ['PGHOST']
foo = DbURL()
self.assertEqual(foo.pghost,"localhost")
os.environ['PGHOST'] = "foo"
foo = DbURL()
self.assertEqual(foo.pghost,"foo")
foo = DbURL(hostname='yoyoma')
self.assertEqual(foo.pghost,'yoyoma')
def testConstructorPort(self):
if 'PGPORT' in os.environ:
del os.environ['PGPORT']
foo = DbURL()
self.assertEqual(foo.pgport,5432)
os.environ['PGPORT'] = "6000"
foo = DbURL()
self.assertEqual(foo.pgport,6000)
foo = DbURL(port=6000)
self.assertEqual(foo.pgport,6000)
def testConstructorDbname(self):
if 'PGDATABASE' in os.environ:
del os.environ['PGDATABASE']
foo = DbURL()
self.assertEqual(foo.pgdb,'template1')
os.environ['PGDATABASE'] = 'testdb'
foo = DbURL()
self.assertEqual(foo.pgdb,'testdb')
foo = DbURL(dbname='yoyodb')
self.assertEqual(foo.pgdb, 'yoyodb')
def testConstructorUsername(self):
if 'PGUSER' in os.environ:
del os.environ['PGUSER']
foo = DbURL()
self.assertEqual(foo.pguser,os.environ['USER'])
os.environ['PGUSER'] = 'testuser'
foo = DbURL()
self.assertEqual(foo.pguser,'testuser')
foo = DbURL(username='yoyouser')
self.assertEqual(foo.pguser, 'yoyouser')
def testConstructorPass(self):
if 'PGPASSWORD' in os.environ:
del os.environ['PGPASSWORD']
foo = DbURL()
self.assertEqual(foo.pgpass,None)
os.environ['PGPASSWORD'] = 'testpass'
foo = DbURL()
self.assertEqual(foo.pgpass,'testpass')
foo = DbURL(password='yoyopass')
self.assertEqual(foo.pgpass, 'yoyopass')
#----------------------- Main ----------------------
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
zacps/zulip | zerver/views/webhooks/trello/board_actions.py | 16 | 3975 | from typing import Mapping, Any, Tuple, Optional, MutableMapping, Text
from .exceptions import UnknownUpdateBoardAction
from .templates import TRELLO_SUBJECT_TEMPLATE, TRELLO_MESSAGE_TEMPLATE
SUPPORTED_BOARD_ACTIONS = [
u'removeMemberFromBoard',
u'addMemberToBoard',
u'createList',
u'updateBoard',
]
REMOVE_MEMBER = u'removeMemberFromBoard'
ADD_MEMBER = u'addMemberToBoard'
CREATE_LIST = u'createList'
CHANGE_NAME = u'changeName'
TRELLO_BOARD_URL_TEMPLATE = u'[{board_name}]({board_url})'
ACTIONS_TO_MESSAGE_MAPPER = {
REMOVE_MEMBER: u'removed {member_name} from {board_url_template}',
ADD_MEMBER: u'added {member_name} to {board_url_template}',
CREATE_LIST: u'added {list_name} list to {board_url_template}',
CHANGE_NAME: u'renamed the board from {old_name} to {board_url_template}'
}
def process_board_action(payload, action_type):
# type: (Mapping[str, Any], Text) -> Tuple[Text, Text]
action_type = get_proper_action(payload, action_type)
return get_subject(payload), get_body(payload, action_type)
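# Illustrative payload -> output (names invented; assumes the imported
# templates render as '{board_name}' and '{full_name} {rest}' respectively):
# an 'addMemberToBoard' action by Ann adding Bob to board "Ops" (shortLink
# 'abc123') would return
# ('Ops', u'Ann added Bob to [Ops](https://trello.com/b/abc123)').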
def get_proper_action(payload, action_type):
# type: (Mapping[str, Any], Text) -> Text
if action_type == 'updateBoard':
data = get_action_data(payload)
if data.get('old').get('name'):
return CHANGE_NAME
raise UnknownUpdateBoardAction()
return action_type
def get_subject(payload):
# type: (Mapping[str, Any]) -> Text
data = {
'board_name': get_action_data(payload).get('board').get('name')
}
return TRELLO_SUBJECT_TEMPLATE.format(**data)
def get_body(payload, action_type):
# type: (Mapping[str, Any], Text) -> Text
message_body = ACTIONS_TO_FILL_BODY_MAPPER[action_type](payload, action_type)
creator = payload.get('action').get('memberCreator').get('fullName')
return TRELLO_MESSAGE_TEMPLATE.format(full_name=creator, rest=message_body)
def get_managed_member_body(payload, action_type):
# type: (Mapping[str, Any], Text) -> Text
data = {
'member_name': payload.get('action').get('member').get('fullName'),
}
return fill_appropriate_message_content(payload, action_type, data)
def get_create_list_body(payload, action_type):
# type: (Mapping[str, Any], Text) -> Text
data = {
'list_name': get_action_data(payload).get('list').get('name'),
}
return fill_appropriate_message_content(payload, action_type, data)
def get_change_name_body(payload, action_type):
# type: (Mapping[str, Any], Text) -> Text
data = {
'old_name': get_action_data(payload).get('old').get('name'),
}
return fill_appropriate_message_content(payload, action_type, data)
def fill_appropriate_message_content(payload, action_type, data=None):
# type: (Mapping[str, Any], Text, Optional[MutableMapping[str, Any]]) -> Text
data = {} if data is None else data
data['board_url_template'] = data.get('board_url_template', get_filled_board_url_template(payload))
message_body = get_message_body(action_type)
return message_body.format(**data)
def get_filled_board_url_template(payload):
# type: (Mapping[str, Any]) -> Text
return TRELLO_BOARD_URL_TEMPLATE.format(board_name=get_board_name(payload), board_url=get_board_url(payload))
def get_board_name(payload):
# type: (Mapping[str, Any]) -> Text
return get_action_data(payload).get('board').get('name')
def get_board_url(payload):
# type: (Mapping[str, Any]) -> Text
return u'https://trello.com/b/{}'.format(get_action_data(payload).get('board').get('shortLink'))
def get_message_body(action_type):
# type: (Text) -> Text
return ACTIONS_TO_MESSAGE_MAPPER[action_type]
def get_action_data(payload):
# type: (Mapping[str, Any]) -> Mapping[str, Any]
return payload.get('action').get('data')
ACTIONS_TO_FILL_BODY_MAPPER = {
REMOVE_MEMBER: get_managed_member_body,
ADD_MEMBER: get_managed_member_body,
CREATE_LIST: get_create_list_body,
CHANGE_NAME: get_change_name_body
}
| apache-2.0 |
balbinot/superharris | catalogue/migrations/0006_auto_20170815_1519.py | 1 | 1267 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-15 15:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0005_auto_20170815_1416'),
]
operations = [
migrations.RemoveField(
model_name='coordinate',
name='cluster_id',
),
migrations.RemoveField(
model_name='coordinate',
name='ref',
),
migrations.RemoveField(
model_name='metallicityandphotometry',
name='cluster_id',
),
migrations.RemoveField(
model_name='metallicityandphotometry',
name='ref',
),
migrations.RemoveField(
model_name='velocitiesandstructuralparameter',
name='cluster_id',
),
migrations.RemoveField(
model_name='velocitiesandstructuralparameter',
name='ref',
),
migrations.DeleteModel(
name='Coordinate',
),
migrations.DeleteModel(
name='MetallicityAndPhotometry',
),
migrations.DeleteModel(
name='VelocitiesAndStructuralParameter',
),
]
| mit |
CTSRD-SOAAP/chromium-42.0.2311.135 | tools/telemetry/telemetry/core/platform/power_monitor/sysfs_power_monitor_unittest.py | 13 | 7712 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core.platform.power_monitor import sysfs_power_monitor
from telemetry.core.platform import android_platform_backend
class SysfsPowerMonitorMonitorTest(unittest.TestCase):
initial_freq = {
'cpu0': '1700000 6227\n1600000 0\n1500000 0\n1400000 28\n1300000 22\n'
'1200000 14\n1100000 19\n1000000 22\n900000 14\n800000 20\n'
'700000 15\n600000 23\n500000 23\n400000 9\n300000 28\n200000 179',
'cpu1': '1700000 11491\n1600000 0\n1500000 0\n1400000 248\n1300000 1166\n'
'1200000 2082\n1100000 2943\n1000000 6560\n900000 12517\n'
'800000 8690\n700000 5105\n600000 3800\n500000 5131\n400000 5479\n'
'300000 7571\n200000 133618',
'cpu2': '1700000 1131',
'cpu3': '1700000 1131'
}
final_freq = {
'cpu0': '1700000 7159\n1600000 0\n1500000 0\n1400000 68\n1300000 134\n'
'1200000 194\n1100000 296\n1000000 716\n900000 1301\n800000 851\n'
'700000 554\n600000 343\n500000 612\n400000 691\n300000 855\n'
'200000 15525',
'cpu1': '1700000 12048\n1600000 0\n1500000 0\n1400000 280\n1300000 1267\n'
'1200000 2272\n1100000 3163\n1000000 7039\n900000 13800\n'
'800000 9599\n700000 5655\n600000 4144\n500000 5655\n400000 6005\n'
'300000 8288\n200000 149724',
'cpu2': None,
'cpu3': ''
}
expected_initial_freq = {
'cpu0': {
1700000000: 6227,
1600000000: 0,
1500000000: 0,
1400000000: 28,
1300000000: 22,
1200000000: 14,
1100000000: 19,
1000000000: 22,
900000000: 14,
800000000: 20,
700000000: 15,
600000000: 23,
500000000: 23,
400000000: 9,
300000000: 28,
200000000: 179
},
'cpu1': {
1700000000: 11491,
1600000000: 0,
1500000000: 0,
1400000000: 248,
1300000000: 1166,
1200000000: 2082,
1100000000: 2943,
1000000000: 6560,
900000000: 12517,
800000000: 8690,
700000000: 5105,
600000000: 3800,
500000000: 5131,
400000000: 5479,
300000000: 7571,
200000000: 133618
},
'cpu2': {
1700000000: 1131
},
'cpu3': {
1700000000: 1131
}
}
expected_final_freq = {
'cpu0': {
1700000000: 7159,
1600000000: 0,
1500000000: 0,
1400000000: 68,
1300000000: 134,
1200000000: 194,
1100000000: 296,
1000000000: 716,
900000000: 1301,
800000000: 851,
700000000: 554,
600000000: 343,
500000000: 612,
400000000: 691,
300000000: 855,
200000000: 15525
},
'cpu1': {
1700000000: 12048,
1600000000: 0,
1500000000: 0,
1400000000: 280,
1300000000: 1267,
1200000000: 2272,
1100000000: 3163,
1000000000: 7039,
900000000: 13800,
800000000: 9599,
700000000: 5655,
600000000: 4144,
500000000: 5655,
400000000: 6005,
300000000: 8288,
200000000: 149724
},
'cpu2': None,
'cpu3': {}
}
expected_freq_percents = {
'whole_package': {
1700000000: 3.29254111574526,
1600000000: 0.0,
1500000000: 0.0,
1400000000: 0.15926805099535601,
1300000000: 0.47124116307273645,
1200000000: 0.818756100807525,
1100000000: 1.099381692400982,
1000000000: 2.5942528544384302,
900000000: 5.68661122326737,
800000000: 3.850545467654628,
700000000: 2.409691872245393,
600000000: 1.4693702487650486,
500000000: 2.4623575553879373,
400000000: 2.672038150383057,
300000000: 3.415770495015825,
200000000: 69.59817400982045
},
'cpu0': {
1700000000: 4.113700564971752,
1600000000: 0.0,
1500000000: 0.0,
1400000000: 0.1765536723163842,
1300000000: 0.4943502824858757,
1200000000: 0.7944915254237288,
1100000000: 1.2226341807909604,
1000000000: 3.0632062146892656,
900000000: 5.680614406779661,
800000000: 3.6679025423728815,
700000000: 2.379060734463277,
600000000: 1.4124293785310735,
500000000: 2.599752824858757,
400000000: 3.0102401129943503,
300000000: 3.650247175141243,
200000000: 67.73481638418079
},
'cpu1': {
1700000000: 2.4713816665187682,
1600000000: 0.0,
1500000000: 0.0,
1400000000: 0.1419824296743278,
1300000000: 0.44813204365959713,
1200000000: 0.8430206761913214,
1100000000: 0.9761292040110037,
1000000000: 2.1252994941875945,
900000000: 5.69260803975508,
800000000: 4.033188392936374,
700000000: 2.4403230100275093,
600000000: 1.526311118999024,
500000000: 2.3249622859171177,
400000000: 2.3338361877717633,
300000000: 3.1812938148904073,
200000000: 71.46153163546012
},
'cpu2': {
1700000000: 0.0,
},
'cpu3': {
1700000000: 0.0,
}
}
def testParseCpuFreq(self):
initial = sysfs_power_monitor.SysfsPowerMonitor.ParseFreqSample(
self.initial_freq)
final = sysfs_power_monitor.SysfsPowerMonitor.ParseFreqSample(
self.final_freq)
self.assertDictEqual(initial, self.expected_initial_freq)
self.assertDictEqual(final, self.expected_final_freq)
def testComputeCpuStats(self):
results = sysfs_power_monitor.SysfsPowerMonitor.ComputeCpuStats(
self.expected_initial_freq, self.expected_final_freq)
for cpu in self.expected_freq_percents:
for freq in results[cpu]:
self.assertAlmostEqual(results[cpu][freq],
self.expected_freq_percents[cpu][freq])
def testComputeCpuStatsWithMissingData(self):
results = sysfs_power_monitor.SysfsPowerMonitor.ComputeCpuStats(
{'cpu1': {}}, {'cpu1': {}})
self.assertEqual(results['cpu1'][12345], 0)
results = sysfs_power_monitor.SysfsPowerMonitor.ComputeCpuStats(
{'cpu1': {123: 0}}, {'cpu1': {123: 0}})
self.assertEqual(results['cpu1'][123], 0)
results = sysfs_power_monitor.SysfsPowerMonitor.ComputeCpuStats(
{'cpu1': {123: 456}}, {'cpu1': {123: 456}})
self.assertEqual(results['cpu1'][123], 0)
def testComputeCpuStatsWithNumberChange(self):
results = sysfs_power_monitor.SysfsPowerMonitor.ComputeCpuStats(
{'cpu1': {'C0': 10, 'WFI': 20}},
{'cpu1': {'C0': 20, 'WFI': 10}})
self.assertEqual(results['cpu1']['C0'], 0)
self.assertEqual(results['cpu1']['WFI'], 0)
def testGetCpuStateForAndroidDevices(self):
class PlatformStub(object):
def __init__(self, run_command_return_value):
self._run_command_return_value = run_command_return_value
def RunCommand(self, _cmd):
return self._run_command_return_value
cpu_state_from_samsung_note3 = (
"C0\n\nC1\n\nC2\n\nC3\n\n"
"53658520886\n1809072\n7073\n1722554\n"
"1\n35\n300\n500\n"
"1412949256\n")
expected_cstate_dict = {
'C0': 1412895593940415,
'C1': 1809072,
'C2': 7073,
'C3': 1722554,
'WFI': 53658520886
}
cpus = ["cpu%d" % cpu for cpu in range(2)]
expected_result = dict(zip(cpus, [expected_cstate_dict]*len(cpus)))
sysfsmon = sysfs_power_monitor.SysfsPowerMonitor(
PlatformStub(cpu_state_from_samsung_note3))
# pylint: disable=W0212
sysfsmon._cpus = cpus
cstate = sysfsmon.GetCpuState()
result = android_platform_backend.AndroidPlatformBackend.ParseCStateSample(
cstate)
self.assertDictEqual(expected_result, result)
| bsd-3-clause |
chrisseto/tornado | demos/benchmark/benchmark.py | 15 | 2387 | #!/usr/bin/env python
#
# A simple benchmark of tornado's HTTP stack.
# Requires 'ab' to be installed.
#
# Running without profiling:
# demos/benchmark/benchmark.py
# demos/benchmark/benchmark.py --quiet --num_runs=5|grep "Requests per second"
#
# Running with profiling:
#
# python -m cProfile -o /tmp/prof demos/benchmark/benchmark.py
# python -m pstats /tmp/prof
# % sort time
# % stats 20
from tornado.ioloop import IOLoop
from tornado.options import define, options, parse_command_line
from tornado.web import RequestHandler, Application
import random
import signal
import subprocess
# choose a random port to avoid colliding with TIME_WAIT sockets left over
# from previous runs.
define("min_port", type=int, default=8000)
define("max_port", type=int, default=9000)
# Increasing --n without --keepalive will eventually run into problems
# due to TIME_WAIT sockets
define("n", type=int, default=15000)
define("c", type=int, default=25)
define("keepalive", type=bool, default=False)
define("quiet", type=bool, default=False)
# Repeat the entire benchmark this many times (on different ports)
# This gives JITs time to warm up, etc. Pypy needs 3-5 runs at
# --n=15000 for its JIT to reach full effectiveness
define("num_runs", type=int, default=1)
define("ioloop", type=str, default=None)
class RootHandler(RequestHandler):
def get(self):
self.write("Hello, world")
def _log(self):
pass
def handle_sigchld(sig, frame):
IOLoop.instance().add_callback_from_signal(IOLoop.instance().stop)
def main():
parse_command_line()
if options.ioloop:
IOLoop.configure(options.ioloop)
for i in xrange(options.num_runs):
run()
def run():
app = Application([("/", RootHandler)])
port = random.randrange(options.min_port, options.max_port)
app.listen(port, address='127.0.0.1')
signal.signal(signal.SIGCHLD, handle_sigchld)
args = ["ab"]
args.extend(["-n", str(options.n)])
args.extend(["-c", str(options.c)])
if options.keepalive:
args.append("-k")
if options.quiet:
# just stops the progress messages printed to stderr
args.append("-q")
args.append("http://127.0.0.1:%d/" % port)
subprocess.Popen(args)
IOLoop.instance().start()
IOLoop.instance().close()
del IOLoop._instance
assert not IOLoop.initialized()
if __name__ == '__main__':
main()
| apache-2.0 |
CHBMB/LazyLibrarian | lazylibrarian/classes.py | 1 | 1832 | # This file is part of LazyLibrarian.
#
# LazyLibrarian is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LazyLibrarian is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LazyLibrarian. If not, see <http://www.gnu.org/licenses/>.
#
# Stolen from Sick-Beard's classes.py ##
#
class SearchResult:
"""
Represents a search result from an indexer.
"""
def __init__(self):
self.provider = -1
# URL to the NZB/torrent file
self.url = ""
# used by some providers to store extra info associated with the result
self.extraInfo = []
# release name
self.name = ""
def __str__(self):
if self.provider is None or self.provider == -1:
return "Invalid provider, unable to print self"
myString = self.provider.name + " @ " + self.url + "\n"
myString += "Extra Info:\n"
for extra in self.extraInfo:
myString += " " + extra + "\n"
return myString
class NZBSearchResult(SearchResult):
"""
Regular NZB result with an URL to the NZB
"""
resultType = "nzb"
class NZBDataSearchResult(SearchResult):
"""
NZB result where the actual NZB XML data is stored in the extraInfo
"""
resultType = "nzbdata"
class TorrentSearchResult(SearchResult):
"""
Torrent result with an URL to the torrent
"""
resultType = "torrent"
| gpl-3.0 |
hastexo/edx-platform | openedx/core/djangoapps/theming/tests/test_storage.py | 26 | 2751 | """
Tests for comprehensive theme static files storage classes.
"""
import ddt
import re
from mock import patch
from django.test import TestCase, override_settings
from django.conf import settings
from openedx.core.djangoapps.theming.helpers import get_theme_base_dirs, Theme, get_theme_base_dir
from openedx.core.djangoapps.theming.storage import ThemeStorage
from openedx.core.djangolib.testing.utils import skip_unless_lms
@skip_unless_lms
@ddt.ddt
class TestStorageLMS(TestCase):
"""
Test comprehensive theming static files storage.
"""
def setUp(self):
super(TestStorageLMS, self).setUp()
self.themes_dir = get_theme_base_dirs()[0]
self.enabled_theme = "red-theme"
self.system_dir = settings.REPO_ROOT / "lms"
self.storage = ThemeStorage(location=self.themes_dir / self.enabled_theme / 'lms' / 'static')
@override_settings(DEBUG=True)
@ddt.data(
(True, "images/logo.png"),
(True, "images/favicon.ico"),
(False, "images/spinning.gif"),
)
@ddt.unpack
def test_themed(self, is_themed, asset):
"""
Verify storage returns True on themed assets
"""
self.assertEqual(is_themed, self.storage.themed(asset, self.enabled_theme))
@override_settings(DEBUG=True)
@ddt.data(
("images/logo.png", ),
("images/favicon.ico", ),
)
@ddt.unpack
def test_url(self, asset):
"""
Verify storage returns correct url depending upon the enabled theme
"""
with patch(
"openedx.core.djangoapps.theming.storage.get_current_theme",
return_value=Theme(self.enabled_theme, self.enabled_theme, get_theme_base_dir(self.enabled_theme)),
):
asset_url = self.storage.url(asset)
# remove hash key from file url
asset_url = re.sub(r"(\.\w+)(\.png|\.ico)$", r"\g<2>", asset_url)
expected_url = self.storage.base_url + self.enabled_theme + "/" + asset
self.assertEqual(asset_url, expected_url)
@override_settings(DEBUG=True)
@ddt.data(
("images/logo.png", ),
("images/favicon.ico", ),
)
@ddt.unpack
def test_path(self, asset):
"""
Verify storage returns correct file path depending upon the enabled theme
"""
with patch(
"openedx.core.djangoapps.theming.storage.get_current_theme",
return_value=Theme(self.enabled_theme, self.enabled_theme, get_theme_base_dir(self.enabled_theme)),
):
returned_path = self.storage.path(asset)
expected_path = self.themes_dir / self.enabled_theme / "lms/static/" / asset
self.assertEqual(expected_path, returned_path)
| agpl-3.0 |
fqul/scrapy | scrapy/utils/python.py | 41 | 10182 | """
This module contains essential stuff that should've come with Python itself ;)
"""
import os
import re
import inspect
import weakref
import errno
import six
from functools import partial, wraps
from scrapy.utils.decorators import deprecated
def flatten(x):
"""flatten(sequence) -> list
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
>>> [1, 2, [3,4], (5,6)]
[1, 2, [3, 4], (5, 6)]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
>>> flatten(["foo", "bar"])
['foo', 'bar']
>>> flatten(["foo", ["baz", 42], "bar"])
['foo', 'baz', 42, 'bar']
"""
return list(iflatten(x))
def iflatten(x):
"""iflatten(sequence) -> iterator
Similar to ``.flatten()``, but returns iterator instead"""
for el in x:
if is_listlike(el):
for el_ in flatten(el):
yield el_
else:
yield el
def is_listlike(x):
"""
>>> is_listlike("foo")
False
>>> is_listlike(5)
False
>>> is_listlike(b"foo")
False
>>> is_listlike([b"foo"])
True
>>> is_listlike((b"foo",))
True
>>> is_listlike({})
True
>>> is_listlike(set())
True
>>> is_listlike((x for x in range(3)))
True
>>> is_listlike(six.moves.xrange(5))
True
"""
return hasattr(x, "__iter__") and not isinstance(x, (six.text_type, bytes))
def unique(list_, key=lambda x: x):
"""efficient function to uniquify a list preserving item order"""
seen = set()
result = []
for item in list_:
seenkey = key(item)
if seenkey in seen:
continue
seen.add(seenkey)
result.append(item)
return result
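# e.g. unique([1, 2, 1, 3]) == [1, 2, 3], and with a key function
# unique(['a', 'A', 'b'], key=str.lower) == ['a', 'b'] (the first occurrence
# of each case-insensitive duplicate wins).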
@deprecated("scrapy.utils.python.to_unicode")
def str_to_unicode(text, encoding=None, errors='strict'):
""" This function is deprecated.
Please use scrapy.utils.python.to_unicode. """
return to_unicode(text, encoding, errors)
@deprecated("scrapy.utils.python.to_bytes")
def unicode_to_str(text, encoding=None, errors='strict'):
""" This function is deprecated. Please use scrapy.utils.python.to_bytes """
return to_bytes(text, encoding, errors)
def to_unicode(text, encoding=None, errors='strict'):
"""Return the unicode representation of a bytes object `text`. If `text`
is already an unicode object, return it as-is."""
if isinstance(text, six.text_type):
return text
if not isinstance(text, (bytes, six.text_type)):
raise TypeError('to_unicode must receive a bytes, str or unicode '
'object, got %s' % type(text).__name__)
if encoding is None:
encoding = 'utf-8'
return text.decode(encoding, errors)
def to_bytes(text, encoding=None, errors='strict'):
"""Return the binary representation of `text`. If `text`
is already a bytes object, return it as-is."""
if isinstance(text, bytes):
return text
if not isinstance(text, six.string_types):
raise TypeError('to_bytes must receive a unicode, str or bytes '
'object, got %s' % type(text).__name__)
if encoding is None:
encoding = 'utf-8'
return text.encode(encoding, errors)
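# e.g. to_bytes(u'caf\xe9') == b'caf\xc3\xa9' (UTF-8 by default), and
# to_unicode(b'caf\xc3\xa9') == u'caf\xe9'; both return their input
# unchanged when it is already of the target type.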
def to_native_str(text, encoding=None, errors='strict'):
""" Return str representation of `text`
(bytes in Python 2.x and unicode in Python 3.x). """
if six.PY2:
return to_bytes(text, encoding, errors)
else:
return to_unicode(text, encoding, errors)
def re_rsearch(pattern, text, chunk_size=1024):
"""
This function does a reverse search in a text using a regular expression
given in the attribute 'pattern'.
Since the re module does not provide this functionality, we have to search for
the expression in chunks of text extracted from the end (for the sake of efficiency).
At first, a chunk of 'chunk_size' kilobytes is extracted from the end, and searched for
the pattern. If the pattern is not found, another chunk is extracted, and another
search is performed.
This process continues until a match is found, or until the whole file is read.
In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing
the start position of the match, and the ending (regarding the entire text).
"""
def _chunk_iter():
offset = len(text)
while True:
offset -= (chunk_size * 1024)
if offset <= 0:
break
yield (text[offset:], offset)
yield (text, 0)
if isinstance(pattern, six.string_types):
pattern = re.compile(pattern)
for chunk, offset in _chunk_iter():
matches = [match for match in pattern.finditer(chunk)]
if matches:
start, end = matches[-1].span()
return offset + start, offset + end
return None
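# e.g. re_rsearch(r'</body>', html) gives the (start, end) offsets of the
# last '</body>' occurrence in html, scanning backwards one 1024 KB chunk at
# a time, or None if the pattern never occurs.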
def memoizemethod_noargs(method):
"""Decorator to cache the result of a method (without arguments) using a
weak reference to its object
"""
cache = weakref.WeakKeyDictionary()
@wraps(method)
def new_method(self, *args, **kwargs):
if self not in cache:
cache[self] = method(self, *args, **kwargs)
return cache[self]
return new_method
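# Usage sketch (expensive_lookup is a stand-in):
# class Spider(object):
#     @memoizemethod_noargs
#     def start_urls(self):
#         return expensive_lookup()  # computed once per Spider instance
# Keying the cache with a WeakKeyDictionary lets the cached entry die with
# the instance instead of keeping the instance alive.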
_BINARYCHARS = {six.b(chr(i)) for i in range(32)} - {b"\0", b"\t", b"\n", b"\r"}
_BINARYCHARS |= {ord(ch) for ch in _BINARYCHARS}
def isbinarytext(text):
"""Return True if the given text is considered binary, or False
otherwise, by looking for binary (non-text) byte values in it
"""
if not isinstance(text, bytes):
raise TypeError("text must be bytes, got '%s'" % type(text).__name__)
return any(c in _BINARYCHARS for c in text)
def get_func_args(func, stripself=False):
"""Return the argument name list of a callable"""
if inspect.isfunction(func):
func_args, _, _, _ = inspect.getargspec(func)
elif inspect.isclass(func):
return get_func_args(func.__init__, True)
elif inspect.ismethod(func):
return get_func_args(func.__func__, True)
elif inspect.ismethoddescriptor(func):
return []
elif isinstance(func, partial):
return [x for x in get_func_args(func.func)[len(func.args):]
if not (func.keywords and x in func.keywords)]
elif hasattr(func, '__call__'):
if inspect.isroutine(func):
return []
elif getattr(func, '__name__', None) == '__call__':
return []
else:
return get_func_args(func.__call__, True)
else:
raise TypeError('%s is not callable' % type(func))
if stripself:
func_args.pop(0)
return func_args
def get_spec(func):
"""Returns (args, kwargs) tuple for a function
>>> import re
>>> get_spec(re.match)
(['pattern', 'string'], {'flags': 0})
>>> class Test(object):
... def __call__(self, val):
... pass
... def method(self, val, flags=0):
... pass
>>> get_spec(Test)
(['self', 'val'], {})
>>> get_spec(Test.method)
(['self', 'val'], {'flags': 0})
>>> get_spec(Test().method)
(['self', 'val'], {'flags': 0})
"""
if inspect.isfunction(func) or inspect.ismethod(func):
spec = inspect.getargspec(func)
elif hasattr(func, '__call__'):
spec = inspect.getargspec(func.__call__)
else:
raise TypeError('%s is not callable' % type(func))
defaults = spec.defaults or []
firstdefault = len(spec.args) - len(defaults)
args = spec.args[:firstdefault]
kwargs = dict(zip(spec.args[firstdefault:], defaults))
return args, kwargs
def equal_attributes(obj1, obj2, attributes):
"""Compare two objects attributes"""
# not attributes given return False by default
if not attributes:
return False
for attr in attributes:
# support callables like itemgetter
if callable(attr):
if not attr(obj1) == attr(obj2):
return False
else:
# check that objects has attribute
if not hasattr(obj1, attr):
return False
if not hasattr(obj2, attr):
return False
# compare object attributes
if not getattr(obj1, attr) == getattr(obj2, attr):
return False
# all attributes equal
return True
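# e.g. equal_attributes(req1, req2, ['url', 'method']) compares those two
# attributes directly, while callables in the list are applied to each
# object instead, as in equal_attributes(d1, d2, [operator.itemgetter('id')]).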
class WeakKeyCache(object):
def __init__(self, default_factory):
self.default_factory = default_factory
self._weakdict = weakref.WeakKeyDictionary()
def __getitem__(self, key):
if key not in self._weakdict:
self._weakdict[key] = self.default_factory(key)
return self._weakdict[key]
@deprecated
def stringify_dict(dct_or_tuples, encoding='utf-8', keys_only=True):
"""Return a (new) dict with unicode keys (and values when "keys_only" is
False) of the given dict converted to strings. `dct_or_tuples` can be a
dict or a list of tuples, like any dict constructor supports.
"""
d = {}
for k, v in six.iteritems(dict(dct_or_tuples)):
k = k.encode(encoding) if isinstance(k, six.text_type) else k
if not keys_only:
v = v.encode(encoding) if isinstance(v, six.text_type) else v
d[k] = v
return d
@deprecated
def is_writable(path):
"""Return True if the given path can be written (if it exists) or created
(if it doesn't exist)
"""
if os.path.exists(path):
return os.access(path, os.W_OK)
else:
return os.access(os.path.dirname(path), os.W_OK)
@deprecated
def setattr_default(obj, name, value):
"""Set attribute value, but only if it's not already set. Similar to
setdefault() for dicts.
"""
if not hasattr(obj, name):
setattr(obj, name, value)
def retry_on_eintr(function, *args, **kw):
"""Run a function and retry it while getting EINTR errors"""
while True:
try:
return function(*args, **kw)
except IOError as e:
if e.errno != errno.EINTR:
raise
| bsd-3-clause |
CubicERP/geraldo | site/newsite/django_1_0/django/core/management/commands/createcachetable.py | 33 | 1810 | from django.core.management.base import LabelCommand
class Command(LabelCommand):
help = "Creates the table needed to use the SQL cache backend."
args = "<tablename>"
label = 'tablename'
requires_model_validation = False
def handle_label(self, tablename, **options):
from django.db import connection, transaction, models
fields = (
# "key" is a reserved word in MySQL, so use "cache_key" instead.
models.CharField(name='cache_key', max_length=255, unique=True, primary_key=True),
models.TextField(name='value'),
models.DateTimeField(name='expires', db_index=True),
)
table_output = []
index_output = []
qn = connection.ops.quote_name
for f in fields:
field_output = [qn(f.name), f.db_type()]
field_output.append("%sNULL" % (not f.null and "NOT " or ""))
if f.primary_key:
field_output.append("PRIMARY KEY")
elif f.unique:
field_output.append("UNIQUE")
if f.db_index:
unique = f.unique and "UNIQUE " or ""
index_output.append("CREATE %sINDEX %s_%s ON %s (%s);" % \
(unique, tablename, f.name, qn(tablename),
qn(f.name)))
table_output.append(" ".join(field_output))
full_statement = ["CREATE TABLE %s (" % qn(tablename)]
for i, line in enumerate(table_output):
full_statement.append(' %s%s' % (line, i < len(table_output)-1 and ',' or ''))
full_statement.append(');')
curs = connection.cursor()
curs.execute("\n".join(full_statement))
for statement in index_output:
curs.execute(statement)
transaction.commit_unless_managed()
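# For illustration, on a typical backend the statements built above come out
# roughly as (quoting and column types vary per database):
#   CREATE TABLE `my_cache_table` (
#       `cache_key` varchar(255) NOT NULL PRIMARY KEY,
#       `value` longtext NOT NULL,
#       `expires` datetime NOT NULL
#   );
#   CREATE INDEX my_cache_table_expires ON `my_cache_table` (`expires`);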
| lgpl-3.0 |
dmacvicar/spacewalk | backend/server/test/TestRedhat.py | 2 | 3529 | #
# Copyright (c) 2008--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# A class that makes testing stuff in backend/server/redhat_xmlrpc a little easier.
# It requires the rhn_server_redhat-xmlrpc.conf file in /etc/rhn/default.
# By default it uses the test-file-upload account in webdev.
# Change the value of the download_files_prefix option in rhn_server_redhat-xmlrpc.conf
# to some directory on your local machine.
# Mine is set to wregglej, for instance.
# Change the value of the mount_point (or add the mount_point option) in /etc/rhn/rhn_server.conf on your local machine.
# Mine is set to /home/devel, for instance.
# Place some tarballs in a directory on your machine that is under the path formed by joining the mount_point value
# with the download_files_prefix value.
# I put them in /home/devel/wregglej/testing/tarballs/t1.
# Modify the values in data to reflect your setup.
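# For example (hypothetical layout mirroring the notes above):
#   /etc/rhn/default/rhn_server_redhat-xmlrpc.conf:  download_files_prefix = wregglej
#   /etc/rhn/rhn_server.conf:                        mount_point = /home/devel
# so the 'path' entries in `data` below resolve under
# /home/devel/wregglej/testing/tarballs/t1.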
import TestServer
import server.redhat_xmlrpc
import SimpleXMLRPCServer
from spacewalk.common import rhnConfig
class TestRedhat( TestServer.TestServer ):
def __init__(self):
TestServer.TestServer.__init__(self)
rhnConfig.initCFG("server.redhat-xmlrpc")
self._init_xmlrpc()
def _init_xmlrpc(self):
self.rpc = server.redhat_xmlrpc
def getXmlRpc(self):
return self.rpc
def getUsername(self):
return "test-file-upload"
def getPassword(self):
return "password"
if __name__ == "__main__":
server = TestRedhat()
rpc = server.getXmlRpc()
rpc_downloads = rpc.downloads.Downloads()
category = "RHN Test Download"
channel = 'rhn-test-download'
data = [
{
'path' : "testing/tarballs/t1/examplesT1.tar.gz",
'name' : "examples1",
'channel' : channel,
'file_size' : '162671',
'md5sum' : 'a39e4a3e8a5615b01b40598fd23d2abf',
'category' : category,
'ordering' : '1',
},
{
'path' : "testing/tarballs/t1/examplesT2.tar.gz",
'name' : "examples2",
'channel' : channel,
'file_size' : '162671',
'md5sum' : 'a39e4a3e8a5615b01b40598fd23d2abf',
'category' : category,
'ordering' : '2',
},
]
info = {
'entries' : data,
'username' : 'test-file-upload',
'password' : 'password',
'channel' : channel,
'commit' : 1,
'force' : 1
}
### DELETE THE DOWNLOADS
#print rpc_downloads.delete_category_files(info)
### ADD THE DOWNLOADS
# print rpc_downloads.add_downloadable_files(info)
server = SimpleXMLRPCServer.SimpleXMLRPCServer(addr=('', 8000))
for func in rpc_downloads.functions:
print func
server.register_function( getattr( rpc_downloads, func), name="downloads.%s" % (func) )
server.serve_forever()
| gpl-2.0 |
ChawalitK/odoo | addons/account/wizard/account_invoice_state.py | 47 | 1679 | # -*- coding: utf-8 -*-
from openerp import models, api, _
from openerp.exceptions import UserError
class AccountInvoiceConfirm(models.TransientModel):
"""
This wizard will confirm all the selected draft invoices
"""
_name = "account.invoice.confirm"
_description = "Confirm the selected invoices"
@api.multi
def invoice_confirm(self):
context = dict(self._context or {})
active_ids = context.get('active_ids', []) or []
for record in self.env['account.invoice'].browse(active_ids):
if record.state not in ('draft', 'proforma', 'proforma2'):
raise UserError(_("Selected invoice(s) cannot be confirmed as they are not in 'Draft' or 'Pro-Forma' state."))
record.signal_workflow('invoice_open')
return {'type': 'ir.actions.act_window_close'}
class AccountInvoiceCancel(models.TransientModel):
"""
This wizard will cancel all the selected invoices.
If the 'Allow Cancelling Entries' option is not enabled on the journal, a warning message is raised.
"""
_name = "account.invoice.cancel"
_description = "Cancel the Selected Invoices"
@api.multi
def invoice_cancel(self):
context = dict(self._context or {})
active_ids = context.get('active_ids', []) or []
for record in self.env['account.invoice'].browse(active_ids):
if record.state in ('cancel', 'paid'):
raise UserError(_("Selected invoice(s) cannot be cancelled as they are already in 'Cancelled' or 'Done' state."))
record.signal_workflow('invoice_cancel')
return {'type': 'ir.actions.act_window_close'}
| gpl-3.0 |
SlimRoms/kernel_sony_apq8064 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
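# Worked example (editor's note, using the initial zoom of 0.5):
# us_to_px(2000) == 2000 / 1000 * 0.5 == 1.0 pixel, and
# px_to_us(1) == (1 / 0.5) * 1000 == 2000.0 microseconds,
# so the two conversions invert each other at any zoom level.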
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
pioneer/guess-language | guess_language/blocks.py | 65 | 2079 | ''' Categorize unicode characters by the code block in which they are found.
Copyright (c) 2008, Kent S Johnson
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
'''
import os, re
from bisect import bisect_left
def _loadBlocks():
''' Load Blocks.txt.
Create and return two parallel lists. One has the start and end points for
codepoint ranges, the second has the corresponding block name.
'''
# Expects our version of Blocks.txt to be in the same dir as this file
blocksPath = os.path.join(os.path.dirname(__file__), 'Blocks.txt')
endpoints = []
names = []
splitter = re.compile(r'^(....)\.\.(....); (.*)$')
for line in open(blocksPath):
if line.startswith('#'):
continue
line = line.strip()
if not line:
continue
m = splitter.match(line)
assert m
start = int(m.group(1), 16)
end = int(m.group(2), 16)
name = m.group(3)
endpoints.append(start)
endpoints.append(end)
names.append(name)
names.append(name)
return endpoints, names
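# The bundled Blocks.txt contains lines like (editor's note, abbreviated):
#   0000..007F; Basic Latin
#   0080..00FF; Latin-1 Supplement
# Each range contributes both of its endpoints and two copies of its
# name, keeping the two lists index-aligned for the bisect lookup below.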
_endpoints, _names = _loadBlocks()
def unicodeBlock(c):
''' Returns the name of the unicode block containing c
c must be a single character. '''
ix = bisect_left(_endpoints, ord(c))
return _names[ix]
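# Minimal usage sketch (editor's addition; assumes the bundled Blocks.txt):
#
#   >>> unicodeBlock(u'a')
#   'Basic Latin'
#   >>> unicodeBlock(u'\u0430')   # CYRILLIC SMALL LETTER A
#   'Cyrillic'
#
# bisect_left finds the insertion index of ord(c) in the flattened
# endpoint list; the parallel entry in _names is the containing block.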
| lgpl-2.1 |
py-chemist/web_apps | mol2chemfig/atom.py | 3 | 6364 | import math, string
import chemfig_mappings as cfm
from common import debug
# some atoms should carry their hydrogens to the left, rather than
# to the right. This is applied to solitary atoms, but not to bonded
# functional groups that contain those elements.
hydrogen_lefties = "O S Se Te F Cl Br I At".split() # I hope these are all ...
class Atom(object):
'''
wrapper around toolkit atom object, augmented with coordinates
helper class for molecule.Molecule
'''
explicit_characters = set(string.ascii_uppercase + string.digits)
quadrant_turf = 80 # 80 degrees have to remain free on either side
quadrants = [ # quadrants for hydrogen placement
[0, 0, 'east'],
[1, 180, 'west'],
[2, 270, 'south'],
[3, 90, 'north']
]
charge_positions = [ # angles for placement of detached charges
[0, 15, 'top_right'],
[1,165, 'top_left'],
[2, 90, 'top_center'],
[3,270, 'bottom_center'],
[4,345, 'bottom_right'],
[5,195, 'bottom_left']
]
charge_turf = 50 # reserved angle for charges - needs to be big enough for 2+
def __init__(self, options, idx, x, y, element, hydrogens, charge, radical, neighbors):
self.options = options
self.idx = idx
self.x = x
self.y = y
self.element = element
self.hydrogens = hydrogens
self.charge = charge
self.radical = radical
self.neighbors = neighbors # the indexes only
# angles of all attached bonds - to be populated later
self.bond_angles = []
# self.explicit = False # flag for explicitly printed atoms - set later
marker = self.options.get('markers', None)
if marker is not None:
self.marker = "%s%s" % (marker, self.idx + 1)
else:
self.marker = ""
def _score_angle(self, a, b, turf):
'''
helper: calculates the absolute angle between a and b,
with 0 <= angle <= 180, then compares it to the turf angle
and returns a score > 0 if the angle falls within the turf.
'''
diff = (a-b) % 360
angle = min(diff,360-diff)
return (max(0, turf - angle)) ** 2
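# Worked example (editor's note): for the east quadrant (0 degrees) and a
# bond at 100 degrees, diff = (0 - 100) % 360 = 260, angle = min(260, 100)
# = 100; with quadrant_turf = 80 the score is max(0, 80 - 100) ** 2 = 0,
# i.e. the bond leaves east free. A bond at 30 degrees instead scores
# (80 - 30) ** 2 = 2500, strongly penalizing that quadrant.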
def _score_angles(self, choices, turf):
'''
backend for score_angles
'''
aux = []
for priority, choice_angle, name in choices:
score = 0
for bond_angle in self.bond_angles:
score += self._score_angle(choice_angle, bond_angle, turf)
aux.append((score, priority, name))
aux.sort()
#if self.element == 'Cl':
#debug(aux)
named = [a[-1] for a in aux]
return named
def score_angles(self):
'''
determine which positions to use for hydrogens and charges.
We use one score for the placement of hydrogens w/ or w/o charge,
and a separate one for the placement of charges only.
Atoms: precedence east, west, south, north
tolerated impingement 10 degrees
Charges: precedence top right, top left, top straight,
bottom straight, others
'''
if len(self.bond_angles) > 0: # this atom is bonded
quadrants = self._score_angles(self.quadrants, self.quadrant_turf)
self.first_quadrant = quadrants[0]
self.second_quadrant = quadrants[1] # 2nd choice may be used for radical electrons
else: # this atom is solitary
if self.element in hydrogen_lefties:
self.first_quadrant = 'west'
self.second_quadrant = 'east'
else:
self.first_quadrant = 'east'
self.second_quadrant = 'west'
self.charge_angle = self._score_angles(self.charge_positions, self.charge_turf)[0]
def render_phantom(self):
'''
render a bond that closes a ring or loop, or for
late-rendered cross bonds. The target atom
is represented by a phantom.
This relies on .render() having been called earlier, which
it will be - atoms always precede their phantoms during
molecule tree traversal.
'''
atom_code = self.phantom
comment_code = cfm.format_closure_comment(
self.options,
self.idx + 1,
self.element,
self.hydrogens,
self.charge
)
return atom_code, comment_code
def render(self):
'''
render the atom and a comment
'''
atom_code, self.string_pos, \
self.phantom, self.phantom_pos = cfm.format_atom(
self.options,
self.idx + 1,
self.element,
self.hydrogens,
self.charge,
self.radical,
self.first_quadrant,
self.second_quadrant,
self.charge_angle
)
comment_code = cfm.format_atom_comment(
self.options,
self.idx + 1,
self.element,
self.hydrogens,
self.charge
)
marker_code = cfm.format_marker(self.marker)
if marker_code:
comment_code = " " # force an empty comment, needed after markers
self.explicit = bool(self.explicit_characters & set(atom_code))
# debug(self.idx, atom_code, self.explicit)
return marker_code + atom_code, comment_code
| gpl-3.0 |
MobileCloudNetworking/icnaas | mcn-icn-so/wsgi/icnaas/monitor.py | 1 | 4448 | # Copyright (c) 2013-2015, University of Bern, Switzerland.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Andre Gomes"
__copyright__ = "Copyright (c) 2013-2015, Mobile Cloud Networking (MCN) project"
__credits__ = ["Andre Gomes", "Bruno Sousa", "Claudio Marques"]
__license__ = "Apache"
__version__ = "1.2"
__maintainer__ = "Andre Gomes"
__email__ = "[email protected]"
__status__ = "Production"
"""
Monitor for ICNaaS.
Version 1.2
"""
from zabbix_api import ZabbixAPI
import time
import traceback
import sys
MAAS_UID = 'admin'
MAAS_PWD = 'zabbix'
CCN_ROUTER_CPU = 0
CCN_CACHE_SIZE = 1
CCN_CCND_STATUS = 2
CCN_CCNR_STATUS = 3
CCN_NETWORK_DAEMON_STATUS = 4
CCN_NUMBER_OF_INTERESTS = 5
CCN_REPOSITORY_SIZE = 6
CCN_TOTAL_NETWORK_TRAFFIC = 7
class ICNaaSMonitor(object):
def __init__(self, maas_endpoint):
"""
Initialize the ICNaaS Monitor object
"""
# Connect to MaaS
if maas_endpoint is None:
self.maas_endpoint = '130.92.70.142'
else:
self.maas_endpoint = maas_endpoint
self.server = 'http://' + self.maas_endpoint + '/zabbix'
self.username = MAAS_UID
self.password = MAAS_PWD
self.connFailed = False
# Zabbix API
self.zapi = ZabbixAPI(server=self.server)
for i in range(1,4):
try:
print('*** Connecting to MaaS at ' + self.server)
self.zapi.login(self.username, self.password)
print('*** Connected to MaaS')
self.connFailed = False
break
except Exception as e:
print('*** Caught exception: %s: %s' % (e.__class__, e))
traceback.print_exc()
print('*** Connection to MaaS has failed! Retrying ('+str(i)+').')
self.connFailed = True
time.sleep(3)
if self.connFailed:
print('*** Connection to MaaS has failed! Waiting for an update to try again.')
self.__metrics = []
@property
def metrics(self):
return self.__metrics
@metrics.setter
def metrics(self, value):
self.__metrics = value
pass
def get(self, public_ip):
measured_values = {}
for metric in self.metrics:
measured_values[metric] = self.get_value(metric, public_ip)
if measured_values[metric] is None:
return
return measured_values
def get_value(self, metric, public_ip):
raise NotImplementedError
class ICNaaSMonitorCCNRouter(ICNaaSMonitor):
def __init__(self, maas_endpoint):
ICNaaSMonitor.__init__(self, maas_endpoint)
self.metrics = [CCN_ROUTER_CPU, CCN_NUMBER_OF_INTERESTS]
def get_value(self, metric, public_ip):
item=""
if metric == CCN_ROUTER_CPU:
item = "system.cpu.util[,idle]"
if metric == CCN_CACHE_SIZE:
item = "ccnx.cache"
if metric == CCN_CCND_STATUS:
item = "proc.num[ccnd]"
if metric == CCN_CCNR_STATUS:
item = "proc.num[ccnr]"
if metric == CCN_NETWORK_DAEMON_STATUS:
item = "net.udp.listen[9695]"
if metric == CCN_NUMBER_OF_INTERESTS:
item = "ccnx.interests"
if metric == CCN_REPOSITORY_SIZE:
item = "ccnx.repository"
if metric == CCN_TOTAL_NETWORK_TRAFFIC:
item = "net.if.total[eth0]"
try:
hostid = self.zapi.host.get({"filter":{'ip':public_ip}})[0]["hostid"]
except:
print "WARNING: Public IP " + public_ip + " not found"
return
try:
value = self.zapi.item.get({"output":"extend","hostids":hostid,"filter":{"key_":item}})[0]["lastvalue"]
return value
except Exception as e:
print "ERROR: User metric not found"
traceback.print_exc()
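# Hedged usage sketch (editor's addition; the endpoint and router IP are
# placeholders, not values from any real deployment):
#
#   monitor = ICNaaSMonitorCCNRouter('130.92.70.142')
#   values = monitor.get('10.0.0.5')
#   if values is not None:
#       print values[CCN_ROUTER_CPU], values[CCN_NUMBER_OF_INTERESTS]
#
# get() returns None as soon as any metric lookup fails, so callers can
# treat None as "host not (yet) registered in Zabbix".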
| apache-2.0 |
openhatch/oh-mainline | vendor/packages/celery/celery/concurrency/base.py | 18 | 3964 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import os
import sys
import time
import traceback
from functools import partial
from .. import log
from ..datastructures import ExceptionInfo
from ..utils import timer2
from ..utils.encoding import safe_repr
def apply_target(target, args=(), kwargs={}, callback=None,
accept_callback=None, pid=None):
if accept_callback:
accept_callback(pid or os.getpid(), time.time())
callback(target(*args, **kwargs))
class BasePool(object):
RUN = 0x1
CLOSE = 0x2
TERMINATE = 0x3
Timer = timer2.Timer
signal_safe = True
rlimit_safe = True
is_green = False
_state = None
_pool = None
def __init__(self, limit=None, putlocks=True, logger=None, **options):
self.limit = limit
self.putlocks = putlocks
self.logger = logger or log.get_default_logger()
self.options = options
self.does_debug = self.logger.isEnabledFor(logging.DEBUG)
def on_start(self):
pass
def on_stop(self):
pass
def on_apply(self, *args, **kwargs):
pass
def on_terminate(self):
pass
def terminate_job(self, pid):
raise NotImplementedError(
"%s does not implement kill_job" % (self.__class__, ))
def stop(self):
self._state = self.CLOSE
self.on_stop()
self._state = self.TERMINATE
def terminate(self):
self._state = self.TERMINATE
self.on_terminate()
def start(self):
self.on_start()
self._state = self.RUN
def apply_async(self, target, args=None, kwargs=None, callback=None,
errback=None, accept_callback=None, timeout_callback=None,
soft_timeout=None, timeout=None, **compat):
"""Equivalent of the :func:`apply` built-in function.
Callbacks should optimally return as soon as possible, since
otherwise the thread which handles the result will get blocked.
"""
args = args or []
kwargs = kwargs or {}
on_ready = partial(self.on_ready, callback, errback)
on_worker_error = partial(self.on_worker_error, errback)
if self.does_debug:
self.logger.debug("TaskPool: Apply %s (args:%s kwargs:%s)",
target, safe_repr(args), safe_repr(kwargs))
return self.on_apply(target, args, kwargs,
callback=on_ready,
accept_callback=accept_callback,
timeout_callback=timeout_callback,
error_callback=on_worker_error,
waitforslot=self.putlocks,
soft_timeout=soft_timeout,
timeout=timeout)
def on_ready(self, callback, errback, ret_value):
"""What to do when a worker task is ready and its return value has
been collected."""
if isinstance(ret_value, ExceptionInfo):
if isinstance(ret_value.exception, (
SystemExit, KeyboardInterrupt)):
raise ret_value.exception
self.safe_apply_callback(errback, ret_value)
else:
self.safe_apply_callback(callback, ret_value)
def on_worker_error(self, errback, exc_info):
errback(exc_info)
def safe_apply_callback(self, fun, *args):
if fun:
try:
fun(*args)
except BaseException:
self.logger.error("Pool callback raised exception: %s",
traceback.format_exc(),
exc_info=sys.exc_info())
def _get_info(self):
return {}
@property
def info(self):
return self._get_info()
@property
def active(self):
return self._state == self.RUN
@property
def num_processes(self):
return self.limit
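# Minimal subclass sketch (editor's addition, not part of celery): a
# synchronous pool only needs on_apply; apply_target then runs the task
# inline and feeds its return value through the on_ready machinery.
#
#   class SyncPool(BasePool):
#       def on_apply(self, target, args=None, kwargs=None,
#                    callback=None, accept_callback=None, **_):
#           apply_target(target, args or (), kwargs or {},
#                        callback, accept_callback)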
| agpl-3.0 |
xxxhycl2010/powerline | tests/lib/__init__.py | 18 | 7870 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import imp
import sys
class Pl(object):
def __init__(self):
self.exceptions = []
self.errors = []
self.warns = []
self.debugs = []
self.infos = []
self.prefix = None
self.use_daemon_threads = True
for meth in ('error', 'warn', 'debug', 'exception', 'info'):
exec ((
'def {0}(self, msg, *args, **kwargs):\n'
' self.{0}s.append((kwargs.get("prefix") or self.prefix, msg, args, kwargs))\n'
).format(meth))
class Args(object):
theme_override = {}
config_override = {}
config_path = None
ext = ['shell']
renderer_module = None
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def urllib_read(query_url):
if query_url.startswith('http://ipv'):
if query_url.startswith('http://ipv4.icanhazip.com'):
return '127.0.0.1'
elif query_url.startswith('http://ipv6.icanhazip.com'):
return '2001:4801:7818:6:abc5:ba2c:ff10:275f'
elif query_url.startswith('http://freegeoip.net/json/'):
return '{"city": "Meppen", "region_code": "06", "region_name": "Niedersachsen", "areacode": "", "ip": "82.145.55.16", "zipcode": "49716", "longitude": 7.3167, "country_name": "Germany", "country_code": "DE", "metrocode": "", "latitude": 52.6833}'
elif query_url.startswith('http://query.yahooapis.com/v1/public/'):
if 'Meppen' in query_url:
return r'{"query":{"count":1,"created":"2013-03-02T13:20:22Z","lang":"en-US","results":{"weather":{"rss":{"version":"2.0","geo":"http://www.w3.org/2003/01/geo/wgs84_pos#","yweather":"http://xml.weather.yahoo.com/ns/rss/1.0","channel":{"title":"Yahoo! Weather - Russia, RU","link":"http://us.rd.yahoo.com/dailynews/rss/weather/Russia__RU/*http://weather.yahoo.com/forecast/RSXX1511_c.html","description":"Yahoo! Weather for Russia, RU","language":"en-us","lastBuildDate":"Sat, 02 Mar 2013 4:58 pm MSK","ttl":"60","location":{"city":"Russia","country":"Russia","region":""},"units":{"distance":"km","pressure":"mb","speed":"km/h","temperature":"C"},"wind":{"chill":"-9","direction":"0","speed":""},"atmosphere":{"humidity":"94","pressure":"1006.1","rising":"0","visibility":""},"astronomy":{"sunrise":"10:04 am","sunset":"7:57 pm"},"image":{"title":"Yahoo! Weather","width":"142","height":"18","link":"http://weather.yahoo.com","url":"http://l.yimg.com/a/i/brand/purplelogo//uh/us/news-wea.gif"},"item":{"title":"Conditions for Russia, RU at 4:58 pm MSK","lat":"59.45","long":"108.83","link":"http://us.rd.yahoo.com/dailynews/rss/weather/Russia__RU/*http://weather.yahoo.com/forecast/RSXX1511_c.html","pubDate":"Sat, 02 Mar 2013 4:58 pm MSK","condition":{"code":"30","date":"Sat, 02 Mar 2013 4:58 pm MSK","temp":"-9","text":"Partly Cloudy"},"description":"<img src=\"http://l.yimg.com/a/i/us/we/52/30.gif\"/><br />\n<b>Current Conditions:</b><br />\nPartly Cloudy, -9 C<BR />\n<BR /><b>Forecast:</b><BR />\nSat - Partly Cloudy. High: -9 Low: -19<br />\nSun - Partly Cloudy. High: -12 Low: -18<br />\n<br />\n<a href=\"http://us.rd.yahoo.com/dailynews/rss/weather/Russia__RU/*http://weather.yahoo.com/forecast/RSXX1511_c.html\">Full Forecast at Yahoo! Weather</a><BR/><BR/>\n(provided by <a href=\"http://www.weather.com\" >The Weather Channel</a>)<br/>","forecast":[{"code":"29","date":"2 Mar 2013","day":"Sat","high":"-9","low":"-19","text":"Partly Cloudy"},{"code":"30","date":"3 Mar 2013","day":"Sun","high":"-12","low":"-18","text":"Partly Cloudy"}],"guid":{"isPermaLink":"false","content":"RSXX1511_2013_03_03_7_00_MSK"}}}}}}}}'
elif 'Moscow' in query_url:
return r'{"query":{"count":1,"created":"2013-03-02T13:20:22Z","lang":"en-US","results":{"weather":{"rss":{"version":"2.0","geo":"http://www.w3.org/2003/01/geo/wgs84_pos#","yweather":"http://xml.weather.yahoo.com/ns/rss/1.0","channel":{"title":"Yahoo! Weather - Russia, RU","link":"http://us.rd.yahoo.com/dailynews/rss/weather/Russia__RU/*http://weather.yahoo.com/forecast/RSXX1511_c.html","description":"Yahoo! Weather for Russia, RU","language":"en-us","lastBuildDate":"Sat, 02 Mar 2013 4:58 pm MSK","ttl":"60","location":{"city":"Russia","country":"Russia","region":""},"units":{"distance":"km","pressure":"mb","speed":"km/h","temperature":"C"},"wind":{"chill":"-9","direction":"0","speed":""},"atmosphere":{"humidity":"94","pressure":"1006.1","rising":"0","visibility":""},"astronomy":{"sunrise":"10:04 am","sunset":"7:57 pm"},"image":{"title":"Yahoo! Weather","width":"142","height":"18","link":"http://weather.yahoo.com","url":"http://l.yimg.com/a/i/brand/purplelogo//uh/us/news-wea.gif"},"item":{"title":"Conditions for Russia, RU at 4:58 pm MSK","lat":"59.45","long":"108.83","link":"http://us.rd.yahoo.com/dailynews/rss/weather/Russia__RU/*http://weather.yahoo.com/forecast/RSXX1511_c.html","pubDate":"Sat, 02 Mar 2013 4:58 pm MSK","condition":{"code":"30","date":"Sat, 02 Mar 2013 4:58 pm MSK","temp":"19","text":"Partly Cloudy"},"description":"<img src=\"http://l.yimg.com/a/i/us/we/52/30.gif\"/><br />\n<b>Current Conditions:</b><br />\nPartly Cloudy, -9 C<BR />\n<BR /><b>Forecast:</b><BR />\nSat - Partly Cloudy. High: -9 Low: -19<br />\nSun - Partly Cloudy. High: -12 Low: -18<br />\n<br />\n<a href=\"http://us.rd.yahoo.com/dailynews/rss/weather/Russia__RU/*http://weather.yahoo.com/forecast/RSXX1511_c.html\">Full Forecast at Yahoo! Weather</a><BR/><BR/>\n(provided by <a href=\"http://www.weather.com\" >The Weather Channel</a>)<br/>","forecast":[{"code":"29","date":"2 Mar 2013","day":"Sat","high":"-9","low":"-19","text":"Partly Cloudy"},{"code":"30","date":"3 Mar 2013","day":"Sun","high":"-12","low":"-18","text":"Partly Cloudy"}],"guid":{"isPermaLink":"false","content":"RSXX1511_2013_03_03_7_00_MSK"}}}}}}}}'
else:
raise NotImplementedError
class Process(object):
def __init__(self, output, err):
self.output = output
self.err = err
def communicate(self):
return self.output, self.err
class ModuleReplace(object):
def __init__(self, name, new):
self.name = name
self.new = new
def __enter__(self):
self.old = sys.modules.get(self.name)
if not self.old:
try:
self.old = __import__(self.name)
except ImportError:
pass
sys.modules[self.name] = self.new
def __exit__(self, *args):
if self.old:
sys.modules[self.name] = self.old
else:
sys.modules.pop(self.name)
def replace_module(name, new=None, **kwargs):
if not new:
new = new_module(name, **kwargs)
return ModuleReplace(name, new)
def new_module(name, **kwargs):
module = imp.new_module(name)
for k, v in kwargs.items():
setattr(module, k, v)
return module
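# Hedged usage sketch (editor's addition; 'psutil' and its attribute are
# purely illustrative): temporarily stub a module so code under test
# imports the fake instead of the real thing.
#
#   with replace_module('psutil', cpu_percent=lambda: 42.0):
#       import psutil
#       assert psutil.cpu_percent() == 42.0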
class AttrReplace(object):
def __init__(self, obj, *args):
self.obj = obj
self.attrs = args[::2]
self.new = args[1::2]
def __enter__(self):
self.old = {}
for i, attr in enumerate(self.attrs):
try:
self.old[i] = getattr(self.obj, attr)
except AttributeError:
pass
for attr, new in zip(self.attrs, self.new):
setattr(self.obj, attr, new)
def __exit__(self, *args):
for i, attr in enumerate(self.attrs):
try:
old = self.old[i]
except KeyError:
delattr(self.obj, attr)
else:
setattr(self.obj, attr, old)
replace_attr = AttrReplace
def replace_module_module(module, name, **kwargs):
return replace_attr(module, name, new_module(name, **kwargs))
class ItemReplace(object):
def __init__(self, d, key, new, r=None):
self.key = key
self.new = new
self.d = d
self.r = r
def __enter__(self):
self.old = self.d.get(self.key)
self.d[self.key] = self.new
return self.r
def __exit__(self, *args):
if self.old is None:
try:
self.d.pop(self.key)
except KeyError:
pass
else:
self.d[self.key] = self.old
def replace_item(d, key, new):
return ItemReplace(d, key, new, d)
def replace_env(key, new, environ=None, **kwargs):
r = kwargs.copy()
r['environ'] = environ or {}
return ItemReplace(r['environ'], key, new, r)
| mit |
lucernae/geonode | geonode/views.py | 2 | 5518 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django import forms
from django.conf import settings
from django.contrib.auth import authenticate, login, get_user_model
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
try:
import json
except ImportError:
from django.utils import simplejson as json
from django.db.models import Q
from django.template.response import TemplateResponse
from geonode import get_version
from geonode.base.templatetags.base_tags import facets
from geonode.groups.models import GroupProfile
class AjaxLoginForm(forms.Form):
password = forms.CharField(widget=forms.PasswordInput)
username = forms.CharField()
def ajax_login(request):
if request.method != 'POST':
return HttpResponse(
content="ajax login requires HTTP POST",
status=405,
content_type="text/plain"
)
form = AjaxLoginForm(data=request.POST)
if form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user = authenticate(username=username, password=password)
if user is None or not user.is_active:
return HttpResponse(
content="bad credentials or disabled user",
status=400,
content_type="text/plain"
)
else:
login(request, user)
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
return HttpResponse(
content="successful login",
status=200,
content_type="text/plain"
)
else:
return HttpResponse(
"The form you submitted doesn't look like a username/password combo.",
content_type="text/plain",
status=400)
def ajax_lookup(request):
if request.method != 'POST':
return HttpResponse(
content='ajax user lookup requires HTTP POST',
status=405,
content_type='text/plain'
)
elif 'query' not in request.POST:
return HttpResponse(
content='use a field named "query" to specify a prefix to filter usernames',
content_type='text/plain')
keyword = request.POST['query']
users = get_user_model().objects.filter(Q(username__icontains=keyword)).exclude(Q(username='AnonymousUser') |
Q(is_active=False))
groups = GroupProfile.objects.filter(Q(title__icontains=keyword))
json_dict = {
'users': [({'username': u.username}) for u in users],
'count': users.count(),
}
json_dict['groups'] = [({'name': g.slug, 'title': g.title})
for g in groups]
return HttpResponse(
content=json.dumps(json_dict),
content_type='text/plain'
)
def err403(request):
if not request.user.is_authenticated():
return HttpResponseRedirect(
reverse('account_login') +
'?next=' +
request.get_full_path())
else:
return TemplateResponse(request, '401.html', {}, status=401).render()
def ident_json(request):
if not request.user.is_authenticated():
return HttpResponseRedirect(
reverse('account_login') +
'?next=' +
request.get_full_path())
json_data = {}
json_data['siteurl'] = settings.SITEURL
json_data['name'] = settings.PYCSW['CONFIGURATION']['metadata:main']['identification_title']
json_data['poc'] = {
'name': settings.PYCSW['CONFIGURATION']['metadata:main']['contact_name'],
'email': settings.PYCSW['CONFIGURATION']['metadata:main']['contact_email'],
'twitter': 'https://twitter.com/%s' % settings.TWITTER_SITE
}
json_data['version'] = get_version()
json_data['services'] = {
'csw': settings.CATALOGUE['default']['URL'],
'ows': settings.OGC_SERVER['default']['LOCATION']
}
json_data['counts'] = facets({'request': request, 'facet_type': 'home'})
return HttpResponse(content=json.dumps(json_data),
mimetype='application/json')
def h_keywords(request):
from geonode.base.models import HierarchicalKeyword as hk
keywords = json.dumps(hk.dump_bulk_tree())
return HttpResponse(content=keywords)
def moderator_contacted(request, inactive_user=None):
"""Used when a user signs up."""
user = get_user_model().objects.get(id=inactive_user)
return TemplateResponse(
request,
template="account/admin_approval_sent.html",
context={"email": user.email}
)
| gpl-3.0 |
iglpdc/nipype | nipype/interfaces/camino/tests/test_auto_Conmat.py | 10 | 1480 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..connectivity import Conmat
def test_Conmat_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='-inputfile %s',
mandatory=True,
),
output_root=dict(argstr='-outputroot %s',
genfile=True,
),
scalar_file=dict(argstr='-scalarfile %s',
requires=['tract_stat'],
),
target_file=dict(argstr='-targetfile %s',
mandatory=True,
),
targetname_file=dict(argstr='-targetnamefile %s',
),
terminal_output=dict(nohash=True,
),
tract_prop=dict(argstr='-tractstat %s',
units='NA',
xor=['tract_stat'],
),
tract_stat=dict(argstr='-tractstat %s',
requires=['scalar_file'],
units='NA',
xor=['tract_prop'],
),
)
inputs = Conmat.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Conmat_outputs():
output_map = dict(conmat_sc=dict(),
conmat_ts=dict(),
)
outputs = Conmat.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
varunagrawal/azure-services | varunagrawal/site-packages/django/contrib/gis/tests/geo3d/tests.py | 94 | 11317 | from __future__ import absolute_import
import os
import re
from django.utils.unittest import TestCase
from django.contrib.gis.db.models import Union, Extent3D
from django.contrib.gis.geos import GEOSGeometry, Point, Polygon
from django.contrib.gis.utils import LayerMapping, LayerMapError
from .models import (City3D, Interstate2D, Interstate3D, InterstateProj2D,
InterstateProj3D, Point2D, Point3D, MultiPoint3D, Polygon2D, Polygon3D)
data_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'data'))
city_file = os.path.join(data_path, 'cities', 'cities.shp')
vrt_file = os.path.join(data_path, 'test_vrt', 'test_vrt.vrt')
# The coordinates of each city, with Z values corresponding to their
# altitude in meters.
city_data = (
('Houston', (-95.363151, 29.763374, 18)),
('Dallas', (-96.801611, 32.782057, 147)),
('Oklahoma City', (-97.521157, 34.464642, 380)),
('Wellington', (174.783117, -41.315268, 14)),
('Pueblo', (-104.609252, 38.255001, 1433)),
('Lawrence', (-95.235060, 38.971823, 251)),
('Chicago', (-87.650175, 41.850385, 181)),
('Victoria', (-123.305196, 48.462611, 15)),
)
# Reference mapping of city name to its coordinates (the third item is the altitude / Z value).
city_dict = dict((name, coords) for name, coords in city_data)
# 3D freeway data derived from the National Elevation Dataset:
# http://seamless.usgs.gov/products/9arc.php
interstate_data = (
('I-45',
'LINESTRING(-95.3708481 29.7765870 11.339,-95.3694580 29.7787980 4.536,-95.3690305 29.7797359 9.762,-95.3691886 29.7812450 12.448,-95.3696447 29.7850144 10.457,-95.3702511 29.7868518 9.418,-95.3706724 29.7881286 14.858,-95.3711632 29.7896157 15.386,-95.3714525 29.7936267 13.168,-95.3717848 29.7955007 15.104,-95.3717719 29.7969804 16.516,-95.3717305 29.7982117 13.923,-95.3717254 29.8000778 14.385,-95.3719875 29.8013539 15.160,-95.3720575 29.8026785 15.544,-95.3721321 29.8040912 14.975,-95.3722074 29.8050998 15.688,-95.3722779 29.8060430 16.099,-95.3733818 29.8076750 15.197,-95.3741563 29.8103686 17.268,-95.3749458 29.8129927 19.857,-95.3763564 29.8144557 15.435)',
( 11.339, 4.536, 9.762, 12.448, 10.457, 9.418, 14.858,
15.386, 13.168, 15.104, 16.516, 13.923, 14.385, 15.16 ,
15.544, 14.975, 15.688, 16.099, 15.197, 17.268, 19.857,
15.435),
),
)
# Bounding box polygon for inner-loop of Houston (in projected coordinate
# system 32140), with elevation values from the National Elevation Dataset
# (see above).
bbox_wkt = 'POLYGON((941527.97 4225693.20,962596.48 4226349.75,963152.57 4209023.95,942051.75 4208366.38,941527.97 4225693.20))'
bbox_z = (21.71, 13.21, 9.12, 16.40, 21.71)
def gen_bbox():
bbox_2d = GEOSGeometry(bbox_wkt, srid=32140)
bbox_3d = Polygon(tuple((x, y, z) for (x, y), z in zip(bbox_2d[0].coords, bbox_z)), srid=32140)
return bbox_2d, bbox_3d
class Geo3DTest(TestCase):
"""
Only a subset of the PostGIS routines are 3D-enabled, and this TestCase
tries to test the features that can handle 3D and that are also
available within GeoDjango. For more information, see the PostGIS docs
on the routines that support 3D:
http://postgis.refractions.net/documentation/manual-1.4/ch08.html#PostGIS_3D_Functions
"""
def test01_3d(self):
"Test the creation of 3D models."
# 3D models for the rest of the tests will be populated in here.
# For each 3D data set create model (and 2D version if necessary),
# retrieve, and assert geometry is in 3D and contains the expected
# 3D values.
for name, pnt_data in city_data:
x, y, z = pnt_data
pnt = Point(x, y, z, srid=4326)
City3D.objects.create(name=name, point=pnt)
city = City3D.objects.get(name=name)
self.assertTrue(city.point.hasz)
self.assertEqual(z, city.point.z)
# Interstate (2D / 3D and Geographic/Projected variants)
for name, line, exp_z in interstate_data:
line_3d = GEOSGeometry(line, srid=4269)
# Using `hex` attribute because it omits 3D.
line_2d = GEOSGeometry(line_3d.hex, srid=4269)
# Creating a geographic and projected version of the
# interstate in both 2D and 3D.
Interstate3D.objects.create(name=name, line=line_3d)
InterstateProj3D.objects.create(name=name, line=line_3d)
Interstate2D.objects.create(name=name, line=line_2d)
InterstateProj2D.objects.create(name=name, line=line_2d)
# Retrieving and making sure it's 3D and has expected
# Z values -- shouldn't change because of coordinate system.
interstate = Interstate3D.objects.get(name=name)
interstate_proj = InterstateProj3D.objects.get(name=name)
for i in [interstate, interstate_proj]:
self.assertTrue(i.line.hasz)
self.assertEqual(exp_z, tuple(i.line.z))
# Creating 3D Polygon.
bbox2d, bbox3d = gen_bbox()
Polygon2D.objects.create(name='2D BBox', poly=bbox2d)
Polygon3D.objects.create(name='3D BBox', poly=bbox3d)
p3d = Polygon3D.objects.get(name='3D BBox')
self.assertTrue(p3d.poly.hasz)
self.assertEqual(bbox3d, p3d.poly)
def test01a_3d_layermapping(self):
"Testing LayerMapping on 3D models."
from .models import Point2D, Point3D
point_mapping = {'point' : 'POINT'}
mpoint_mapping = {'mpoint' : 'MULTIPOINT'}
# The VRT is 3D, but should still be able to map sans the Z.
lm = LayerMapping(Point2D, vrt_file, point_mapping, transform=False)
lm.save()
self.assertEqual(3, Point2D.objects.count())
# The city shapefile is 2D, and won't be able to fill the coordinates
# in the 3D model -- thus, a LayerMapError is raised.
self.assertRaises(LayerMapError, LayerMapping,
Point3D, city_file, point_mapping, transform=False)
# 3D model should take 3D data just fine.
lm = LayerMapping(Point3D, vrt_file, point_mapping, transform=False)
lm.save()
self.assertEqual(3, Point3D.objects.count())
# Making sure LayerMapping.make_multi works right, by converting
# a Point25D into a MultiPoint25D.
lm = LayerMapping(MultiPoint3D, vrt_file, mpoint_mapping, transform=False)
lm.save()
self.assertEqual(3, MultiPoint3D.objects.count())
def test02a_kml(self):
"Test GeoQuerySet.kml() with Z values."
h = City3D.objects.kml(precision=6).get(name='Houston')
# KML should be 3D.
# `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
self.assertTrue(ref_kml_regex.match(h.kml))
def test02b_geojson(self):
"Test GeoQuerySet.geojson() with Z values."
h = City3D.objects.geojson(precision=6).get(name='Houston')
# GeoJSON should be 3D
# `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
self.assertTrue(ref_json_regex.match(h.geojson))
def test03a_union(self):
"Testing the Union aggregate of 3D models."
# PostGIS query that returned the reference EWKT for this test:
# `SELECT ST_AsText(ST_Union(point)) FROM geo3d_city3d;`
ref_ewkt = 'SRID=4326;MULTIPOINT(-123.305196 48.462611 15,-104.609252 38.255001 1433,-97.521157 34.464642 380,-96.801611 32.782057 147,-95.363151 29.763374 18,-95.23506 38.971823 251,-87.650175 41.850385 181,174.783117 -41.315268 14)'
ref_union = GEOSGeometry(ref_ewkt)
union = City3D.objects.aggregate(Union('point'))['point__union']
self.assertTrue(union.hasz)
self.assertEqual(ref_union, union)
def test03b_extent(self):
"Testing the Extent3D aggregate for 3D models."
# `SELECT ST_Extent3D(point) FROM geo3d_city3d;`
ref_extent3d = (-123.305196, -41.315268, 14,174.783117, 48.462611, 1433)
extent1 = City3D.objects.aggregate(Extent3D('point'))['point__extent3d']
extent2 = City3D.objects.extent3d()
def check_extent3d(extent3d, tol=6):
for ref_val, ext_val in zip(ref_extent3d, extent3d):
self.assertAlmostEqual(ref_val, ext_val, tol)
for e3d in [extent1, extent2]:
check_extent3d(e3d)
def test04_perimeter(self):
"Testing GeoQuerySet.perimeter() on 3D fields."
# Reference query for values below:
# `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;`
ref_perim_3d = 76859.2620451
ref_perim_2d = 76859.2577803
tol = 6
self.assertAlmostEqual(ref_perim_2d,
Polygon2D.objects.perimeter().get(name='2D BBox').perimeter.m,
tol)
self.assertAlmostEqual(ref_perim_3d,
Polygon3D.objects.perimeter().get(name='3D BBox').perimeter.m,
tol)
def test05_length(self):
"Testing GeoQuerySet.length() on 3D fields."
# ST_Length_Spheroid is Z-aware, and thus does not need to use
# a separate function internally.
# `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
# FROM geo3d_interstate[2d|3d];`
tol = 3
ref_length_2d = 4368.1721949481
ref_length_3d = 4368.62547052088
self.assertAlmostEqual(ref_length_2d,
Interstate2D.objects.length().get(name='I-45').length.m,
tol)
self.assertAlmostEqual(ref_length_3d,
Interstate3D.objects.length().get(name='I-45').length.m,
tol)
# Making sure `ST_Length3D` is used for a projected
# and 3D model rather than `ST_Length`.
# `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
ref_length_2d = 4367.71564892392
# `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
ref_length_3d = 4368.16897234101
self.assertAlmostEqual(ref_length_2d,
InterstateProj2D.objects.length().get(name='I-45').length.m,
tol)
self.assertAlmostEqual(ref_length_3d,
InterstateProj3D.objects.length().get(name='I-45').length.m,
tol)
def test06_scale(self):
"Testing GeoQuerySet.scale() on Z values."
# Mapping of City name to reference Z values.
zscales = (-3, 4, 23)
for zscale in zscales:
for city in City3D.objects.scale(1.0, 1.0, zscale):
self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)
def test07_translate(self):
"Testing GeoQuerySet.translate() on Z values."
ztranslations = (5.23, 23, -17)
for ztrans in ztranslations:
for city in City3D.objects.translate(0, 0, ztrans):
self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
| gpl-2.0 |
BayanGroup/sentry | src/sentry/cache/redis.py | 7 | 1436 | """
sentry.cache.redis
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.conf import settings
from rb import Cluster
from sentry.utils import json
from .base import BaseCache
class RedisCache(BaseCache):
key_expire = 60 * 60 # 1 hour
def __init__(self, version=None, prefix=None, **options):
if not options:
# inherit default options from REDIS_OPTIONS
options = settings.SENTRY_REDIS_OPTIONS
options.setdefault('hosts', {
0: {},
})
self.cluster = Cluster(options['hosts'])
self.client = self.cluster.get_routing_client()
super(RedisCache, self).__init__(version=version, prefix=prefix)
def set(self, key, value, timeout, version=None):
key = self.make_key(key, version=version)
v = json.dumps(value)
if timeout:
self.client.setex(key, int(timeout), v)
else:
self.client.set(key, v)
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self.client.delete(key)
def get(self, key, version=None):
key = self.make_key(key, version=version)
result = self.client.get(key)
if result is not None:
result = json.loads(result)
return result
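# Usage sketch (editor's addition; assumes Sentry settings provide the
# Redis hosts). Values round-trip through JSON, so tuples come back as
# lists and keys as unicode:
#
#   cache = RedisCache(prefix='c')
#   cache.set('answer', {'value': 42}, timeout=60)
#   cache.get('answer')   # -> {u'value': 42}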
| bsd-3-clause |
veroc/Bika-LIMS | bika/lims/tools/bika_ar_export.py | 5 | 9933 | from DateTime import DateTime
from AccessControl import ClassSecurityInfo
from App.class_init import InitializeClass
from OFS.SimpleItem import SimpleItem
from Products.CMFCore import permissions
from Products.CMFCore.utils import UniqueObject, getToolByName
from bika.lims.config import ManageAnalysisRequests
from bika.lims.tools import ToolFolder
from cStringIO import StringIO
import csv
from bika.lims.interfaces.tools import Ibika_ar_export
from zope.interface import implements
class bika_ar_export(UniqueObject, SimpleItem):
""" ARExportTool """
implements(Ibika_ar_export)
security = ClassSecurityInfo()
id = 'bika_ar_export'
title = 'AR Export Tool'
description = 'Exports Analysis Request Data.'
meta_type = 'AR Export Tool'
security.declareProtected(ManageAnalysisRequests, 'export_file')
def export_file(self, info):
""" create the output file """
plone_view = self.restrictedTraverse('@@plone')
delimiter = ','
# make filename unique
now = DateTime()
filename = 'BikaResults%s.csv' % (now.strftime('%Y%m%d-%H%M%S'))
if self.bika_setup.getARAttachmentOption() == 'n':
allow_ar_attach = False
else:
allow_ar_attach = True
if self.bika_setup.getAnalysisAttachmentOption() == 'n':
allow_analysis_attach = False
else:
allow_analysis_attach = True
# group the analyses
analysisrequests = info['analysis_requests']
ars = {}
services = {}
categories = {}
dry_matter = 0
for ar in analysisrequests:
ar_id = ar.getId()
ars[ar_id] = {}
ars[ar_id]['Analyses'] = {}
ars[ar_id]['Price'] = 0
ars[ar_id]['Count'] = 0
if ar.getReportDryMatter():
dry_matter = 1
ars[ar_id]['DM'] = True
else:
ars[ar_id]['DM'] = False
analyses = {}
# extract the list of analyses in this batch
for analysis in ar.getPublishedAnalyses():
ars[ar_id]['Price'] += analysis.getPrice()
ars[ar_id]['Count'] += 1
service = analysis.Title()
analyses[service] = {}
analyses[service]['AsIs'] = analysis.getResult()
analyses[service]['DM'] = analysis.getResultDM() or None
analyses[service]['attach'] = analysis.getAttachment() or []
if not services.has_key(service):
service_obj = analysis.getService()
category = service_obj.getCategoryTitle()
category_uid = service_obj.getCategoryUID()
if not categories.has_key(category):
categories[category] = []
categories[category].append(service)
services[service] = {}
services[service]['unit'] = service_obj.getUnit()
services[service]['DM'] = service_obj.getReportDryMatter()
services[service]['DMOn'] = False
if allow_analysis_attach:
if service_obj.getAttachmentOption() == 'n':
services[service]['attach'] = False
else:
services[service]['attach'] = True
if services[service]['DM'] == True \
and ar.getReportDryMatter():
services[service]['DMOn'] = True
ars[ar_id]['Analyses'] = analyses
# sort by category and title
c_array = categories.keys()
c_array.sort(lambda x, y:cmp(x.lower(), y.lower()))
client = analysisrequests[0].aq_parent
client_id = client.getClientID()
client_name = client.Title()
contact = info['contact']
contact_id = contact.getUsername()
contact_name = contact.getFullname()
rows = []
# header labels
header = ['Header', 'Import/Export', 'Filename', 'Client', \
'Client ID', 'Contact', 'Contact ID', 'CC Recipients', 'CCEmails']
rows.append(header)
# header values
cc_contacts = [cc.getUsername() for cc in ar.getCCContact()]
ccs = ', '.join(cc_contacts)
header = ['Header Data', 'Export', filename, client_name, \
client_id, contact_name, contact_id, ccs, ar.getCCEmails(), \
'']
rows.append(header)
# category headers
s_array = []
header = ['', '', '', '', '', '', '', '', '', '', '']
for cat_name in c_array:
service_array = categories[cat_name]
service_array.sort(lambda x, y:cmp(x.lower(), y.lower()))
for service_name in service_array:
header.append(cat_name)
if services[service_name]['DMOn']:
header.append('')
if services[service_name]['attach']:
header.append('')
s_array.extend(service_array)
rows.append(header)
# column headers
header = ['Samples', 'Order ID', 'Client Reference', 'Client SID', 'Sample Type', \
'Sample Point', 'Sampling Date', 'Bika Sample ID', \
'Bika AR ID', 'Date Received', 'Date Published']
for service_name in s_array:
if services[service_name]['unit']:
analysis_service = '%s (%s)' % (service_name, services[service_name]['unit'])
else:
analysis_service = service_name
if services[service_name]['DMOn']:
analysis_service = '%s [As Fed]' % (analysis_service)
header.append(analysis_service)
if services[service_name]['DMOn']:
analysis_dm = '%s [Dry]' % (service_name)
header.append(analysis_dm)
if services[service_name]['attach']:
header.append('Attachments')
count_cell = len(header)
header.append('Total number of analyses')
header.append('Price excl VAT')
if allow_ar_attach:
header.append('Attachments')
rows.append(header)
# detail lines
total_count = 0
total_price = 0
count = 1
for ar in analysisrequests:
sample_num = 'Sample %s' % count
ar_id = ar.getId()
sample = ar.getSample()
sample_id = sample.getId()
sampletype = sample.getSampleType().Title()
samplepoint = sample.getSamplePoint() and sample.getSamplePoint().Title() or ''
datereceived = plone_view.toLocalizedTime(ar.getDateReceived(), \
long_format = 1)
datepublished = plone_view.toLocalizedTime(ar.getDatePublished(), \
long_format = 1)
if sample.getDateSampled():
datesampled = plone_view.toLocalizedTime(sample.getDateSampled(), long_format = 1)
else:
datesampled = None
# create detail line
detail = [sample_num, ar.getClientOrderNumber(), \
sample.getClientReference(), sample.getClientSampleID(), sampletype, \
samplepoint, datesampled, sample_id, ar_id, \
datereceived, datepublished]
for service_name in s_array:
if ars[ar_id]['Analyses'].has_key(service_name):
detail.append(ars[ar_id]['Analyses'][service_name]['AsIs'])
if services[service_name]['DMOn']:
detail.append(ars[ar_id]['Analyses'][service_name]['DM'])
if allow_analysis_attach:
if services[service_name]['attach'] == True:
attachments = ''
for attach in ars[ar_id]['Analyses'][service_name]['attach']:
file = attach.getAttachmentFile()
fname = getattr(file, 'filename')
attachments += fname
detail.append(attachments)
else:
detail.append(' ')
if services[service_name]['DMOn']:
detail.append(' ')
if services[service_name]['attach'] == True:
detail.append(' ')
for i in range(len(detail), count_cell):
detail.append('')
detail.append(ars[ar_id]['Count'])
detail.append(ars[ar_id]['Price'])
total_count += ars[ar_id]['Count']
total_price += ars[ar_id]['Price']
if allow_ar_attach:
attachments = ''
for attach in ar.getAttachment():
file = attach.getAttachmentFile()
fname = getattr(file, 'filename')
if attachments:
attachments += ', '
attachments += fname
detail.append(attachments)
rows.append(detail)
count += 1
detail = []
for i in range(count_cell - 1):
detail.append('')
detail.append('Total')
detail.append(total_count)
detail.append(total_price)
rows.append(detail)
#convert lists to csv string
ramdisk = StringIO()
writer = csv.writer(ramdisk, delimiter = delimiter, \
quoting = csv.QUOTE_NONNUMERIC)
assert(writer)
writer.writerows(rows)
result = ramdisk.getvalue()
ramdisk.close()
file_data = {}
file_data['file'] = result
file_data['file_name'] = filename
return file_data
InitializeClass(bika_ar_export)
| agpl-3.0 |
acshan/odoo | addons/website/models/website.py | 53 | 35327 | # -*- coding: utf-8 -*-
import cStringIO
import contextlib
import datetime
import hashlib
import inspect
import logging
import math
import mimetypes
import unicodedata
import os
import re
import time
import urlparse
from PIL import Image
from sys import maxint
import werkzeug
# optional python-slugify import (https://github.com/un33k/python-slugify)
try:
import slugify as slugify_lib
except ImportError:
slugify_lib = None
import openerp
from openerp.osv import orm, osv, fields
from openerp.tools import html_escape as escape, ustr, image_resize_and_sharpen, image_save_for_web
from openerp.tools.safe_eval import safe_eval
from openerp.addons.web.http import request
logger = logging.getLogger(__name__)
def url_for(path_or_uri, lang=None):
if isinstance(path_or_uri, unicode):
path_or_uri = path_or_uri.encode('utf-8')
current_path = request.httprequest.path
if isinstance(current_path, unicode):
current_path = current_path.encode('utf-8')
location = path_or_uri.strip()
force_lang = lang is not None
url = urlparse.urlparse(location)
if request and not url.netloc and not url.scheme and (url.path or force_lang):
location = urlparse.urljoin(current_path, location)
lang = lang or request.context.get('lang')
langs = [lg[0] for lg in request.website.get_languages()]
if (len(langs) > 1 or force_lang) and is_multilang_url(location, langs):
ps = location.split('/')
if ps[1] in langs:
# Replace the language only if we explicitly provide a language to url_for
if force_lang:
ps[1] = lang
# Remove the default language unless it's explicitly provided
elif ps[1] == request.website.default_lang_code:
ps.pop(1)
# Insert the context language or the provided language
elif lang != request.website.default_lang_code or force_lang:
ps.insert(1, lang)
location = '/'.join(ps)
return location.decode('utf-8')
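# Behaviour sketch (editor's note; assumes a request context, installed
# languages ['en_US', 'fr_FR'] with en_US as default, and a path that
# matches a multilang route):
#
#   url_for('/shop')                      -> '/shop'         (context lang is the default)
#   url_for('/shop', lang='fr_FR')        -> '/fr_FR/shop'
#   url_for('/fr_FR/shop', lang='en_US')  -> '/en_US/shop'   (forced lang is kept)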
def is_multilang_url(local_url, langs=None):
if not langs:
langs = [lg[0] for lg in request.website.get_languages()]
spath = local_url.split('/')
# if a language is already in the path, remove it
if spath[1] in langs:
spath.pop(1)
local_url = '/'.join(spath)
try:
# Try to match an endpoint in werkzeug's routing table
url = local_url.split('?')
path = url[0]
query_string = url[1] if len(url) > 1 else None
router = request.httprequest.app.get_db_router(request.db).bind('')
# Force the method to POST when matching. Odoo routes use methods ['POST'] and ['GET', 'POST']
func = router.match(path, method='POST', query_args=query_string)[0]
return (func.routing.get('website', False) and
func.routing.get('multilang', func.routing['type'] == 'http'))
except Exception:
return False
def slugify(s, max_length=None):
""" Transform a string to a slug that can be used in a url path.
This method will first try to do the job with python-slugify if present.
Otherwise it will process the string by stripping leading and trailing spaces,
converting unicode chars to ascii, lowercasing all chars and replacing spaces
and underscores with the hyphen "-".
:param s: str
:param max_length: int
:rtype: str
"""
s = ustr(s)
if slugify_lib:
# There are 2 different libraries with this import name; only python-slugify is supported
try:
return slugify_lib.slugify(s, max_length=max_length)
except TypeError:
pass
uni = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore').decode('ascii')
slug = re.sub('[\W_]', ' ', uni).strip().lower()
slug = re.sub('[-\s]+', '-', slug)
return slug[:max_length]
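# Illustrative results (editor's note; the python-slugify path may differ
# slightly from the fallback implementation shown above):
#
#   slugify(u"  Hello,  Wörld! ")         -> u'hello-world'
#   slugify(u"Page Title", max_length=4)  -> u'page'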
def slug(value):
if isinstance(value, orm.browse_record):
# [(id, name)] = value.name_get()
id, name = value.id, value.display_name
else:
# assume name_search result tuple
id, name = value
slugname = slugify(name or '').strip().strip('-')
if not slugname:
return str(id)
return "%s-%d" % (slugname, id)
# NOTE: as the pattern is used as it for the ModelConverter (ir_http.py), do not use any flags
_UNSLUG_RE = re.compile(r'(?:(\w{1,2}|\w[A-Za-z0-9-_]+?\w)-)?(-?\d+)(?=$|/)')
def unslug(s):
"""Extract slug and id from a string.
Always returns a 2-tuple (str|None, int|None)
"""
m = _UNSLUG_RE.match(s)
if not m:
return None, None
return m.group(1), int(m.group(2))
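# Round-trip examples: unslug('hello-world-42') -> ('hello-world', 42),
# unslug('42') -> (None, 42), and a non-matching string -> (None, None)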
def urlplus(url, params):
return werkzeug.Href(url)(params or None)
class website(osv.osv):
def _get_menu_website(self, cr, uid, ids, context=None):
# If a menu is changed, update all websites
return self.search(cr, uid, [], context=context)
def _get_menu(self, cr, uid, ids, name, arg, context=None):
root_domain = [('parent_id', '=', False)]
menus = self.pool.get('website.menu').search(cr, uid, root_domain, order='id', context=context)
menu = menus and menus[0] or False
return dict.fromkeys(ids, menu)
_name = "website" # Avoid website.website convention for conciseness (for new api). Got a special authorization from xmo and rco
_description = "Website"
_columns = {
'name': fields.char('Domain'),
'company_id': fields.many2one('res.company', string="Company"),
'language_ids': fields.many2many('res.lang', 'website_lang_rel', 'website_id', 'lang_id', 'Languages'),
'default_lang_id': fields.many2one('res.lang', string="Default language"),
'default_lang_code': fields.related('default_lang_id', 'code', type="char", string="Default language code", store=True),
'social_twitter': fields.char('Twitter Account'),
'social_facebook': fields.char('Facebook Account'),
'social_github': fields.char('GitHub Account'),
'social_linkedin': fields.char('LinkedIn Account'),
'social_youtube': fields.char('Youtube Account'),
'social_googleplus': fields.char('Google+ Account'),
'google_analytics_key': fields.char('Google Analytics Key'),
'user_id': fields.many2one('res.users', string='Public User'),
'partner_id': fields.related('user_id','partner_id', type='many2one', relation='res.partner', string='Public Partner'),
'menu_id': fields.function(_get_menu, relation='website.menu', type='many2one', string='Main Menu',
store= {
'website.menu': (_get_menu_website, ['sequence','parent_id','website_id'], 10)
})
}
_defaults = {
'company_id': lambda self,cr,uid,c: self.pool['ir.model.data'].xmlid_to_res_id(cr, openerp.SUPERUSER_ID, 'base.main_company'),
}
# cf. Wizard hack in website_views.xml
def noop(self, *args, **kwargs):
pass
def write(self, cr, uid, ids, vals, context=None):
self._get_languages.clear_cache(self)
return super(website, self).write(cr, uid, ids, vals, context)
def new_page(self, cr, uid, name, template='website.default_page', ispage=True, context=None):
context = context or {}
imd = self.pool.get('ir.model.data')
view = self.pool.get('ir.ui.view')
template_module, template_name = template.split('.')
# completely arbitrary max_length
page_name = slugify(name, max_length=50)
page_xmlid = "%s.%s" % (template_module, page_name)
try:
# existing page
imd.get_object_reference(cr, uid, template_module, page_name)
except ValueError:
# new page
_, template_id = imd.get_object_reference(cr, uid, template_module, template_name)
page_id = view.copy(cr, uid, template_id, context=context)
page = view.browse(cr, uid, page_id, context=context)
page.write({
'arch': page.arch.replace(template, page_xmlid),
'name': page_name,
'page': ispage,
})
imd.create(cr, uid, {
'name': page_name,
'module': template_module,
'model': 'ir.ui.view',
'res_id': page_id,
'noupdate': True
}, context=context)
return page_xmlid
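# e.g. (hedged sketch): new_page(cr, uid, u"About Us") copies the
# website.default_page view, registers the xmlid 'website.about-us' and
# returns it; the page is then typically reachable under /page/about-us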
def page_for_name(self, cr, uid, ids, name, module='website', context=None):
# build the xmlid a page with this name would get
return '%s.%s' % (module, slugify(name, max_length=50))
def page_exists(self, cr, uid, ids, name, module='website', context=None):
try:
name = (name or "").replace("/page/website.", "").replace("/page/", "")
if not name:
return False
return self.pool["ir.model.data"].get_object_reference(cr, uid, module, name)
except Exception:
return False
@openerp.tools.ormcache(skiparg=3)
def _get_languages(self, cr, uid, id):
website = self.browse(cr, uid, id)
return [(lg.code, lg.name) for lg in website.language_ids]
def get_languages(self, cr, uid, ids, context=None):
return self._get_languages(cr, uid, ids[0])
def get_alternate_languages(self, cr, uid, ids, req=None, context=None):
langs = []
if req is None:
req = request.httprequest
default = self.get_current_website(cr, uid, context=context).default_lang_code
uri = req.path
if req.query_string:
uri += '?' + req.query_string
shorts = []
for code, name in self.get_languages(cr, uid, ids, context=context):
lg_path = ('/' + code) if code != default else ''
lg = code.split('_')
shorts.append(lg[0])
lang = {
'hreflang': ('-'.join(lg)).lower(),
'short': lg[0],
'href': req.url_root[0:-1] + lg_path + uri,
}
langs.append(lang)
for lang in langs:
if shorts.count(lang['short']) == 1:
lang['hreflang'] = lang['short']
return langs
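# e.g. (hedged, for languages en_US (default) and fr_FR while serving /shop):
# returns entries such as {'hreflang': 'en', 'short': 'en',
# 'href': 'http://example.com/shop'} and {'hreflang': 'fr', 'short': 'fr',
# 'href': 'http://example.com/fr_FR/shop'}; 'example.com' is a placeholder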
def get_current_website(self, cr, uid, context=None):
# TODO: Select website, currently hard coded
return self.pool['website'].browse(cr, uid, 1, context=context)
def is_publisher(self, cr, uid, ids, context=None):
Access = self.pool['ir.model.access']
is_website_publisher = Access.check(cr, uid, 'ir.ui.view', 'write', False, context=context)
return is_website_publisher
def is_user(self, cr, uid, ids, context=None):
Access = self.pool['ir.model.access']
return Access.check(cr, uid, 'ir.ui.menu', 'read', False, context=context)
def get_template(self, cr, uid, ids, template, context=None):
if isinstance(template, (int, long)):
view_id = template
else:
if '.' not in template:
template = 'website.%s' % template
module, xmlid = template.split('.', 1)
model, view_id = request.registry["ir.model.data"].get_object_reference(cr, uid, module, xmlid)
return self.pool["ir.ui.view"].browse(cr, uid, view_id, context=context)
def _render(self, cr, uid, ids, template, values=None, context=None):
# TODO: remove this. (just kept for backward api compatibility for saas-3)
return self.pool['ir.ui.view'].render(cr, uid, template, values=values, context=context)
def render(self, cr, uid, ids, template, values=None, status_code=None, context=None):
# TODO: remove this. (just kept for backward api compatibility for saas-3)
return request.render(template, values, uid=uid)
def pager(self, cr, uid, ids, url, total, page=1, step=30, scope=5, url_args=None, context=None):
# Compute Pager
page_count = int(math.ceil(float(total) / step))
page = max(1, min(int(page if str(page).isdigit() else 1), page_count))
scope -= 1
pmin = max(page - int(math.floor(scope/2)), 1)
pmax = min(pmin + scope, page_count)
if pmax - pmin < scope:
pmin = pmax - scope if pmax - scope > 0 else 1
def get_url(page):
_url = "%s/page/%s" % (url, page) if page > 1 else url
if url_args:
_url = "%s?%s" % (_url, werkzeug.url_encode(url_args))
return _url
return {
"page_count": page_count,
"offset": (page - 1) * step,
"page": {
'url': get_url(page),
'num': page
},
"page_start": {
'url': get_url(pmin),
'num': pmin
},
"page_previous": {
'url': get_url(max(pmin, page - 1)),
'num': max(pmin, page - 1)
},
"page_next": {
'url': get_url(min(pmax, page + 1)),
'num': min(pmax, page + 1)
},
"page_end": {
'url': get_url(pmax),
'num': pmax
},
"pages": [
{'url': get_url(page), 'num': page}
for page in xrange(pmin, pmax+1)
]
}
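# Illustrative values (hedged): pager(cr, uid, ids, '/shop', total=100,
# page=2, step=30) gives page_count == 4, offset == 30, and urls built as
# '/shop' for page 1 or '/shop/page/3' for page 3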
def rule_is_enumerable(self, rule):
""" Checks that it is possible to generate sensible GET queries for
a given rule (if the endpoint matches its own requirements)
:type rule: werkzeug.routing.Rule
:rtype: bool
"""
endpoint = rule.endpoint
methods = rule.methods or ['GET']
converters = rule._converters.values()
if not ('GET' in methods
and endpoint.routing['type'] == 'http'
and endpoint.routing['auth'] in ('none', 'public')
and endpoint.routing.get('website', False)
and all(hasattr(converter, 'generate') for converter in converters)):
return False
# don't list routes with arguments that have neither a default value nor a converter
spec = inspect.getargspec(endpoint.method.original_func)
# remove self and arguments having a default value
defaults_count = len(spec.defaults or [])
args = spec.args[1:(-defaults_count or None)]
# check that all args have a converter
return all( (arg in rule._converters) for arg in args)
def enumerate_pages(self, cr, uid, ids, query_string=None, context=None):
""" Available pages in the website/CMS. This is mostly used for links
generation and can be overridden by modules setting up new HTML
controllers for dynamic pages (e.g. blog).
By default, returns template views marked as pages.
:param str query_string: a (user-provided) string, fetches pages
matching the string
:returns: a list of mappings with two keys: ``name`` is the displayable
name of the resource (page), ``url`` is the absolute URL
of the same.
:rtype: list({name: str, url: str})
"""
router = request.httprequest.app.get_db_router(request.db)
# Force enumeration to be performed as public user
url_list = []
for rule in router.iter_rules():
if not self.rule_is_enumerable(rule):
continue
converters = rule._converters or {}
if query_string and not converters and (query_string not in rule.build([{}], append_unknown=False)[1]):
continue
values = [{}]
convitems = converters.items()
# converters with a domain are processed after the other ones
gd = lambda x: hasattr(x[1], 'domain') and (x[1].domain != '[]')
convitems.sort(lambda x, y: cmp(gd(x), gd(y)))
for (i,(name, converter)) in enumerate(convitems):
newval = []
for val in values:
query = i==(len(convitems)-1) and query_string
for v in converter.generate(request.cr, uid, query=query, args=val, context=context):
newval.append( val.copy() )
v[name] = v['loc']
del v['loc']
newval[-1].update(v)
values = newval
for value in values:
domain_part, url = rule.build(value, append_unknown=False)
page = {'loc': url}
for key,val in value.items():
if key.startswith('__'):
page[key[2:]] = val
if url in ('/sitemap.xml',):
continue
if url in url_list:
continue
url_list.append(url)
yield page
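# Typical use (hedged sketch): [page['loc'] for page in
# website.enumerate_pages(cr, uid, ids)] yields urls such as '/' or
# '/page/contactus' for the template views marked as pages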
def search_pages(self, cr, uid, ids, needle=None, limit=None, context=None):
name = (needle or "").replace("/page/website.", "").replace("/page/", "")
res = []
for page in self.enumerate_pages(cr, uid, ids, query_string=name, context=context):
if needle in page['loc']:
res.append(page)
if len(res) == limit:
break
return res
def kanban(self, cr, uid, ids, model, domain, column, template, step=None, scope=None, orderby=None, context=None):
step = step and int(step) or 10
scope = scope and int(scope) or 5
orderby = orderby or "name"
get_args = dict(request.httprequest.args or {})
model_obj = self.pool[model]
relation = model_obj._columns.get(column)._obj
relation_obj = self.pool[relation]
get_args.setdefault('kanban', "")
kanban = get_args.pop('kanban')
kanban_url = "?%s&kanban=" % werkzeug.url_encode(get_args)
pages = {}
for col in kanban.split(","):
if col:
col = col.split("-")
pages[int(col[0])] = int(col[1])
objects = []
for group in model_obj.read_group(cr, uid, domain, ["id", column], groupby=column):
obj = {}
# browse column
relation_id = group[column][0]
obj['column_id'] = relation_obj.browse(cr, uid, relation_id)
obj['kanban_url'] = kanban_url
for k, v in pages.items():
if k != relation_id:
obj['kanban_url'] += "%s-%s" % (k, v)
# pager
number = model_obj.search(cr, uid, group['__domain'], count=True)
obj['page_count'] = int(math.ceil(float(number) / step))
obj['page'] = pages.get(relation_id) or 1
if obj['page'] > obj['page_count']:
obj['page'] = obj['page_count']
offset = (obj['page']-1) * step
obj['page_start'] = max(obj['page'] - int(math.floor((scope-1)/2)), 1)
obj['page_end'] = min(obj['page_start'] + (scope-1), obj['page_count'])
# view data
obj['domain'] = group['__domain']
obj['model'] = model
obj['step'] = step
obj['orderby'] = orderby
# browse objects
object_ids = model_obj.search(cr, uid, group['__domain'], limit=step, offset=offset, order=orderby)
obj['object_ids'] = model_obj.browse(cr, uid, object_ids)
objects.append(obj)
values = {
'objects': objects,
'range': range,
'template': template,
}
return request.website._render("website.kanban_contain", values)
def kanban_col(self, cr, uid, ids, model, domain, page, template, step, orderby, context=None):
html = ""
model_obj = self.pool[model]
domain = safe_eval(domain)
step = int(step)
offset = (int(page)-1) * step
object_ids = model_obj.search(cr, uid, domain, limit=step, offset=offset, order=orderby)
object_ids = model_obj.browse(cr, uid, object_ids)
for object_id in object_ids:
html += request.website._render(template, {'object_id': object_id})
return html
def _image_placeholder(self, response):
# file_open may return a StringIO. A StringIO can be closed, but it is not
# a context manager in Python 2, though that is fixed in Python 3
with contextlib.closing(openerp.tools.misc.file_open(
os.path.join('web', 'static', 'src', 'img', 'placeholder.png'),
mode='rb')) as f:
response.data = f.read()
return response.make_conditional(request.httprequest)
def _image(self, cr, uid, model, id, field, response, max_width=maxint, max_height=maxint, cache=None, context=None):
""" Fetches the requested field and ensures it does not go above
(max_width, max_height), resizing it if necessary.
Resizing is bypassed if the object provides a $field_big, which will
be interpreted as a pre-resized version of the base field.
If the record is not found or does not have the requested field,
returns a placeholder image via :meth:`~._image_placeholder`.
Sets and checks conditional response parameters:
* :mailheader:`ETag` is always set (and checked)
* :mailheader:`Last-Modified` is set iff the record has a concurrency
field (``__last_update``)
The requested field is assumed to be base64-encoded image data in
all cases.
"""
Model = self.pool[model]
id = int(id)
ids = Model.search(cr, uid,
[('id', '=', id)], context=context)
if not ids and 'website_published' in Model._fields:
ids = Model.search(cr, openerp.SUPERUSER_ID,
[('id', '=', id), ('website_published', '=', True)], context=context)
if not ids:
return self._image_placeholder(response)
concurrency = '__last_update'
[record] = Model.read(cr, openerp.SUPERUSER_ID, [id],
[concurrency, field],
context=context)
if concurrency in record:
server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
try:
response.last_modified = datetime.datetime.strptime(
record[concurrency], server_format + '.%f')
except ValueError:
# just in case we have a timestamp without microseconds
response.last_modified = datetime.datetime.strptime(
record[concurrency], server_format)
# Field does not exist on model or field set to False
if not record.get(field):
# FIXME: maybe a field which does not exist should be a 404?
return self._image_placeholder(response)
response.set_etag(hashlib.sha1(record[field]).hexdigest())
response.make_conditional(request.httprequest)
if cache:
response.cache_control.max_age = cache
response.expires = int(time.time() + cache)
# conditional request match
if response.status_code == 304:
return response
data = record[field].decode('base64')
image = Image.open(cStringIO.StringIO(data))
response.mimetype = Image.MIME[image.format]
filename = '%s_%s.%s' % (model.replace('.', '_'), id, str(image.format).lower())
response.headers['Content-Disposition'] = 'inline; filename="%s"' % filename
if (not max_width) and (not max_height):
response.data = data
return response
w, h = image.size
max_w = int(max_width) if max_width else maxint
max_h = int(max_height) if max_height else maxint
if w < max_w and h < max_h:
response.data = data
else:
size = (max_w, max_h)
img = image_resize_and_sharpen(image, size, preserve_aspect_ratio=True)
image_save_for_web(img, response.stream, format=image.format)
# invalidate content-length computed by make_conditional as
# writing to response.stream does not do it (as of werkzeug 0.9.3)
del response.headers['Content-Length']
return response
def image_url(self, cr, uid, record, field, size=None, context=None):
"""Returns a local url that points to the image field of a given browse record."""
model = record._name
sudo_record = record.sudo()
id = '%s_%s' % (record.id, hashlib.sha1(sudo_record.write_date or sudo_record.create_date or '').hexdigest()[0:7])
size = '' if size is None else '/%s' % size
return '/website/image/%s/%s/%s%s' % (model, id, field, size)
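# e.g. (hedged, the hash fragment is hypothetical):
# image_url(cr, uid, partner, 'image_small', size='64x64')
# -> '/website/image/res.partner/7_0a1b2c3/image_small/64x64'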
class website_menu(osv.osv):
_name = "website.menu"
_description = "Website Menu"
_columns = {
'name': fields.char('Menu', required=True, translate=True),
'url': fields.char('Url'),
'new_window': fields.boolean('New Window'),
'sequence': fields.integer('Sequence'),
# TODO: support multiwebsite once done for ir.ui.views
'website_id': fields.many2one('website', 'Website'),
'parent_id': fields.many2one('website.menu', 'Parent Menu', select=True, ondelete="cascade"),
'child_id': fields.one2many('website.menu', 'parent_id', string='Child Menus'),
'parent_left': fields.integer('Parent Left', select=True),
'parent_right': fields.integer('Parent Right', select=True),
}
def __defaults_sequence(self, cr, uid, context):
menu = self.search_read(cr, uid, [(1,"=",1)], ["sequence"], limit=1, order="sequence DESC", context=context)
return menu and menu[0]["sequence"] or 0
_defaults = {
'url': '',
'sequence': __defaults_sequence,
'new_window': False,
}
_parent_store = True
_parent_order = 'sequence'
_order = "sequence"
# would be better to take a menu_id as argument
def get_tree(self, cr, uid, website_id, context=None):
def make_tree(node):
menu_node = dict(
id=node.id,
name=node.name,
url=node.url,
new_window=node.new_window,
sequence=node.sequence,
parent_id=node.parent_id.id,
children=[],
)
for child in node.child_id:
menu_node['children'].append(make_tree(child))
return menu_node
menu = self.pool.get('website').browse(cr, uid, website_id, context=context).menu_id
return make_tree(menu)
def save(self, cr, uid, website_id, data, context=None):
def replace_id(old_id, new_id):
for menu in data['data']:
if menu['id'] == old_id:
menu['id'] = new_id
if menu['parent_id'] == old_id:
menu['parent_id'] = new_id
to_delete = data['to_delete']
if to_delete:
self.unlink(cr, uid, to_delete, context=context)
for menu in data['data']:
mid = menu['id']
if isinstance(mid, str):
new_id = self.create(cr, uid, {'name': menu['name']}, context=context)
replace_id(mid, new_id)
for menu in data['data']:
self.write(cr, uid, [menu['id']], menu, context=context)
return True
class ir_attachment(osv.osv):
_inherit = "ir.attachment"
def _website_url_get(self, cr, uid, ids, name, arg, context=None):
result = {}
for attach in self.browse(cr, uid, ids, context=context):
if attach.url:
result[attach.id] = attach.url
else:
result[attach.id] = self.pool['website'].image_url(cr, uid, attach, 'datas')
return result
def _datas_checksum(self, cr, uid, ids, name, arg, context=None):
result = dict.fromkeys(ids, False)
attachments = self.read(cr, uid, ids, ['res_model'], context=context)
view_attachment_ids = [attachment['id'] for attachment in attachments if attachment['res_model'] == 'ir.ui.view']
for attach in self.read(cr, uid, view_attachment_ids, ['res_model', 'res_id', 'type', 'datas'], context=context):
result[attach['id']] = self._compute_checksum(attach)
return result
def _compute_checksum(self, attachment_dict):
if attachment_dict.get('res_model') == 'ir.ui.view'\
and not attachment_dict.get('res_id') and not attachment_dict.get('url')\
and attachment_dict.get('type', 'binary') == 'binary'\
and attachment_dict.get('datas'):
return hashlib.new('sha1', attachment_dict['datas']).hexdigest()
return None
def _datas_big(self, cr, uid, ids, name, arg, context=None):
result = dict.fromkeys(ids, False)
if context and context.get('bin_size'):
return result
for record in self.browse(cr, uid, ids, context=context):
if record.res_model != 'ir.ui.view' or not record.datas: continue
try:
result[record.id] = openerp.tools.image_resize_image_big(record.datas)
except IOError: # apparently the error PIL.Image.open raises
pass
return result
_columns = {
'datas_checksum': fields.function(_datas_checksum, size=40,
string="Datas checksum", type='char', store=True, select=True),
'website_url': fields.function(_website_url_get, string="Attachment URL", type='char'),
'datas_big': fields.function (_datas_big, type='binary', store=True,
string="Resized file content"),
'mimetype': fields.char('Mime Type', readonly=True),
}
def _add_mimetype_if_needed(self, values):
if values.get('datas_fname'):
values['mimetype'] = mimetypes.guess_type(values.get('datas_fname'))[0] or 'application/octet-stream'
def create(self, cr, uid, values, context=None):
chk = self._compute_checksum(values)
if chk:
match = self.search(cr, uid, [('datas_checksum', '=', chk)], context=context)
if match:
return match[0]
self._add_mimetype_if_needed(values)
return super(ir_attachment, self).create(
cr, uid, values, context=context)
def write(self, cr, uid, ids, values, context=None):
self._add_mimetype_if_needed(values)
return super(ir_attachment, self).write(cr, uid, ids, values, context=context)
def try_remove(self, cr, uid, ids, context=None):
""" Removes a web-based image attachment if it is used by no view
(template)
Returns a dict mapping attachments which would not be removed (if any)
mapped to the views preventing their removal
"""
Views = self.pool['ir.ui.view']
attachments_to_remove = []
# views blocking removal of the attachment
removal_blocked_by = {}
for attachment in self.browse(cr, uid, ids, context=context):
# in-document URLs are html-escaped, a straight search will not
# find them
url = escape(attachment.website_url)
ids = Views.search(cr, uid, ["|", ('arch', 'like', '"%s"' % url), ('arch', 'like', "'%s'" % url)], context=context)
if ids:
removal_blocked_by[attachment.id] = Views.read(
cr, uid, ids, ['name'], context=context)
else:
attachments_to_remove.append(attachment.id)
if attachments_to_remove:
self.unlink(cr, uid, attachments_to_remove, context=context)
return removal_blocked_by
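# e.g. (hedged): try_remove(cr, uid, [att_id]) returns {} once the attachment
# is deleted, or {att_id: [{'id': view_id, 'name': view_name}, ...]} when
# views still reference its url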
class res_partner(osv.osv):
_inherit = "res.partner"
def google_map_img(self, cr, uid, ids, zoom=8, width=298, height=298, context=None):
partner = self.browse(cr, uid, ids[0], context=context)
params = {
'center': '%s, %s %s, %s' % (partner.street or '', partner.city or '', partner.zip or '', partner.country_id and partner.country_id.name_get()[0][1] or ''),
'size': "%sx%s" % (height, width),
'zoom': zoom,
'sensor': 'false',
}
return urlplus('//maps.googleapis.com/maps/api/staticmap' , params)
def google_map_link(self, cr, uid, ids, zoom=8, context=None):
partner = self.browse(cr, uid, ids[0], context=context)
params = {
'q': '%s, %s %s, %s' % (partner.street or '', partner.city or '', partner.zip or '', partner.country_id and partner.country_id.name_get()[0][1] or ''),
'z': 10
}
return urlplus('https://maps.google.com/maps' , params)
class res_company(osv.osv):
_inherit = "res.company"
def google_map_img(self, cr, uid, ids, zoom=8, width=298, height=298, context=None):
partner = self.browse(cr, openerp.SUPERUSER_ID, ids[0], context=context).partner_id
return partner and partner.google_map_img(zoom, width, height, context=context) or None
def google_map_link(self, cr, uid, ids, zoom=8, context=None):
partner = self.browse(cr, openerp.SUPERUSER_ID, ids[0], context=context).partner_id
return partner and partner.google_map_link(zoom, context=context) or None
class base_language_install(osv.osv_memory):
_inherit = "base.language.install"
_columns = {
'website_ids': fields.many2many('website', string='Websites to translate'),
}
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
defaults = super(base_language_install, self).default_get(cr, uid, fields, context)
website_id = context.get('params', {}).get('website_id')
if website_id:
if 'website_ids' not in defaults:
defaults['website_ids'] = []
defaults['website_ids'].append(website_id)
return defaults
def lang_install(self, cr, uid, ids, context=None):
if context is None:
context = {}
action = super(base_language_install, self).lang_install(cr, uid, ids, context)
language_obj = self.browse(cr, uid, ids)[0]
website_ids = [website.id for website in language_obj['website_ids']]
lang_id = self.pool['res.lang'].search(cr, uid, [('code', '=', language_obj['lang'])])
if website_ids and lang_id:
data = {'language_ids': [(4, lang_id[0])]}
self.pool['website'].write(cr, uid, website_ids, data)
params = context.get('params', {})
if 'url_return' in params:
return {
'url': params['url_return'].replace('[lang]', language_obj['lang']),
'type': 'ir.actions.act_url',
'target': 'self'
}
return action
class website_seo_metadata(osv.Model):
_name = 'website.seo.metadata'
_description = 'SEO metadata'
_columns = {
'website_meta_title': fields.char("Website meta title", translate=True),
'website_meta_description': fields.text("Website meta description", translate=True),
'website_meta_keywords': fields.char("Website meta keywords", translate=True),
}
# vim:et:
| agpl-3.0 |
jeffmarcom/checkbox | plainbox/plainbox/impl/test_rfc822.py | 1 | 8809 | # This file is part of Checkbox.
#
# Copyright 2012 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <[email protected]>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
plainbox.impl.test_rfc822
=========================
Test definitions for plainbox.impl.rfc822 module
"""
from io import StringIO
from unittest import TestCase
from plainbox.impl.rfc822 import Origin
from plainbox.impl.rfc822 import RFC822Record
from plainbox.impl.rfc822 import load_rfc822_records
from plainbox.impl.rfc822 import dump_rfc822_records
from plainbox.impl.secure.checkbox_trusted_launcher import RFC822SyntaxError
class OriginTests(TestCase):
def setUp(self):
self.origin = Origin("file.txt", 10, 12)
def test_smoke(self):
self.assertEqual(self.origin.filename, "file.txt")
self.assertEqual(self.origin.line_start, 10)
self.assertEqual(self.origin.line_end, 12)
def test_repr(self):
expected = "<Origin filename:'file.txt' line_start:10 line_end:12>"
observed = repr(self.origin)
self.assertEqual(expected, observed)
def test_str(self):
expected = "file.txt:10-12"
observed = str(self.origin)
self.assertEqual(expected, observed)
class RFC822RecordTests(TestCase):
def test_smoke(self):
data = {'key': 'value'}
origin = Origin('file.txt', 1, 1)
record = RFC822Record(data, origin)
self.assertEqual(record.data, data)
self.assertEqual(record.origin, origin)
class RFC822ParserTestsMixIn():
loader = load_rfc822_records
def test_empty(self):
with StringIO("") as stream:
records = type(self).loader(stream)
self.assertEqual(len(records), 0)
def test_single_record(self):
with StringIO("key:value") as stream:
records = type(self).loader(stream)
self.assertEqual(len(records), 1)
self.assertEqual(records[0].data, {'key': 'value'})
def test_many_newlines(self):
text = (
"\n"
"\n"
"key1:value1\n"
"\n"
"\n"
"\n"
"key2:value2\n"
"\n"
"\n"
"key3:value3\n"
"\n"
"\n"
)
with StringIO(text) as stream:
records = type(self).loader(stream)
self.assertEqual(len(records), 3)
self.assertEqual(records[0].data, {'key1': 'value1'})
self.assertEqual(records[1].data, {'key2': 'value2'})
self.assertEqual(records[2].data, {'key3': 'value3'})
def test_many_records(self):
text = (
"key1:value1\n"
"\n"
"key2:value2\n"
"\n"
"key3:value3\n"
)
with StringIO(text) as stream:
records = type(self).loader(stream)
self.assertEqual(len(records), 3)
self.assertEqual(records[0].data, {'key1': 'value1'})
self.assertEqual(records[1].data, {'key2': 'value2'})
self.assertEqual(records[2].data, {'key3': 'value3'})
def test_multiline_value(self):
text = (
"key:\n"
" longer\n"
" value\n"
)
with StringIO(text) as stream:
records = type(self).loader(stream)
self.assertEqual(len(records), 1)
self.assertEqual(records[0].data, {'key': 'longer\nvalue'})
def test_multiline_value_with_space(self):
text = (
"key:\n"
" longer\n"
" .\n"
" value\n"
)
with StringIO(text) as stream:
records = type(self).loader(stream)
self.assertEqual(len(records), 1)
self.assertEqual(records[0].data, {'key': 'longer\n\nvalue'})
def test_multiline_value_with_period(self):
text = (
"key:\n"
" longer\n"
" ..\n"
" value\n"
)
with StringIO(text) as stream:
records = type(self).loader(stream)
self.assertEqual(len(records), 1)
self.assertEqual(records[0].data, {'key': 'longer\n.\nvalue'})
def test_many_multiline_values(self):
text = (
"key1:initial\n"
" longer\n"
" value 1\n"
"\n"
"key2:\n"
" longer\n"
" value 2\n"
)
with StringIO(text) as stream:
records = type(self).loader(stream)
self.assertEqual(len(records), 2)
self.assertEqual(records[0].data, {'key1': 'initial\nlonger\nvalue 1'})
self.assertEqual(records[1].data, {'key2': 'longer\nvalue 2'})
def test_irrelevant_whitespace(self):
text = "key : value "
with StringIO(text) as stream:
records = type(self).loader(stream)
self.assertEqual(len(records), 1)
self.assertEqual(records[0].data, {'key': 'value'})
def test_relevant_whitespace(self):
text = (
"key:\n"
" value\n"
)
with StringIO(text) as stream:
records = type(self).loader(stream)
self.assertEqual(len(records), 1)
self.assertEqual(records[0].data, {'key': 'value'})
def test_bad_multiline(self):
text = " extra value"
with StringIO(text) as stream:
with self.assertRaises(RFC822SyntaxError) as call:
type(self).loader(stream)
self.assertEqual(call.exception.msg, "Unexpected multi-line value")
def test_garbage(self):
text = "garbage"
with StringIO(text) as stream:
with self.assertRaises(RFC822SyntaxError) as call:
type(self).loader(stream)
self.assertEqual(call.exception.msg, "Unexpected non-empty line")
def test_syntax_error(self):
text = "key1 = value1"
with StringIO(text) as stream:
with self.assertRaises(RFC822SyntaxError) as call:
type(self).loader(stream)
self.assertEqual(call.exception.msg, "Unexpected non-empty line")
def test_duplicate_error(self):
text = (
"key1: value1\n"
"key1: value2\n"
)
with StringIO(text) as stream:
with self.assertRaises(RFC822SyntaxError) as call:
type(self).loader(stream)
self.assertEqual(call.exception.msg, (
"Job has a duplicate key 'key1' with old value 'value1'"
" and new value 'value2'"))
class RFC822ParserTests(TestCase, RFC822ParserTestsMixIn):
pass
class RFC822WriterTests(TestCase):
def test_single_record(self):
with StringIO() as stream:
dump_rfc822_records({'key': 'value'}, stream)
self.assertEqual(stream.getvalue(), "key: value\n\n")
def test_multiple_record(self):
with StringIO() as stream:
dump_rfc822_records({'key1': 'value1', 'key2': 'value2'}, stream)
self.assertIn(
stream.getvalue(), (
"key1: value1\nkey2: value2\n\n",
"key2: value2\nkey1: value1\n\n"))
def test_multiline_value(self):
text = (
"key:\n"
" longer\n"
" value\n\n"
)
with StringIO() as stream:
dump_rfc822_records({'key': 'longer\nvalue'}, stream)
self.assertEqual(stream.getvalue(), text)
def test_multiline_value_with_space(self):
text = (
"key:\n"
" longer\n"
" .\n"
" value\n\n"
)
with StringIO() as stream:
dump_rfc822_records({'key': 'longer\n\nvalue'}, stream)
self.assertEqual(stream.getvalue(), text)
def test_multiline_value_with_period(self):
text = (
"key:\n"
" longer\n"
" ..\n"
" value\n\n"
)
with StringIO() as stream:
dump_rfc822_records({'key': 'longer\n.\nvalue'}, stream)
self.assertEqual(stream.getvalue(), text)
def test_type_error(self):
with StringIO() as stream:
with self.assertRaises(AttributeError):
dump_rfc822_records(['key', 'value'], stream)
| gpl-3.0 |
RudoCris/horizon | openstack_dashboard/dashboards/project/data_processing/clusters/tables.py | 10 | 5388 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.http import Http404 # noqa
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import messages
from horizon import tables
from horizon.tables import base as tables_base
from openstack_dashboard.api import sahara as saharaclient
from saharaclient.api import base as api_base
LOG = logging.getLogger(__name__)
class ClustersFilterAction(tables.FilterAction):
filter_type = "server"
filter_choices = (('name', _("Name"), True),
('status', _("Status"), True))
class ClusterGuide(tables.LinkAction):
name = "cluster_guide"
verbose_name = _("Cluster Creation Guide")
url = "horizon:project:data_processing.wizard:cluster_guide"
class CreateCluster(tables.LinkAction):
name = "create"
verbose_name = _("Launch Cluster")
url = "horizon:project:data_processing.clusters:create-cluster"
classes = ("ajax-modal",)
icon = "plus"
class ScaleCluster(tables.LinkAction):
name = "scale"
verbose_name = _("Scale Cluster")
url = "horizon:project:data_processing.clusters:scale"
classes = ("ajax-modal", "btn-edit")
def allowed(self, request, cluster=None):
return cluster.status == "Active"
class DeleteCluster(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Cluster",
u"Delete Clusters",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Cluster",
u"Deleted Clusters",
count
)
def delete(self, request, obj_id):
saharaclient.cluster_delete(request, obj_id)
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, instance_id):
try:
return saharaclient.cluster_get(request, instance_id)
except api_base.APIException as e:
if e.error_code == 404:
raise Http404
else:
messages.error(request,
_("Unable to update row"))
def get_instances_count(cluster):
return sum([len(ng["instances"])
for ng in cluster.node_groups])
class RichErrorCell(tables_base.Cell):
@property
def status(self):
# The error cell value becomes quite complex and cannot be handled
# correctly with STATUS_CHOICES. Handle it explicitly.
status = self.datum.status.lower()
if status == "error":
return False
elif status == "active":
return True
return None
def get_rich_status_info(cluster):
return {
"status": cluster.status,
"status_description": cluster.status_description
}
def rich_status_filter(status_dict):
# Render the status "as is" if no description is provided.
if not status_dict["status_description"]:
return status_dict["status"]
# Error is rendered with a template containing an error description.
return render_to_string(
"project/data_processing.clusters/_rich_status.html", status_dict)
class ConfigureCluster(tables.LinkAction):
name = "configure"
verbose_name = _("Configure Cluster")
url = "horizon:project:data_processing.clusters:configure-cluster"
classes = ("ajax-modal", "configure-cluster-btn")
icon = "plus"
attrs = {"style": "display: none"}
class ClustersTable(tables.DataTable):
name = tables.Column("name",
verbose_name=_("Name"),
link=("horizon:project:data_processing."
"clusters:details"))
plugin = tables.Column("plugin_name",
verbose_name=_("Plugin"))
version = tables.Column("hadoop_version",
verbose_name=_("Version"))
# Status field need the whole cluster object to build the rich status.
status = tables.Column(get_rich_status_info,
verbose_name=_("Status"),
status=True,
filters=(rich_status_filter,))
instances_count = tables.Column(get_instances_count,
verbose_name=_("Instances Count"))
class Meta(object):
name = "clusters"
verbose_name = _("Clusters")
row_class = UpdateRow
cell_class = RichErrorCell
status_columns = ["status"]
table_actions = (ClusterGuide,
CreateCluster,
ConfigureCluster,
DeleteCluster,
ClustersFilterAction)
row_actions = (ScaleCluster,
DeleteCluster,)
| apache-2.0 |
chrisglass/ufoai | contrib/scripts/ui/checkNodeAttributeUsage.py | 1 | 1746 | #!/usr/bin/python
#
# @brief check the usage of menuNode_s attributes in all menu files (call it from the root of the trunk)
# @license Public domain
# @return an XHTML page on stdout
# @todo read the "attributes" list from the nodes.h file
#
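# usage (hedged example): run from the trunk root and redirect stdout, e.g.
#   python contrib/scripts/ui/checkNodeAttributeUsage.py > usage.xhtml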
import os, os.path, sys
# path to the root of the UFO:AI source tree (where the ufo binary lives)
UFOAI_ROOT = os.path.realpath(sys.path[0] + '/../../..')
attributes = [
"textalign",
"text",
"font",
"icon",
"image",
"cvar",
"border",
"bgcolor",
"bordercolor",
"color",
"selectedColor",
"onClick",
"onRightClick",
"onMiddleClick",
"onWheel",
"onMouseEnter",
"onMouseLeave",
"onWheelUp",
"onWheelDown",
"onChange"
]
files = []
# both directories are scanned identically, so loop over them once
for dir in (UFOAI_ROOT + '/src/client/menu', UFOAI_ROOT + '/src/client/menu/node'):
for f in os.listdir(dir):
if ".c" not in f:
continue
file = open(dir + '/' + f, "rt")
data = file.read()
file.close()
files.append((f, data))
print "<html>"
print "<body>"
print "<table>"
print "<tr><td></td>\n"
for a in attributes:
print "<th>" + a + "</th>\n"
print "</tr>"
for fd in files:
f, data = fd
print "<tr><th>" + f + "</th>"
for a in attributes:
c = data.count("node->" + a)
style = ""
if c > 0:
style = " style=\"background-color:#00FF00\""
else:
a = ""  # leave the cell empty when the attribute is never used
print "<td" + style + ">" + a + "</td>"
print "</tr>\n"
print "</table>"
print "<table>"
for a in attributes:
print "<tr><th>" + a + "</th>"
print "<td>"
for fd in files:
f, data = fd
c = data.count("node->" + a)
style = ""
if c > 0:
print f + " "
print "</td>"
print "</tr>"
print "</table>"
print "</body>"
print "</html>"
| gpl-2.0 |
klmitch/nova | nova/scheduler/driver.py | 1 | 2689 | # Copyright (c) 2010 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler base class that all Schedulers should inherit from
"""
import abc
from nova import objects
from nova.scheduler import host_manager
from nova import servicegroup
class Scheduler(metaclass=abc.ABCMeta):
"""The base class that all Scheduler classes should inherit from."""
# TODO(mriedem): We should remove this flag now so that all scheduler
# drivers, both in-tree and out-of-tree, must rely on placement for
# scheduling decisions. We're likely going to have more and more code
# over time that relies on the scheduler creating allocations and it
# will not be sustainable to try and keep compatibility code around for
# scheduler drivers that do not create allocations in Placement.
USES_ALLOCATION_CANDIDATES = True
"""Indicates that the scheduler driver calls the Placement API for
allocation candidates and uses those allocation candidates in its
decision-making.
"""
def __init__(self):
self.host_manager = host_manager.HostManager()
self.servicegroup_api = servicegroup.API()
def run_periodic_tasks(self, context):
"""Manager calls this so drivers can perform periodic tasks."""
pass
def hosts_up(self, context, topic):
"""Return the list of hosts that have a running service for topic."""
services = objects.ServiceList.get_by_topic(context, topic)
return [service.host
for service in services
if self.servicegroup_api.service_is_up(service)]
@abc.abstractmethod
def select_destinations(self, context, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, provider_summaries,
allocation_request_version=None, return_alternates=False):
"""Returns a list of lists of Selection objects that have been chosen
by the scheduler driver, one for each requested instance.
"""
return []
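# A minimal illustrative subclass (hedged sketch, not shipped with nova):
# it shows the interface a custom driver must implement; a real driver would
# rank hosts and build Selection objects here.
class _ExampleNoopScheduler(Scheduler):
    """Toy driver demonstrating the required select_destinations override."""

    def select_destinations(self, context, spec_obj, instance_uuids,
                            alloc_reqs_by_rp_uuid, provider_summaries,
                            allocation_request_version=None,
                            return_alternates=False):
        # Return one (empty) list of Selection objects per requested instance.
        return [[] for _ in instance_uuids or []]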
| apache-2.0 |
Edraak/edraak-platform | common/test/acceptance/tests/lms/test_lms_user_preview.py | 9 | 17924 | # -*- coding: utf-8 -*-
"""
Tests the "preview" selector in the LMS that allows changing between Staff, Learner, and Content Groups.
"""
from textwrap import dedent
from nose.plugins.attrib import attr
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.instructor_dashboard import InstructorDashboardPage
from common.test.acceptance.pages.lms.staff_view import StaffCoursewarePage
from common.test.acceptance.tests.helpers import UniqueCourseTest, create_user_partition_json
from xmodule.partitions.partitions import ENROLLMENT_TRACK_PARTITION_ID, MINIMUM_STATIC_PARTITION_ID, Group
@attr(shard=20)
class StaffViewTest(UniqueCourseTest):
"""
Tests that verify the staff view.
"""
USERNAME = "STAFF_TESTER"
EMAIL = "[email protected]"
def setUp(self):
super(StaffViewTest, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
# Install a course with sections/problems, tabs, updates, and handouts
self.course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.populate_course_fixture(self.course_fixture) # pylint: disable=no-member
self.course_fixture.install()
# Auto-auth register for the course.
# Do this as global staff so that you will see the Staff View
AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
course_id=self.course_id, staff=True).visit()
def _goto_staff_page(self):
"""
Open staff page with assertion
"""
self.courseware_page.visit()
staff_page = StaffCoursewarePage(self.browser, self.course_id)
self.assertEqual(staff_page.staff_view_mode, 'Staff')
return staff_page
@attr(shard=20)
class CourseWithoutContentGroupsTest(StaffViewTest):
"""
Setup for tests that have no content restricted to specific content groups.
"""
def populate_course_fixture(self, course_fixture):
"""
Populates test course with chapter, sequential, and 2 problems.
"""
problem_data = dedent("""
<problem markdown="Simple Problem" max_attempts="" weight="">
<p>Choose Yes.</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Yes</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=problem_data),
XBlockFixtureDesc('problem', 'Test Problem 2', data=problem_data)
)
)
)
@attr(shard=20)
class StaffViewToggleTest(CourseWithoutContentGroupsTest):
"""
Tests for the staff view toggle button.
"""
def test_instructor_tab_visibility(self):
"""
Test that the instructor tab is hidden when viewing as a student.
"""
course_page = self._goto_staff_page()
self.assertTrue(course_page.has_tab('Instructor'))
course_page.set_staff_view_mode('Learner')
self.assertEqual(course_page.staff_view_mode, 'Learner')
self.assertFalse(course_page.has_tab('Instructor'))
@attr(shard=20)
class StaffDebugTest(CourseWithoutContentGroupsTest):
"""
Tests that verify the staff debug info.
"""
def test_reset_attempts_empty(self):
"""
Test that we reset even when there is no student state
"""
staff_debug_page = self._goto_staff_page().open_staff_debug_info()
staff_debug_page.reset_attempts()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(
u'Successfully reset the attempts for user {}'.format(self.USERNAME), msg,
)
def test_delete_state_empty(self):
"""
Test that we delete properly even when there isn't state to delete.
"""
staff_debug_page = self._goto_staff_page().open_staff_debug_info()
staff_debug_page.delete_state()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(
u'Successfully deleted student state for user {}'.format(self.USERNAME), msg,
)
def test_reset_attempts_state(self):
"""
Successfully reset the student attempts
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.reset_attempts()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(
u'Successfully reset the attempts for user {}'.format(self.USERNAME), msg,
)
def test_rescore_problem(self):
"""
Rescore the student
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.rescore()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully rescored problem for user {}'.format(self.USERNAME), msg)
def test_rescore_problem_if_higher(self):
"""
Rescore the student
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.rescore_if_higher()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully rescored problem to improve score for user {}'.format(self.USERNAME), msg)
def test_student_state_delete(self):
"""
Successfully delete the student state with an answer
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.delete_state()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully deleted student state for user {}'.format(self.USERNAME), msg)
def test_student_by_email(self):
"""
Successfully reset the student attempts using their email address
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.reset_attempts(self.EMAIL)
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully reset the attempts for user {}'.format(self.EMAIL), msg)
def test_bad_student(self):
"""
Test negative response with invalid user
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.delete_state('INVALIDUSER')
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Failed to delete student state for user. User does not exist.', msg)
def test_reset_attempts_for_problem_loaded_via_ajax(self):
"""
Successfully reset the student attempts for problem loaded via ajax.
"""
staff_page = self._goto_staff_page()
staff_page.load_problem_via_ajax()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.reset_attempts()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully reset the attempts for user {}'.format(self.USERNAME), msg)
def test_rescore_state_for_problem_loaded_via_ajax(self):
"""
Rescore the student for problem loaded via ajax.
"""
staff_page = self._goto_staff_page()
staff_page.load_problem_via_ajax()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.rescore()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully rescored problem for user {}'.format(self.USERNAME), msg)
def test_student_state_delete_for_problem_loaded_via_ajax(self):
"""
Successfully delete the student state for problem loaded via ajax.
"""
staff_page = self._goto_staff_page()
staff_page.load_problem_via_ajax()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.delete_state()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully deleted student state for user {}'.format(self.USERNAME), msg)
class CourseWithContentGroupsTest(StaffViewTest):
"""
Verifies that changing the "View this course as" selector works properly for content groups.
"""
def setUp(self):
super(CourseWithContentGroupsTest, self).setUp()
# pylint: disable=protected-access
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
MINIMUM_STATIC_PARTITION_ID,
'Configuration alpha,beta',
'Content Group Partition',
[
Group(MINIMUM_STATIC_PARTITION_ID + 1, 'alpha'),
Group(MINIMUM_STATIC_PARTITION_ID + 2, 'beta')
],
scheme="cohort"
)
],
},
})
def populate_course_fixture(self, course_fixture):
"""
Populates test course with chapter, sequential, and 3 problems.
One problem is visible to all, one problem is visible only to Group "alpha", and
one problem is visible only to Group "beta".
"""
problem_data = dedent("""
<problem markdown="Simple Problem" max_attempts="" weight="">
<choiceresponse>
<label>Choose Yes.</label>
<checkboxgroup>
<choice correct="true">Yes</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")
self.alpha_text = "VISIBLE TO ALPHA"
self.beta_text = "VISIBLE TO BETA"
self.audit_text = "VISIBLE TO AUDIT"
self.everyone_text = "VISIBLE TO EVERYONE"
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc(
'problem',
self.alpha_text,
data=problem_data,
metadata={"group_access": {MINIMUM_STATIC_PARTITION_ID: [MINIMUM_STATIC_PARTITION_ID + 1]}}
),
XBlockFixtureDesc(
'problem',
self.beta_text,
data=problem_data,
metadata={"group_access": {MINIMUM_STATIC_PARTITION_ID: [MINIMUM_STATIC_PARTITION_ID + 2]}}
),
XBlockFixtureDesc(
'problem',
self.audit_text,
data=problem_data,
# Below 1 is the hardcoded group ID for "Audit"
metadata={"group_access": {ENROLLMENT_TRACK_PARTITION_ID: [1]}}
),
XBlockFixtureDesc(
'problem',
self.everyone_text,
data=problem_data
)
)
)
)
)
@attr(shard=20)
def test_staff_sees_all_problems(self):
"""
Scenario: Staff see all problems
Given I have a course with a cohort user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
Then I see all the problems, regardless of their group_access property
"""
course_page = self._goto_staff_page()
verify_expected_problem_visibility(
self,
course_page,
[self.alpha_text, self.beta_text, self.audit_text, self.everyone_text]
)
@attr(shard=3)
def test_student_not_in_content_group(self):
"""
Scenario: When previewing as a learner, only content visible to all is shown
Given I have a course with a cohort user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
And I change to previewing as a Learner
Then I see only problems visible to all users
"""
course_page = self._goto_staff_page()
course_page.set_staff_view_mode('Learner')
verify_expected_problem_visibility(self, course_page, [self.everyone_text])
@attr(shard=3)
def test_as_student_in_alpha(self):
"""
Scenario: When previewing as a learner in group alpha, only content visible to alpha is shown
Given I have a course with a cohort user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
And I change to previewing as a Learner in group alpha
Then I see only problems visible to group alpha
"""
course_page = self._goto_staff_page()
course_page.set_staff_view_mode('Learner in alpha')
verify_expected_problem_visibility(self, course_page, [self.alpha_text, self.everyone_text])
@attr(shard=3)
def test_as_student_in_beta(self):
"""
Scenario: When previewing as a learner in group beta, only content visible to beta is shown
Given I have a course with a cohort user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
And I change to previewing as a Learner in group beta
Then I see only problems visible to group beta
"""
course_page = self._goto_staff_page()
course_page.set_staff_view_mode('Learner in beta')
verify_expected_problem_visibility(self, course_page, [self.beta_text, self.everyone_text])
@attr(shard=3)
def test_as_student_in_audit(self):
"""
Scenario: When previewing as a learner in the audit enrollment track, only content visible to audit is shown
Given I have a course with an enrollment_track user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
And I change to previewing as a Learner in audit enrollment track
Then I see only problems visible to audit enrollment track
"""
course_page = self._goto_staff_page()
course_page.set_staff_view_mode('Learner in Audit')
verify_expected_problem_visibility(self, course_page, [self.audit_text, self.everyone_text])
def create_cohorts_and_assign_students(self, student_a_username, student_b_username):
"""
Adds 2 manual cohorts, linked to content groups, to the course.
Each cohort is assigned one learner.
"""
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
instructor_dashboard_page.visit()
cohort_management_page = instructor_dashboard_page.select_cohort_management()
cohort_management_page.is_cohorted = True
def add_cohort_with_student(cohort_name, content_group, student):
""" Create cohort and assign learner to it. """
cohort_management_page.add_cohort(cohort_name, content_group=content_group)
cohort_management_page.add_students_to_selected_cohort([student])
add_cohort_with_student("Cohort Alpha", "alpha", student_a_username)
add_cohort_with_student("Cohort Beta", "beta", student_b_username)
cohort_management_page.wait_for_ajax()
@attr('a11y')
def test_course_page(self):
"""
Run accessibility audit for course staff pages.
"""
course_page = self._goto_staff_page()
course_page.a11y_audit.config.set_rules({
'ignore': [
'aria-allowed-attr', # TODO: AC-559
'aria-roles', # TODO: AC-559,
'aria-valid-attr', # TODO: AC-559
'color-contrast', # TODO: AC-559
'link-href', # TODO: AC-559
'section', # TODO: AC-559
]
})
course_page.a11y_audit.check_for_accessibility_errors()
def verify_expected_problem_visibility(test, courseware_page, expected_problems):
"""
Helper method that checks that the expected problems are visible on the current page.
"""
test.assertEqual(
len(expected_problems), courseware_page.num_xblock_components, "Incorrect number of visible problems"
)
for index, expected_problem in enumerate(expected_problems):
test.assertIn(expected_problem, courseware_page.xblock_components[index].text)
| agpl-3.0 |
cosmo-ethz/hope | test/test_operators.py | 1 | 3939 | # Copyright (C) 2014 ETH Zurich, Institute for Astronomy
"""
Test operators for `hope` module.
"""
from __future__ import print_function, division, absolute_import, unicode_literals
import numpy as np
import hope, itertools, pytest, sys, sysconfig, os, shutil
from test.utilities import random, check, make_test, JENKINS, min_dtypes, dtypes, shapes, setup_module, setup_method, teardown_module
@pytest.mark.parametrize("dtype,shape", itertools.product([dtype for dtype in dtypes if issubclass(dtype, np.integer) or dtype == int], shapes[1:]))
def test_binary_mod(dtype, shape):
if JENKINS and dtype == np.int8:
pytest.skip("Fails on debian: dtype={0!s}".format(dtype))
def fkt(a, b, c):
c[:] = a % b
hfkt = hope.jit(fkt)
(ao, ah), (bo, bh), (co, ch) = random(dtype, shape), random(dtype, shape), random(dtype, shape)
if np.count_nonzero(bo == 0) > 0: bo[bo == 0] += 1
if np.count_nonzero(bh == 0) > 0: bh[bh == 0] += 1
fkt(ao, bo, co), hfkt(ah, bh, ch)
assert check(co, ch)
fkt(ao, bo, co), hfkt(ah, bh, ch)
assert check(co, ch)
@pytest.mark.parametrize("dtype,shape", itertools.product([dtype for dtype in dtypes if issubclass(dtype, np.integer) or dtype == int], shapes[1:]))
def test_binary_lshifts(dtype, shape):
def fkt(a, b, c):
c[:] = a << b
hfkt = hope.jit(fkt)
(ao, ah), (bo, bh), (co, ch) = random(dtype, shape), random(dtype, shape), random(dtype, shape)
# keep shift amounts below the dtype's bit width so the shift stays defined
bo, bh = (bo % (np.dtype(dtype).itemsize * 8)).astype(dtype), (bh % (np.dtype(dtype).itemsize * 8)).astype(dtype)
fkt(ao, bo, co), hfkt(ah, bh, ch)
assert check(co, ch)
fkt(ao, bo, co), hfkt(ah, bh, ch)
assert check(co, ch)
@pytest.mark.parametrize("dtype,shape", itertools.product([dtype for dtype in dtypes if issubclass(dtype, np.integer) or dtype == int], shapes[1:]))
def test_binary_rshift(dtype, shape):
def fkt(a, b, c):
c[:] = a >> b
hfkt = hope.jit(fkt)
(ao, ah), (bo, bh), (co, ch) = random(dtype, shape), random(dtype, shape), random(dtype, shape)
bo, bh = (bo % (np.dtype(dtype).itemsize * 8)).astype(dtype), (bh % (np.dtype(dtype).itemsize * 8)).astype(dtype)
fkt(ao, bo, co), hfkt(ah, bh, ch)
assert check(co, ch)
fkt(ao, bo, co), hfkt(ah, bh, ch)
assert check(co, ch)
@pytest.mark.parametrize("dtype,shape", itertools.product([dtype for dtype in dtypes if issubclass(dtype, np.integer) or dtype == int], shapes[1:]))
def test_augmented_mod(dtype, shape):
def fkt(a, c):
c[:] %= a
hfkt = hope.jit(fkt)
(ao, ah), (co, ch) = random(dtype, shape), random(dtype, shape)
if np.count_nonzero(ao == 0) > 0: ao[ao == 0] += 1
if np.count_nonzero(ah == 0) > 0: ah[ah == 0] += 1
fkt(ao, co), hfkt(ah, ch)
assert check(co, ch)
fkt(ao, co), hfkt(ah, ch)
assert check(co, ch)
@pytest.mark.parametrize("dtype,shape", itertools.product([dtype for dtype in dtypes if issubclass(dtype, np.integer) or dtype == int], shapes[1:]))
def test_augmented_lshifts(dtype, shape):
def fkt(a, c):
c[:] <<= a
hfkt = hope.jit(fkt)
(ao, ah), (co, ch) = random(dtype, [10]), random(dtype, [10])
ao, ah = (ao % (np.dtype(dtype).itemsize * 8)).astype(dtype), (ah % (np.dtype(dtype).itemsize * 8)).astype(dtype)
fkt(ao, co), hfkt(ah, ch)
assert check(co, ch)
fkt(ao, co), hfkt(ah, ch)
assert check(co, ch)
@pytest.mark.parametrize("dtype,shape", itertools.product([dtype for dtype in dtypes if issubclass(dtype, np.integer) or dtype == int], shapes[1:]))
def test_augmented_rshift(dtype, shape):
def fkt(a, c):
c[:] >>= a
hfkt = hope.jit(fkt)
(ao, ah), (co, ch) = random(dtype, [10]), random(dtype, [10])
ao, ah = (ao % (np.dtype(dtype).itemsize * 8)).astype(dtype), (ah % (np.dtype(dtype).itemsize * 8)).astype(dtype)
fkt(ao, co), hfkt(ah, ch)
assert check(co, ch)
fkt(ao, co), hfkt(ah, ch)
assert check(co, ch)
| gpl-3.0 |
ofekd/servo | tests/wpt/harness/wptrunner/wptmanifest/tests/test_tokenizer.py | 195 | 11355 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import os
import unittest
sys.path.insert(0, os.path.abspath(".."))
from cStringIO import StringIO
from .. import parser
from ..parser import token_types
class TokenizerTest(unittest.TestCase):
def setUp(self):
self.tokenizer = parser.Tokenizer()
def tokenize(self, input_str):
rv = []
for item in self.tokenizer.tokenize(StringIO(input_str)):
rv.append(item)
if item[0] == token_types.eof:
break
return rv
def compare(self, input_text, expected):
expected = expected + [(token_types.eof, None)]
actual = self.tokenize(input_text)
self.assertEquals(actual, expected)
def test_heading_0(self):
self.compare("""[Heading text]""",
[(token_types.paren, "["),
(token_types.string, "Heading text"),
(token_types.paren, "]")])
def test_heading_1(self):
self.compare("""[Heading [text\]]""",
[(token_types.paren, "["),
(token_types.string, "Heading [text]"),
(token_types.paren, "]")])
def test_heading_2(self):
self.compare("""[Heading #text]""",
[(token_types.paren, "["),
(token_types.string, "Heading #text"),
(token_types.paren, "]")])
def test_heading_3(self):
self.compare("""[Heading [\]text]""",
[(token_types.paren, "["),
(token_types.string, "Heading []text"),
(token_types.paren, "]")])
def test_heading_4(self):
with self.assertRaises(parser.ParseError):
self.tokenize("[Heading")
def test_heading_5(self):
self.compare("""[Heading [\]text] #comment""",
[(token_types.paren, "["),
(token_types.string, "Heading []text"),
(token_types.paren, "]")])
def test_heading_6(self):
self.compare(r"""[Heading \ttext]""",
[(token_types.paren, "["),
(token_types.string, "Heading \ttext"),
(token_types.paren, "]")])
def test_key_0(self):
self.compare("""key:value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_key_1(self):
self.compare("""key : value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_key_2(self):
self.compare("""key : val ue""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "val ue")])
def test_key_3(self):
self.compare("""key: value#comment""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_key_4(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""ke y: value""")
def test_key_5(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key""")
def test_key_6(self):
self.compare("""key: "value\"""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_key_7(self):
self.compare("""key: 'value'""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_key_8(self):
self.compare("""key: "#value\"""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "#value")])
def test_key_9(self):
self.compare("""key: '#value\'""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "#value")])
def test_key_10(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key: "value""")
def test_key_11(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key: 'value""")
def test_key_12(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key: 'value""")
def test_key_13(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key: 'value' abc""")
def test_key_14(self):
self.compare(r"""key: \\nb""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, r"\nb")])
def test_list_0(self):
self.compare(
"""
key: []""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.list_start, "["),
(token_types.list_end, "]")])
def test_list_1(self):
self.compare(
"""
key: [a, "b"]""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.list_start, "["),
(token_types.string, "a"),
(token_types.string, "b"),
(token_types.list_end, "]")])
def test_list_2(self):
self.compare(
"""
key: [a,
b]""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.list_start, "["),
(token_types.string, "a"),
(token_types.string, "b"),
(token_types.list_end, "]")])
def test_list_3(self):
self.compare(
"""
key: [a, #b]
c]""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.list_start, "["),
(token_types.string, "a"),
(token_types.string, "c"),
(token_types.list_end, "]")])
def test_list_4(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key: [a #b]
c]""")
def test_list_5(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key: [a \\
c]""")
def test_list_6(self):
self.compare(
"""key: [a , b]""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.list_start, "["),
(token_types.string, "a"),
(token_types.string, "b"),
(token_types.list_end, "]")])
def test_expr_0(self):
self.compare(
"""
key:
if cond == 1: value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.ident, "cond"),
(token_types.ident, "=="),
(token_types.number, "1"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_expr_1(self):
self.compare(
"""
key:
if cond == 1: value1
value2""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.ident, "cond"),
(token_types.ident, "=="),
(token_types.number, "1"),
(token_types.separator, ":"),
(token_types.string, "value1"),
(token_types.string, "value2")])
def test_expr_2(self):
self.compare(
"""
key:
if cond=="1": value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.ident, "cond"),
(token_types.ident, "=="),
(token_types.string, "1"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_expr_3(self):
self.compare(
"""
key:
if cond==1.1: value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.ident, "cond"),
(token_types.ident, "=="),
(token_types.number, "1.1"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_expr_4(self):
self.compare(
"""
key:
if cond==1.1 and cond2 == "a": value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.ident, "cond"),
(token_types.ident, "=="),
(token_types.number, "1.1"),
(token_types.ident, "and"),
(token_types.ident, "cond2"),
(token_types.ident, "=="),
(token_types.string, "a"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_expr_5(self):
self.compare(
"""
key:
if (cond==1.1 ): value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.paren, "("),
(token_types.ident, "cond"),
(token_types.ident, "=="),
(token_types.number, "1.1"),
(token_types.paren, ")"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_expr_6(self):
self.compare(
"""
key:
if "\\ttest": value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.string, "\ttest"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_expr_7(self):
with self.assertRaises(parser.ParseError):
self.tokenize(
"""
key:
if 1A: value""")
def test_expr_8(self):
with self.assertRaises(parser.ParseError):
self.tokenize(
"""
key:
if 1a: value""")
def test_expr_9(self):
with self.assertRaises(parser.ParseError):
self.tokenize(
"""
key:
if 1.1.1: value""")
def test_expr_10(self):
self.compare(
"""
key:
if 1.: value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.number, "1."),
(token_types.separator, ":"),
(token_types.string, "value")])
if __name__ == "__main__":
unittest.main()
| mpl-2.0 |
silenceli/nova | nova/db/sqlalchemy/migrate_repo/versions/232_drop_dump_tables.py | 47 | 1090 | # Copyright 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
from sqlalchemy import Table
def upgrade(migrate_engine):
meta = MetaData(migrate_engine)
meta.reflect(migrate_engine)
table_names = ['compute_node_stats', 'compute_nodes', 'instance_actions',
'instance_actions_events', 'instance_faults', 'migrations']
for table_name in table_names:
table = Table('dump_' + table_name, meta)
table.drop(checkfirst=True)
def downgrade(migrate_engine):
pass
| apache-2.0 |
galaxys-cm7miui-kernel/ICS-kernel-SGS | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s failed-syscalls-by-pid.py [comm|pid]\n"
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
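# Typical invocation (record step assumed, mirroring perf's companion shell
# wrappers for these python scripts):
#
#   perf record -e raw_syscalls:sys_exit -a -- sleep 10
#   perf script -s failed-syscalls-by-pid.py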
| gpl-2.0 |
jlegendary/pybrain | examples/rl/environments/flexcube/flexcube_pgpe.py | 30 | 2997 | #!/usr/bin/env python
#########################################################################
# Reinforcement Learning with PGPE on the FlexCube Environment
#
# The FlexCube Environment is a Mass-Spring-System composed of 8 mass points.
# These resemble a cube with flexible edges.
#
# Control/Actions:
# The agent can control the 12 equilibrium edge lengths.
#
# A wide variety of sensors are available for observation and reward:
# - 12 edge lengths
# - 12 wanted edge lengths (the last action)
# - vertexes contact with floor
# - vertexes min height (distance of closest vertex to the floor)
# - distance to origin
# - distance and angle to target
#
# Tasks available are:
# - GrowTask, agent has to maximize the volume of the cube
# - JumpTask, agent has to maximize the distance of the lowest mass point during the episode
# - WalkTask, agent has to maximize the distance to the starting point
# - WalkDirectionTask, agent has to minimize the distance to a target point.
# - TargetTask, like the previous task but with several target points
#
# Requirements: pylab (for plotting only). If not available, comment the
# last 3 lines out
# Author: Frank Sehnke, [email protected]
#########################################################################
__author__ = "Frank Sehnke"
__version__ = '$Id$'
from pybrain.tools.example_tools import ExTools
from pybrain.structure.modules.tanhlayer import TanhLayer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.rl.environments.flexcube import FlexCubeEnvironment, WalkTask
from pybrain.rl.agents import OptimizationAgent
from pybrain.optimization import PGPE
from pybrain.rl.experiments import EpisodicExperiment
hiddenUnits = 4
batch = 1      # number of samples per learning step
prnts = 1      # number of learning steps between printed results
epis = 5000000 / batch / prnts  # number of rollouts
numbExp = 10   # number of experiments
et = ExTools(batch, prnts)  # tool for printing and plotting
env = None
for runs in range(numbExp):
# create environment
#Options: Bool(OpenGL), Bool(Realtime simu. while client is connected), ServerIP(default:localhost), Port(default:21560)
if env != None: env.closeSocket()
env = FlexCubeEnvironment()
# create task
task = WalkTask(env)
# create controller network
net = buildNetwork(len(task.getObservation()), hiddenUnits, env.actLen, outclass=TanhLayer)
# create agent with controller and learner (and its options)
agent = OptimizationAgent(net, PGPE(storeAllEvaluations = True))
et.agent = agent
# create the experiment
experiment = EpisodicExperiment(task, agent)
#Do the experiment
for updates in range(epis):
for i in range(prnts):
experiment.doEpisodes(batch)
et.printResults((agent.learner._allEvaluations)[-50:-1], runs, updates)
et.addExps()
et.showExps()
#To view what the simulation is doing at the moment, go to pybrain/rl/environments/flexcube/ and start renderer.py (python-openGL must be installed)
| bsd-3-clause |
rohit21122012/ASC-GMM | 2SVMClassify/libsvm-3.20/tools/easy.py | 152 | 2699 | #!/usr/bin/env python
import sys
import os
from subprocess import *
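# Pipeline implemented below: scale the training data, grid-search (C, gamma)
# with cross validation via grid.py, train the final model on the best pair,
# then optionally scale and predict the test set using the same range file.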
if len(sys.argv) <= 1:
print('Usage: {0} training_file [testing_file]'.format(sys.argv[0]))
raise SystemExit
# svm, grid, and gnuplot executable files
is_win32 = (sys.platform == 'win32')
if not is_win32:
svmscale_exe = "../svm-scale"
svmtrain_exe = "../svm-train"
svmpredict_exe = "../svm-predict"
grid_py = "./grid.py"
gnuplot_exe = "/usr/bin/gnuplot"
else:
# example for windows
svmscale_exe = r"..\windows\svm-scale.exe"
svmtrain_exe = r"..\windows\svm-train.exe"
svmpredict_exe = r"..\windows\svm-predict.exe"
gnuplot_exe = r"c:\tmp\gnuplot\binary\pgnuplot.exe"
grid_py = r".\grid.py"
assert os.path.exists(svmscale_exe),"svm-scale executable not found"
assert os.path.exists(svmtrain_exe),"svm-train executable not found"
assert os.path.exists(svmpredict_exe),"svm-predict executable not found"
assert os.path.exists(gnuplot_exe),"gnuplot executable not found"
assert os.path.exists(grid_py),"grid.py not found"
train_pathname = sys.argv[1]
assert os.path.exists(train_pathname),"training file not found"
file_name = os.path.split(train_pathname)[1]
scaled_file = file_name + ".scale"
model_file = file_name + ".model"
range_file = file_name + ".range"
if len(sys.argv) > 2:
test_pathname = sys.argv[2]
file_name = os.path.split(test_pathname)[1]
assert os.path.exists(test_pathname),"testing file not found"
scaled_test_file = file_name + ".scale"
predict_test_file = file_name + ".predict"
cmd = '{0} -s "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, train_pathname, scaled_file)
print('Scaling training data...')
Popen(cmd, shell = True, stdout = PIPE).communicate()
cmd = '{0} -svmtrain "{1}" -gnuplot "{2}" "{3}"'.format(grid_py, svmtrain_exe, gnuplot_exe, scaled_file)
print('Cross validation...')
f = Popen(cmd, shell = True, stdout = PIPE).stdout
line = ''
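# grid.py streams intermediate results; its final stdout line carries the best
# (c, g, rate) triple, so remember the previously read line for when EOF hits.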
while True:
last_line = line
line = f.readline()
if not line: break
c,g,rate = map(float,last_line.split())
print('Best c={0}, g={1} CV rate={2}'.format(c,g,rate))
cmd = '{0} -c {1} -g {2} "{3}" "{4}"'.format(svmtrain_exe,c,g,scaled_file,model_file)
print('Training...')
Popen(cmd, shell = True, stdout = PIPE).communicate()
print('Output model: {0}'.format(model_file))
if len(sys.argv) > 2:
cmd = '{0} -r "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, test_pathname, scaled_test_file)
print('Scaling testing data...')
Popen(cmd, shell = True, stdout = PIPE).communicate()
cmd = '{0} "{1}" "{2}" "{3}"'.format(svmpredict_exe, scaled_test_file, model_file, predict_test_file)
print('Testing...')
Popen(cmd, shell = True).communicate()
print('Output prediction: {0}'.format(predict_test_file))
| mit |
shaform/disconnectator | disconnectator/settings.py | 1 | 2166 | """
Django settings for disconnectator project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xog51mfdsx#oca2td2*1&c7%y)0bfvd7v7%mt&r8+l(g%1*i8a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
AUTH_USER_MODEL = 'annotator.Annotator'
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'annotator',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'disconnectator.urls'
WSGI_APPLICATION = 'disconnectator.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-US'
TIME_ZONE = 'Asia/Taipei'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = '/annotator/thanks/'
| lgpl-3.0 |
piece601/Openkore | src/scons-local-2.0.1/SCons/Tool/dvips.py | 61 | 3454 | """SCons.Tool.dvips
Tool-specific initialization for dvips.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/dvips.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Action
import SCons.Builder
import SCons.Tool.dvipdf
import SCons.Util
def DviPsFunction(target = None, source= None, env=None):
result = SCons.Tool.dvipdf.DviPdfPsFunction(PSAction,target,source,env)
return result
def DviPsStrFunction(target = None, source= None, env=None):
    """A strfunction for dvips that returns the appropriate
    command string for the no_exec options."""
if env.GetOption("no_exec"):
result = env.subst('$PSCOM',0,target,source)
else:
result = ''
return result
PSAction = None
DVIPSAction = None
PSBuilder = None
def generate(env):
"""Add Builders and construction variables for dvips to an Environment."""
global PSAction
if PSAction is None:
PSAction = SCons.Action.Action('$PSCOM', '$PSCOMSTR')
global DVIPSAction
if DVIPSAction is None:
DVIPSAction = SCons.Action.Action(DviPsFunction, strfunction = DviPsStrFunction)
global PSBuilder
if PSBuilder is None:
PSBuilder = SCons.Builder.Builder(action = PSAction,
prefix = '$PSPREFIX',
suffix = '$PSSUFFIX',
src_suffix = '.dvi',
src_builder = 'DVI',
single_source=True)
env['BUILDERS']['PostScript'] = PSBuilder
env['DVIPS'] = 'dvips'
env['DVIPSFLAGS'] = SCons.Util.CLVar('')
# I'm not quite sure I got the directories and filenames right for variant_dir
# We need to be in the correct directory for the sake of latex \includegraphics eps included files.
env['PSCOM'] = 'cd ${TARGET.dir} && $DVIPS $DVIPSFLAGS -o ${TARGET.file} ${SOURCE.file}'
env['PSPREFIX'] = ''
env['PSSUFFIX'] = '.ps'
def exists(env):
return env.Detect('dvips')
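# Minimal SConstruct sketch (assumes 'doc.dvi' is produced elsewhere):
#
#   env = Environment(tools=['dvips'])
#   env.PostScript(target='doc.ps', source='doc.dvi')  # runs dvips via $PSCOM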
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
defzzd/UserDataBase-Heroku | venv/Lib/site-packages/pip/commands/uninstall.py | 395 | 2203 | from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.basecommand import Command
from pip.exceptions import InstallationError
class UninstallCommand(Command):
"""
Uninstall packages.
pip is able to uninstall most installed packages. Known exceptions are:
- Pure distutils packages installed with ``python setup.py install``, which
leave behind no metadata to determine what files were installed.
- Script wrappers installed by ``python setup.py develop``.
"""
name = 'uninstall'
usage = """
%prog [options] <package> ...
%prog [options] -r <requirements file> ..."""
summary = 'Uninstall packages.'
def __init__(self, *args, **kw):
super(UninstallCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Uninstall all the packages listed in the given requirements file. '
'This option can be used multiple times.')
self.cmd_opts.add_option(
'-y', '--yes',
dest='yes',
action='store_true',
help="Don't ask for confirmation of uninstall deletions.")
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
session = self._build_session(options)
requirement_set = RequirementSet(
build_dir=None,
src_dir=None,
download_dir=None,
session=session,
)
for name in args:
requirement_set.add_requirement(
InstallRequirement.from_line(name))
for filename in options.requirements:
for req in parse_requirements(filename,
options=options, session=session):
requirement_set.add_requirement(req)
if not requirement_set.has_requirements:
raise InstallationError('You must give at least one requirement '
'to %(name)s (see "pip help %(name)s")' % dict(name=self.name))
requirement_set.uninstall(auto_confirm=options.yes)
| mit |
tjsavage/djangononrel-starter | django/contrib/admin/templatetags/log.py | 310 | 2270 | from django import template
from django.contrib.admin.models import LogEntry
register = template.Library()
class AdminLogNode(template.Node):
def __init__(self, limit, varname, user):
self.limit, self.varname, self.user = limit, varname, user
def __repr__(self):
return "<GetAdminLog Node>"
def render(self, context):
if self.user is None:
context[self.varname] = LogEntry.objects.all().select_related('content_type', 'user')[:self.limit]
else:
user_id = self.user
if not user_id.isdigit():
user_id = context[self.user].id
context[self.varname] = LogEntry.objects.filter(user__id__exact=user_id).select_related('content_type', 'user')[:self.limit]
return ''
class DoGetAdminLog:
"""
Populates a template variable with the admin log for the given criteria.
Usage::
{% get_admin_log [limit] as [varname] for_user [context_var_containing_user_obj] %}
Examples::
{% get_admin_log 10 as admin_log for_user 23 %}
{% get_admin_log 10 as admin_log for_user user %}
{% get_admin_log 10 as admin_log %}
Note that ``context_var_containing_user_obj`` can be a hard-coded integer
(user ID) or the name of a template context variable containing the user
object whose ID you want.
"""
def __init__(self, tag_name):
self.tag_name = tag_name
def __call__(self, parser, token):
tokens = token.contents.split()
if len(tokens) < 4:
raise template.TemplateSyntaxError("'%s' statements require two arguments" % self.tag_name)
if not tokens[1].isdigit():
raise template.TemplateSyntaxError("First argument in '%s' must be an integer" % self.tag_name)
if tokens[2] != 'as':
raise template.TemplateSyntaxError("Second argument in '%s' must be 'as'" % self.tag_name)
if len(tokens) > 4:
if tokens[4] != 'for_user':
raise template.TemplateSyntaxError("Fourth argument in '%s' must be 'for_user'" % self.tag_name)
return AdminLogNode(limit=tokens[1], varname=tokens[3], user=(len(tokens) > 5 and tokens[5] or None))
register.tag('get_admin_log', DoGetAdminLog('get_admin_log'))
| bsd-3-clause |
wonjohnchoi/EE122-Project3 | pox/dumb_l2_switch/dumb_l2_switch.py | 1 | 3696 | # Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.revent.revent import *
log = core.getLogger()
class LearningSwitch (EventMixin):
"""
The learning switch "brain" associated with a single OpenFlow switch.
When we see a packet, we'd like to output it on a port which will eventually
lead to the destination. To accomplish this, we build a table that maps
addresses to ports.
We populate the table by observing traffic. When we see a packet from some
source coming from some port, we know that source is out that port.
  When we want to forward traffic, we look up the destination in our table. If
we don't know the port, we simply send the message out all ports except the
one it came in on. (In the presence of loops, this is bad!).
In short, our algorithm looks like this:
For each new flow:
1) Use source address and port to update address/port table
2) Is destination multicast?
Yes:
2a) Flood the packet
No:
2b) Port for destination address in our address/port table?
No:
2ba) Flood the packet
Yes:
2bb1) Install flow table entry in the switch so that this flow
            goes out the appropriate port
      2bb2) Send buffered packet out appropriate port
"""
def __init__ (self, connection):
# Switch we'll be adding L2 learning switch capabilities to
self.connection = connection
# Our table
self.macToPort = {}
# We want to hear PacketIn messages, so we listen
self.listenTo(connection)
def _handle_PacketIn (self, event):
"""
Handles packet in messages from the switch to implement above algorithm.
"""
def flood ():
""" Floods the packet """
msg = of.ofp_packet_out()
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
msg.buffer_id = event.ofp.buffer_id
msg.in_port = event.port
self.connection.send(msg)
packet = event.parse()
self.macToPort[packet.src] = event.port # 1
if packet.dst.isMulticast():
flood() # 2a
else:
if packet.dst not in self.macToPort:
log.debug("port for %s unknown -- flooding" % (packet.dst,))
flood() # 2ba
else:
# 2bb
port = self.macToPort[packet.dst]
log.debug("installing flow for %s.%i -> %s.%i" %
(packet.src, event.port, packet.dst, port))
msg = of.ofp_flow_mod()
msg.match = of.ofp_match.from_packet(packet)
msg.idle_timeout = 10
msg.hard_timeout = 30
msg.actions.append(of.ofp_action_output(port = port))
msg.buffer_id = event.ofp.buffer_id
self.connection.send(msg)
class dumb_l2_switch (EventMixin):
"""
Waits for OpenFlow switches to connect and makes them learning switches.
"""
def __init__ (self):
self.listenTo(core.openflow)
def _handle_ConnectionUp (self, event):
log.debug("Connection %s" % (event.connection,))
LearningSwitch(event.connection)
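# Hypothetical wiring (the exact bootstrap varies by POX version): registering
# the component, e.g.
#
#   core.registerNew(dumb_l2_switch)
#
# wraps every switch that connects in its own LearningSwitch instance.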
| gpl-3.0 |
Andr3iC/courtlistener | cl/people_db/migrations/0005_auto_20160318_1806.py | 2 | 1776 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people_db', '0004_load_races'),
]
operations = [
migrations.AddField(
model_name='person',
name='religion',
field=models.CharField(blank=True, max_length=2, choices=[(b'ca', b'Catholic'), (b'pr', b'Protestant'), (b'je', b'Jewish'), (b'mu', b'Muslim'), (b'at', b'Atheist'), (b'ag', b'Agnostic'), (b'mo', b'Mormon'), (b'bu', b'Buddhist'), (b'hi', b'Hindu')]),
),
migrations.AlterField(
model_name='education',
name='degree_level',
field=models.CharField(blank=True, max_length=3, choices=[(b'ba', b"Bachelor's (e.g. B.A.)"), (b'ma', b"Master's (e.g. M.A.)"), (b'jd', b'Juris Doctor (J.D.)'), (b'llm', b'Master of Laws (LL.M)'), (b'llb', b'Bachelor of Laws (e.g. LL.B)'), (b'jsd', b'Doctor of Law (J.S.D)'), (b'phd', b'Doctor of Philosophy (PhD)'), (b'aa', b'Associate (e.g. A.A.)'), (b'md', b'Medical Degree (M.D.)'), (b'mba', b'Master of Business Administration (M.B.A.)')]),
),
migrations.AlterField(
model_name='person',
name='gender',
field=models.CharField(blank=True, max_length=2, choices=[(b'm', b'Male'), (b'f', b'Female'), (b'o', b'Other')]),
),
migrations.AlterField(
model_name='race',
name='race',
field=models.CharField(unique=True, max_length=5, choices=[(b'w', b'White'), (b'b', b'Black or African American'), (b'i', b'American Indian or Alaska Native'), (b'a', b'Asian'), (b'p', b'Native Hawaiian or Other Pacific Islander'), (b'h', b'Hispanic/Latino')]),
),
]
| agpl-3.0 |
rob-smallshire/asq | asq/test/test_last_or_default.py | 1 | 2437 | import unittest
from asq.queryables import Queryable
__author__ = "Sixty North"
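# Semantics under test: last_or_default returns the final element (or the final
# element satisfying the optional predicate), falling back to the supplied
# default when the sequence is empty or nothing matches.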
class TestLastOrDefault(unittest.TestCase):
def test_last_or_default(self):
a = [42, 45, 23, 12]
b = Queryable(a).last_or_default(37)
self.assertEqual(b, 12)
def test_last_or_default_non_sequence(self):
def series():
yield 42
yield 45
yield 23
yield 12
a = series()
b = Queryable(a).last_or_default(37)
self.assertEqual(b, 12)
def test_last_or_default_empty(self):
a = []
b = Queryable(a).last_or_default(37)
self.assertEqual(b, 37)
def test_last_or_default_non_sequence_empty(self):
def series():
if False:
yield 42
yield 45
yield 23
yield 12
a = series()
b = Queryable(a).last_or_default(37)
self.assertEqual(b, 37)
def test_last_or_default_predicate(self):
a = [37, 54, 57, 23, 12]
b = Queryable(a).last_or_default(12, lambda x: x >= 50)
self.assertEqual(b, 57)
def test_last_or_default_non_sequence_predicate(self):
def series():
yield 42
yield 45
yield 23
yield 12
a = series()
b = Queryable(a).last_or_default(37, lambda x: x > 15)
self.assertEqual(b, 23)
def test_last_or_default_predicate_empty(self):
a = []
b = Queryable(a).last_or_default(12, lambda x: x >= 50)
self.assertEqual(b, 12)
def test_last_or_default_non_sequence_predicate_empty(self):
def series():
if False:
yield 42
yield 45
yield 23
yield 12
a = series()
b = Queryable(a).last_or_default(37, lambda x: x > 15)
self.assertEqual(b, 37)
def test_last_or_default_predicate_missing(self):
a = [37, 42, 23, 12]
b = Queryable(a).last_or_default(78, lambda x: x >= 50)
self.assertEqual(b, 78)
def test_last_or_default_predicate_not_callable(self):
a = [37, 54, 57, 23, 12]
self.assertRaises(TypeError, lambda: Queryable(a).last_or_default(12, "not callable"))
def test_last_or_default_closed(self):
b = Queryable([])
b.close()
self.assertRaises(ValueError, lambda: b.last_or_default(37))
| mit |
scrollback/kuma | vendor/packages/pyparsing/docs/examples/simpleArith.py | 16 | 2319 | #
# simpleArith.py
#
# Example of defining an arithmetic expression parser using
# the operatorPrecedence helper method in pyparsing.
#
# Copyright 2006, by Paul McGuire
#
from pyparsing import *
integer = Word(nums).setParseAction(lambda t:int(t[0]))
variable = Word(alphas,exact=1)
operand = integer | variable
expop = Literal('^')
signop = oneOf('+ -')
multop = oneOf('* /')
plusop = oneOf('+ -')
factop = Literal('!')
# To use the operatorPrecedence helper:
# 1. Define the "atom" operand term of the grammar.
# For this simple grammar, the smallest operand is either
# an integer or a variable. This will be the first argument
# to the operatorPrecedence method.
# 2. Define a list of tuples for each level of operator
# precedence. Each tuple is of the form
# (opExpr, numTerms, rightLeftAssoc, parseAction), where
# - opExpr is the pyparsing expression for the operator;
# may also be a string, which will be converted to a Literal
# - numTerms is the number of terms for this operator (must
# be 1 or 2)
# - rightLeftAssoc is the indicator whether the operator is
# right or left associative, using the pyparsing-defined
# constants opAssoc.RIGHT and opAssoc.LEFT.
# - parseAction is the parse action to be associated with
# expressions matching this operator expression (the
# parse action tuple member may be omitted)
# 3. Call operatorPrecedence passing the operand expression and
# the operator precedence list, and save the returned value
# as the generated pyparsing expression. You can then use
# this expression to parse input strings, or incorporate it
# into a larger, more complex grammar.
#
expr = operatorPrecedence( operand,
[("!", 1, opAssoc.LEFT),
("^", 2, opAssoc.RIGHT),
(signop, 1, opAssoc.RIGHT),
(multop, 2, opAssoc.LEFT),
(plusop, 2, opAssoc.LEFT),]
)
test = ["9 + 2 + 3",
"9 + 2 * 3",
"(9 + 2) * 3",
"(9 + -2) * 3",
"(9 + -2) * 3^2^2",
"(9! + -2) * 3^2^2",
"M*X + B",
"M*(X + B)",
"1+2*-3^4*5+-+-6",]
for t in test:
print t
print expr.parseString(t)
print
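# For example, "9 + 2 * 3" should come back grouped by precedence as
# [[9, '+', [2, '*', 3]]], since multiplication binds tighter than addition.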
| mpl-2.0 |
toobaz/pandas | pandas/tests/io/excel/test_xlsxwriter.py | 2 | 1980 | import warnings
import pytest
from pandas import DataFrame
from pandas.util.testing import ensure_clean
from pandas.io.excel import ExcelWriter
xlsxwriter = pytest.importorskip("xlsxwriter")
pytestmark = pytest.mark.parametrize("ext", [".xlsx"])
def test_column_format(ext):
# Test that column formats are applied to cells. Test for issue #9167.
# Applicable to xlsxwriter only.
with warnings.catch_warnings():
# Ignore the openpyxl lxml warning.
warnings.simplefilter("ignore")
openpyxl = pytest.importorskip("openpyxl")
with ensure_clean(ext) as path:
frame = DataFrame({"A": [123456, 123456], "B": [123456, 123456]})
writer = ExcelWriter(path)
frame.to_excel(writer)
# Add a number format to col B and ensure it is applied to cells.
num_format = "#,##0"
write_workbook = writer.book
write_worksheet = write_workbook.worksheets()[0]
col_format = write_workbook.add_format({"num_format": num_format})
write_worksheet.set_column("B:B", None, col_format)
writer.save()
read_workbook = openpyxl.load_workbook(path)
try:
read_worksheet = read_workbook["Sheet1"]
except TypeError:
# compat
read_worksheet = read_workbook.get_sheet_by_name(name="Sheet1")
# Get the number format from the cell.
try:
cell = read_worksheet["B2"]
except TypeError:
# compat
cell = read_worksheet.cell("B2")
try:
read_num_format = cell.number_format
except Exception:
read_num_format = cell.style.number_format._format_code
assert read_num_format == num_format
def test_write_append_mode_raises(ext):
msg = "Append mode is not supported with xlsxwriter!"
with ensure_clean(ext) as f:
with pytest.raises(ValueError, match=msg):
ExcelWriter(f, engine="xlsxwriter", mode="a")
| bsd-3-clause |
chen0510566/MissionPlanner | Lib/xdrlib.py | 55 | 5794 | """Implements (a subset of) Sun XDR -- eXternal Data Representation.
See: RFC 1014
"""
import struct
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
__all__ = ["Error", "Packer", "Unpacker", "ConversionError"]
# exceptions
class Error(Exception):
"""Exception class for this module. Use:
except xdrlib.Error, var:
# var has the Error instance for the exception
Public ivars:
msg -- contains the message
"""
def __init__(self, msg):
self.msg = msg
def __repr__(self):
return repr(self.msg)
def __str__(self):
return str(self.msg)
class ConversionError(Error):
pass
class Packer:
"""Pack various data representations into a buffer."""
def __init__(self):
self.reset()
def reset(self):
self.__buf = _StringIO()
def get_buffer(self):
return self.__buf.getvalue()
# backwards compatibility
get_buf = get_buffer
def pack_uint(self, x):
self.__buf.write(struct.pack('>L', x))
def pack_int(self, x):
self.__buf.write(struct.pack('>l', x))
pack_enum = pack_int
def pack_bool(self, x):
if x: self.__buf.write('\0\0\0\1')
else: self.__buf.write('\0\0\0\0')
def pack_uhyper(self, x):
self.pack_uint(x>>32 & 0xffffffffL)
self.pack_uint(x & 0xffffffffL)
pack_hyper = pack_uhyper
def pack_float(self, x):
try: self.__buf.write(struct.pack('>f', x))
except struct.error, msg:
raise ConversionError, msg
def pack_double(self, x):
try: self.__buf.write(struct.pack('>d', x))
except struct.error, msg:
raise ConversionError, msg
def pack_fstring(self, n, s):
if n < 0:
raise ValueError, 'fstring size must be nonnegative'
data = s[:n]
n = ((n+3)//4)*4
data = data + (n - len(data)) * '\0'
self.__buf.write(data)
pack_fopaque = pack_fstring
def pack_string(self, s):
n = len(s)
self.pack_uint(n)
self.pack_fstring(n, s)
pack_opaque = pack_string
pack_bytes = pack_string
def pack_list(self, list, pack_item):
for item in list:
self.pack_uint(1)
pack_item(item)
self.pack_uint(0)
def pack_farray(self, n, list, pack_item):
if len(list) != n:
raise ValueError, 'wrong array size'
for item in list:
pack_item(item)
def pack_array(self, list, pack_item):
n = len(list)
self.pack_uint(n)
self.pack_farray(n, list, pack_item)
class Unpacker:
"""Unpacks various data representations from the given buffer."""
def __init__(self, data):
self.reset(data)
def reset(self, data):
self.__buf = data
self.__pos = 0
def get_position(self):
return self.__pos
def set_position(self, position):
self.__pos = position
def get_buffer(self):
return self.__buf
def done(self):
if self.__pos < len(self.__buf):
raise Error('unextracted data remains')
def unpack_uint(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
x = struct.unpack('>L', data)[0]
try:
return int(x)
except OverflowError:
return x
def unpack_int(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
return struct.unpack('>l', data)[0]
unpack_enum = unpack_int
def unpack_bool(self):
return bool(self.unpack_int())
def unpack_uhyper(self):
hi = self.unpack_uint()
lo = self.unpack_uint()
return long(hi)<<32 | lo
def unpack_hyper(self):
x = self.unpack_uhyper()
if x >= 0x8000000000000000L:
x = x - 0x10000000000000000L
return x
def unpack_float(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
return struct.unpack('>f', data)[0]
def unpack_double(self):
i = self.__pos
self.__pos = j = i+8
data = self.__buf[i:j]
if len(data) < 8:
raise EOFError
return struct.unpack('>d', data)[0]
def unpack_fstring(self, n):
if n < 0:
raise ValueError, 'fstring size must be nonnegative'
i = self.__pos
j = i + (n+3)//4*4
if j > len(self.__buf):
raise EOFError
self.__pos = j
return self.__buf[i:i+n]
unpack_fopaque = unpack_fstring
def unpack_string(self):
n = self.unpack_uint()
return self.unpack_fstring(n)
unpack_opaque = unpack_string
unpack_bytes = unpack_string
def unpack_list(self, unpack_item):
list = []
while 1:
x = self.unpack_uint()
if x == 0: break
if x != 1:
raise ConversionError, '0 or 1 expected, got %r' % (x,)
item = unpack_item()
list.append(item)
return list
def unpack_farray(self, n, unpack_item):
list = []
for i in range(n):
list.append(unpack_item())
return list
def unpack_array(self, unpack_item):
n = self.unpack_uint()
return self.unpack_farray(n, unpack_item)
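# Illustrative round trip using the classes above:
#
#   p = Packer()
#   p.pack_string('spam')
#   u = Unpacker(p.get_buffer())
#   assert u.unpack_string() == 'spam'
#   u.done()  # raises Error if unextracted bytes remain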
| gpl-3.0 |
doguitar/crossword | cherrypy/tutorial/tut05_derived_objects.py | 22 | 2266 | """
Tutorial - Object inheritance
You are free to derive your request handler classes from any base
class you wish. In most real-world applications, you will probably
want to create a central base class used for all your pages, which takes
care of things like printing a common page header and footer.
"""
import cherrypy
class Page:
# Store the page title in a class attribute
title = 'Untitled Page'
def header(self):
return '''
<html>
<head>
<title>%s</title>
            </head>
<body>
<h2>%s</h2>
''' % (self.title, self.title)
def footer(self):
return '''
</body>
</html>
'''
# Note that header and footer don't get their exposed attributes
# set to True. This isn't necessary since the user isn't supposed
# to call header or footer directly; instead, we'll call them from
# within the actually exposed handler methods defined in this
# class' subclasses.
class HomePage(Page):
# Different title for this page
title = 'Tutorial 5'
def __init__(self):
# create a subpage
self.another = AnotherPage()
def index(self):
# Note that we call the header and footer methods inherited
# from the Page class!
return self.header() + '''
<p>
Isn't this exciting? There's
<a href="./another/">another page</a>, too!
</p>
''' + self.footer()
index.exposed = True
class AnotherPage(Page):
title = 'Another Page'
def index(self):
return self.header() + '''
<p>
And this is the amazing second page!
</p>
''' + self.footer()
index.exposed = True
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(HomePage(), config=tutconf)
else:
# This branch is for the test suite; you can ignore it.
cherrypy.tree.mount(HomePage(), config=tutconf)
| mit |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/operations/_web_application_firewall_policies_operations.py | 1 | 20786 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class WebApplicationFirewallPoliciesOperations(object):
"""WebApplicationFirewallPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.WebApplicationFirewallPolicyListResult"]
"""Lists all of the protection policies within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WebApplicationFirewallPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_02_01.models.WebApplicationFirewallPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('WebApplicationFirewallPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'} # type: ignore
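    # Illustrative call (client construction elided; attribute name assumed):
    #
    #   for policy in network_client.web_application_firewall_policies.list("my-rg"):
    #       print(policy.name)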
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.WebApplicationFirewallPolicyListResult"]
"""Gets all the WAF policies in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WebApplicationFirewallPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_02_01.models.WebApplicationFirewallPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('WebApplicationFirewallPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'} # type: ignore
def get(
self,
resource_group_name, # type: str
policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.WebApplicationFirewallPolicy"
"""Retrieve protection policy with specified name within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WebApplicationFirewallPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_02_01.models.WebApplicationFirewallPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
def create_or_update(
self,
resource_group_name, # type: str
policy_name, # type: str
parameters, # type: "_models.WebApplicationFirewallPolicy"
**kwargs # type: Any
):
# type: (...) -> "_models.WebApplicationFirewallPolicy"
"""Creates or update policy with specified rule set name within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:param parameters: Policy to be created.
:type parameters: ~azure.mgmt.network.v2019_02_01.models.WebApplicationFirewallPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WebApplicationFirewallPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_02_01.models.WebApplicationFirewallPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'WebApplicationFirewallPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
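    # Usage sketch (illustrative; the model import path and minimal parameters
    # shown here are assumptions, not taken from this file):
    #
    #     from azure.mgmt.network.v2019_02_01 import models
    #     policy = client.web_application_firewall_policies.create_or_update(
    #         resource_group_name="my-rg",
    #         policy_name="my-waf-policy",
    #         parameters=models.WebApplicationFirewallPolicy(location="westus"),
    #     )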
def _delete_initial(
self,
resource_group_name, # type: str
policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes Policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
policy_name=policy_name,
                cls=lambda x, y, z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
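    # Usage sketch (illustrative): begin_delete returns an LROPoller, so the
    # caller waits on result() for the long-running delete to finish:
    #
    #     poller = client.web_application_firewall_policies.begin_delete(
    #         resource_group_name="my-rg",
    #         policy_name="my-waf-policy",
    #     )
    #     poller.result()  # returns None once the operation completes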
| mit |
sebgoa/client-python | kubernetes/client/models/extensions_v1beta1_deployment_condition.py | 2 | 7599 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ExtensionsV1beta1DeploymentCondition(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, last_transition_time=None, last_update_time=None, message=None, reason=None, status=None, type=None):
"""
ExtensionsV1beta1DeploymentCondition - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'last_transition_time': 'datetime',
'last_update_time': 'datetime',
'message': 'str',
'reason': 'str',
'status': 'str',
'type': 'str'
}
self.attribute_map = {
'last_transition_time': 'lastTransitionTime',
'last_update_time': 'lastUpdateTime',
'message': 'message',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
self._last_transition_time = last_transition_time
self._last_update_time = last_update_time
self._message = message
self._reason = reason
self._status = status
self._type = type
@property
def last_transition_time(self):
"""
Gets the last_transition_time of this ExtensionsV1beta1DeploymentCondition.
Last time the condition transitioned from one status to another.
:return: The last_transition_time of this ExtensionsV1beta1DeploymentCondition.
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""
Sets the last_transition_time of this ExtensionsV1beta1DeploymentCondition.
Last time the condition transitioned from one status to another.
:param last_transition_time: The last_transition_time of this ExtensionsV1beta1DeploymentCondition.
:type: datetime
"""
self._last_transition_time = last_transition_time
@property
def last_update_time(self):
"""
Gets the last_update_time of this ExtensionsV1beta1DeploymentCondition.
The last time this condition was updated.
:return: The last_update_time of this ExtensionsV1beta1DeploymentCondition.
:rtype: datetime
"""
return self._last_update_time
@last_update_time.setter
def last_update_time(self, last_update_time):
"""
Sets the last_update_time of this ExtensionsV1beta1DeploymentCondition.
The last time this condition was updated.
:param last_update_time: The last_update_time of this ExtensionsV1beta1DeploymentCondition.
:type: datetime
"""
self._last_update_time = last_update_time
@property
def message(self):
"""
Gets the message of this ExtensionsV1beta1DeploymentCondition.
        A human-readable message indicating details about the transition.
:return: The message of this ExtensionsV1beta1DeploymentCondition.
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""
Sets the message of this ExtensionsV1beta1DeploymentCondition.
        A human-readable message indicating details about the transition.
:param message: The message of this ExtensionsV1beta1DeploymentCondition.
:type: str
"""
self._message = message
@property
def reason(self):
"""
Gets the reason of this ExtensionsV1beta1DeploymentCondition.
The reason for the condition's last transition.
:return: The reason of this ExtensionsV1beta1DeploymentCondition.
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""
Sets the reason of this ExtensionsV1beta1DeploymentCondition.
The reason for the condition's last transition.
:param reason: The reason of this ExtensionsV1beta1DeploymentCondition.
:type: str
"""
self._reason = reason
@property
def status(self):
"""
Gets the status of this ExtensionsV1beta1DeploymentCondition.
Status of the condition, one of True, False, Unknown.
:return: The status of this ExtensionsV1beta1DeploymentCondition.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this ExtensionsV1beta1DeploymentCondition.
Status of the condition, one of True, False, Unknown.
:param status: The status of this ExtensionsV1beta1DeploymentCondition.
:type: str
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`")
self._status = status
@property
def type(self):
"""
Gets the type of this ExtensionsV1beta1DeploymentCondition.
Type of deployment condition.
:return: The type of this ExtensionsV1beta1DeploymentCondition.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this ExtensionsV1beta1DeploymentCondition.
Type of deployment condition.
:param type: The type of this ExtensionsV1beta1DeploymentCondition.
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`")
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ExtensionsV1beta1DeploymentCondition):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
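# Minimal usage sketch (illustrative only; the field values below are made up):
if __name__ == "__main__":
    condition = ExtensionsV1beta1DeploymentCondition(
        type="Available",
        status="True",
        reason="MinimumReplicasAvailable",
        message="Deployment has minimum availability.",
    )
    print(condition.to_dict())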
| apache-2.0 |
googleapis/googleapis-gen | google/cloud/websecurityscanner/v1beta/websecurityscanner-v1beta-py/google/cloud/websecurityscanner_v1beta/__init__.py | 2 | 3295 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.web_security_scanner import WebSecurityScannerClient
from .services.web_security_scanner import WebSecurityScannerAsyncClient
from .types.crawled_url import CrawledUrl
from .types.finding import Finding
from .types.finding_addon import Form
from .types.finding_addon import OutdatedLibrary
from .types.finding_addon import ViolatingResource
from .types.finding_addon import VulnerableHeaders
from .types.finding_addon import VulnerableParameters
from .types.finding_addon import Xss
from .types.finding_type_stats import FindingTypeStats
from .types.scan_config import ScanConfig
from .types.scan_config_error import ScanConfigError
from .types.scan_run import ScanRun
from .types.scan_run_error_trace import ScanRunErrorTrace
from .types.scan_run_warning_trace import ScanRunWarningTrace
from .types.web_security_scanner import CreateScanConfigRequest
from .types.web_security_scanner import DeleteScanConfigRequest
from .types.web_security_scanner import GetFindingRequest
from .types.web_security_scanner import GetScanConfigRequest
from .types.web_security_scanner import GetScanRunRequest
from .types.web_security_scanner import ListCrawledUrlsRequest
from .types.web_security_scanner import ListCrawledUrlsResponse
from .types.web_security_scanner import ListFindingsRequest
from .types.web_security_scanner import ListFindingsResponse
from .types.web_security_scanner import ListFindingTypeStatsRequest
from .types.web_security_scanner import ListFindingTypeStatsResponse
from .types.web_security_scanner import ListScanConfigsRequest
from .types.web_security_scanner import ListScanConfigsResponse
from .types.web_security_scanner import ListScanRunsRequest
from .types.web_security_scanner import ListScanRunsResponse
from .types.web_security_scanner import StartScanRunRequest
from .types.web_security_scanner import StopScanRunRequest
from .types.web_security_scanner import UpdateScanConfigRequest
__all__ = (
'WebSecurityScannerAsyncClient',
'CrawledUrl',
'CreateScanConfigRequest',
'DeleteScanConfigRequest',
'Finding',
'FindingTypeStats',
'Form',
'GetFindingRequest',
'GetScanConfigRequest',
'GetScanRunRequest',
'ListCrawledUrlsRequest',
'ListCrawledUrlsResponse',
'ListFindingTypeStatsRequest',
'ListFindingTypeStatsResponse',
'ListFindingsRequest',
'ListFindingsResponse',
'ListScanConfigsRequest',
'ListScanConfigsResponse',
'ListScanRunsRequest',
'ListScanRunsResponse',
'OutdatedLibrary',
'ScanConfig',
'ScanConfigError',
'ScanRun',
'ScanRunErrorTrace',
'ScanRunWarningTrace',
'StartScanRunRequest',
'StopScanRunRequest',
'UpdateScanConfigRequest',
'ViolatingResource',
'VulnerableHeaders',
'VulnerableParameters',
'WebSecurityScannerClient',
'Xss',
)
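# Usage sketch (illustrative; assumes Google Cloud credentials are configured
# and that "projects/my-project" is a hypothetical parent resource):
#
#     client = WebSecurityScannerClient()
#     for scan_config in client.list_scan_configs(parent="projects/my-project"):
#         print(scan_config.display_name)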
| apache-2.0 |
metalshark/lesscss-python | test/test_lessc.py | 1 | 2006 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of lesscss-python.
#
# lesscss-python is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# lesscss-python is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with lesscss-python. If not, see <http://www.gnu.org/licenses/>.
"""
Copyright (c) 2011 Evgeny V. Generalov.
mailto:[email protected]
"""
import unittest
import sys
import subprocess
from lesscss import lessc
class TestLessc(unittest.TestCase):
def setUp(self):
self.python = sys.executable
self.lessc = lessc.__file__
def test_should_compile_a_file(self):
css = self._run([self.python, self.lessc, 'test_file.less'])
self.assertEqual(css, '''a { text-decoration: none; }''')
def test_should_compile_from_stdin(self):
less = '''a {text-decoration: none}'''
css = self._run([self.python, self.lessc], input=less)
self.assertEqual(css, '''a { text-decoration: none; }''')
def _run(self, cmd, input=None, *args, **kwargs):
        proc = subprocess.Popen(cmd,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, *args, **kwargs)
return ''.join(proc.communicate(input=input))
def suite():
test_cases = (TestLessc,)
suite = unittest.TestSuite()
for tests in map(unittest.TestLoader().loadTestsFromTestCase, test_cases):
suite.addTests(tests)
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| gpl-3.0 |
wd5/jangr | _django/core/management/commands/startapp.py | 321 | 1909 | import os
from django.core.management.base import copy_helper, CommandError, LabelCommand
from django.utils.importlib import import_module
class Command(LabelCommand):
help = "Creates a Django app directory structure for the given app name in the current directory."
args = "[appname]"
label = 'application name'
requires_model_validation = False
# Can't import settings during this command, because they haven't
# necessarily been created.
can_import_settings = False
def handle_label(self, app_name, directory=None, **options):
if directory is None:
directory = os.getcwd()
# Determine the project_name by using the basename of directory,
# which should be the full path of the project directory (or the
# current directory if no directory was passed).
project_name = os.path.basename(directory)
if app_name == project_name:
raise CommandError("You cannot create an app with the same name"
" (%r) as your project." % app_name)
# Check that the app_name cannot be imported.
try:
import_module(app_name)
except ImportError:
pass
else:
raise CommandError("%r conflicts with the name of an existing Python module and cannot be used as an app name. Please try another name." % app_name)
copy_helper(self.style, 'app', app_name, directory, project_name)
class ProjectCommand(Command):
help = ("Creates a Django app directory structure for the given app name"
" in this project's directory.")
def __init__(self, project_directory):
super(ProjectCommand, self).__init__()
self.project_directory = project_directory
def handle_label(self, app_name, **options):
super(ProjectCommand, self).handle_label(app_name, self.project_directory, **options)
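# Both commands are invoked through Django's management entry points, e.g.
# (hypothetical app name):
#
#     python manage.py startapp polls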
| bsd-3-clause |
inmcm/Simon_Speck_Ciphers | Python/simonspeckciphers/simon/simon.py | 1 | 13367 | from __future__ import print_function
from collections import deque
__author__ = 'inmcm'
class SimonCipher(object):
"""Simon Block Cipher Object"""
# Z Arrays (stored bit reversed for easier usage)
z0 = 0b01100111000011010100100010111110110011100001101010010001011111
z1 = 0b01011010000110010011111011100010101101000011001001111101110001
z2 = 0b11001101101001111110001000010100011001001011000000111011110101
z3 = 0b11110000101100111001010001001000000111101001100011010111011011
z4 = 0b11110111001001010011000011101000000100011011010110011110001011
# valid cipher configurations stored:
# block_size:{key_size:(number_rounds,z sequence)}
__valid_setups = {32: {64: (32, z0)},
48: {72: (36, z0), 96: (36, z1)},
64: {96: (42, z2), 128: (44, z3)},
96: {96: (52, z2), 144: (54, z3)},
128: {128: (68, z2), 192: (69, z3), 256: (72, z4)}}
__valid_modes = ['ECB', 'CTR', 'CBC', 'PCBC', 'CFB', 'OFB']
def __init__(self, key, key_size=128, block_size=128, mode='ECB', init=0, counter=0):
"""
Initialize an instance of the Simon block cipher.
:param key: Int representation of the encryption key
:param key_size: Int representing the encryption key in bits
:param block_size: Int representing the block size in bits
:param mode: String representing which cipher block mode the object should initialize with
:param init: IV for CTR, CBC, PCBC, CFB, and OFB modes
:param counter: Initial Counter value for CTR mode
:return: None
"""
# Setup block/word size
try:
self.possible_setups = self.__valid_setups[block_size]
self.block_size = block_size
self.word_size = self.block_size >> 1
except KeyError:
print('Invalid block size!')
print('Please use one of the following block sizes:', [x for x in self.__valid_setups.keys()])
raise
# Setup Number of Rounds, Z Sequence, and Key Size
try:
self.rounds, self.zseq = self.possible_setups[key_size]
self.key_size = key_size
except KeyError:
            print('Invalid key size for the selected block size!')
print('Please use one of the following key sizes:', [x for x in self.possible_setups.keys()])
raise
# Create Properly Sized bit mask for truncating addition and left shift outputs
self.mod_mask = (2 ** self.word_size) - 1
# Parse the given iv and truncate it to the block length
try:
self.iv = init & ((2 ** self.block_size) - 1)
self.iv_upper = self.iv >> self.word_size
self.iv_lower = self.iv & self.mod_mask
except (ValueError, TypeError):
print('Invalid IV Value!')
print('Please Provide IV as int')
raise
# Parse the given Counter and truncate it to the block length
try:
self.counter = counter & ((2 ** self.block_size) - 1)
except (ValueError, TypeError):
print('Invalid Counter Value!')
print('Please Provide Counter as int')
raise
# Check Cipher Mode
try:
position = self.__valid_modes.index(mode)
self.mode = self.__valid_modes[position]
except ValueError:
print('Invalid cipher mode!')
print('Please use one of the following block cipher modes:', self.__valid_modes)
raise
# Parse the given key and truncate it to the key length
try:
self.key = key & ((2 ** self.key_size) - 1)
except (ValueError, TypeError):
print('Invalid Key Value!')
print('Please Provide Key as int')
raise
# Pre-compile key schedule
m = self.key_size // self.word_size
self.key_schedule = []
# Create list of subwords from encryption key
k_init = [((self.key >> (self.word_size * ((m-1) - x))) & self.mod_mask) for x in range(m)]
k_reg = deque(k_init) # Use queue to manage key subwords
round_constant = self.mod_mask ^ 3 # Round Constant is 0xFFFF..FC
# Generate all round keys
for x in range(self.rounds):
rs_3 = ((k_reg[0] << (self.word_size - 3)) + (k_reg[0] >> 3)) & self.mod_mask
if m == 4:
rs_3 = rs_3 ^ k_reg[2]
rs_1 = ((rs_3 << (self.word_size - 1)) + (rs_3 >> 1)) & self.mod_mask
c_z = ((self.zseq >> (x % 62)) & 1) ^ round_constant
new_k = c_z ^ rs_1 ^ rs_3 ^ k_reg[m - 1]
self.key_schedule.append(k_reg.pop())
k_reg.appendleft(new_k)
def encrypt_round(self, x, y, k):
"""
Complete One Feistel Round
:param x: Upper bits of current plaintext
:param y: Lower bits of current plaintext
:param k: Round Key
:return: Upper and Lower ciphertext segments
"""
# Generate all circular shifts
ls_1_x = ((x >> (self.word_size - 1)) + (x << 1)) & self.mod_mask
ls_8_x = ((x >> (self.word_size - 8)) + (x << 8)) & self.mod_mask
ls_2_x = ((x >> (self.word_size - 2)) + (x << 2)) & self.mod_mask
# XOR Chain
xor_1 = (ls_1_x & ls_8_x) ^ y
xor_2 = xor_1 ^ ls_2_x
new_x = k ^ xor_2
return new_x, x
def decrypt_round(self, x, y, k):
"""Complete One Inverse Feistel Round
:param x: Upper bits of current ciphertext
:param y: Lower bits of current ciphertext
:param k: Round Key
:return: Upper and Lower plaintext segments
"""
# Generate all circular shifts
ls_1_y = ((y >> (self.word_size - 1)) + (y << 1)) & self.mod_mask
ls_8_y = ((y >> (self.word_size - 8)) + (y << 8)) & self.mod_mask
ls_2_y = ((y >> (self.word_size - 2)) + (y << 2)) & self.mod_mask
# Inverse XOR Chain
xor_1 = k ^ x
xor_2 = xor_1 ^ ls_2_y
new_x = (ls_1_y & ls_8_y) ^ xor_2
return y, new_x
def encrypt(self, plaintext):
"""
Process new plaintext into ciphertext based on current cipher object setup
:param plaintext: Int representing value to encrypt
:return: Int representing encrypted value
"""
try:
b = (plaintext >> self.word_size) & self.mod_mask
a = plaintext & self.mod_mask
except TypeError:
print('Invalid plaintext!')
print('Please provide plaintext as int')
raise
if self.mode == 'ECB':
b, a = self.encrypt_function(b, a)
elif self.mode == 'CTR':
true_counter = self.iv + self.counter
d = (true_counter >> self.word_size) & self.mod_mask
c = true_counter & self.mod_mask
d, c = self.encrypt_function(d, c)
b ^= d
a ^= c
self.counter += 1
elif self.mode == 'CBC':
b ^= self.iv_upper
a ^= self.iv_lower
b, a = self.encrypt_function(b, a)
self.iv_upper = b
self.iv_lower = a
self.iv = (b << self.word_size) + a
elif self.mode == 'PCBC':
f, e = b, a
b ^= self.iv_upper
a ^= self.iv_lower
b, a = self.encrypt_function(b, a)
self.iv_upper = b ^ f
self.iv_lower = a ^ e
self.iv = (self.iv_upper << self.word_size) + self.iv_lower
elif self.mode == 'CFB':
d = self.iv_upper
c = self.iv_lower
d, c = self.encrypt_function(d, c)
b ^= d
a ^= c
self.iv_upper = b
self.iv_lower = a
self.iv = (b << self.word_size) + a
elif self.mode == 'OFB':
d = self.iv_upper
c = self.iv_lower
d, c = self.encrypt_function(d, c)
self.iv_upper = d
self.iv_lower = c
self.iv = (d << self.word_size) + c
b ^= d
a ^= c
ciphertext = (b << self.word_size) + a
return ciphertext
def decrypt(self, ciphertext):
"""
        Process new ciphertext into plaintext based on current cipher object setup
        :param ciphertext: Int representing value to decrypt
:return: Int representing decrypted value
"""
try:
b = (ciphertext >> self.word_size) & self.mod_mask
a = ciphertext & self.mod_mask
except TypeError:
print('Invalid ciphertext!')
print('Please provide ciphertext as int')
raise
if self.mode == 'ECB':
a, b = self.decrypt_function(a, b)
elif self.mode == 'CTR':
true_counter = self.iv + self.counter
d = (true_counter >> self.word_size) & self.mod_mask
c = true_counter & self.mod_mask
d, c = self.encrypt_function(d, c)
b ^= d
a ^= c
self.counter += 1
elif self.mode == 'CBC':
f, e = b, a
a, b = self.decrypt_function(a, b)
b ^= self.iv_upper
a ^= self.iv_lower
self.iv_upper = f
self.iv_lower = e
self.iv = (f << self.word_size) + e
elif self.mode == 'PCBC':
f, e = b, a
a, b = self.decrypt_function(a, b)
b ^= self.iv_upper
a ^= self.iv_lower
self.iv_upper = (b ^ f)
self.iv_lower = (a ^ e)
self.iv = (self.iv_upper << self.word_size) + self.iv_lower
elif self.mode == 'CFB':
d = self.iv_upper
c = self.iv_lower
self.iv_upper = b
self.iv_lower = a
self.iv = (b << self.word_size) + a
d, c = self.encrypt_function(d, c)
b ^= d
a ^= c
elif self.mode == 'OFB':
d = self.iv_upper
c = self.iv_lower
d, c = self.encrypt_function(d, c)
self.iv_upper = d
self.iv_lower = c
self.iv = (d << self.word_size) + c
b ^= d
a ^= c
plaintext = (b << self.word_size) + a
return plaintext
def encrypt_function(self, upper_word, lower_word):
"""
        Complete the appropriate number of Simon Feistel rounds to encrypt the provided words.
        The round count is based on the number of elements in the key schedule.
        :param upper_word: Int of upper bytes of plaintext input,
            limited by the word size of the currently configured cipher
        :param lower_word: Int of lower bytes of plaintext input,
            limited by the word size of the currently configured cipher
        :return: x, y - Ints of the upper and lower ciphertext words
"""
x = upper_word
y = lower_word
# Run Encryption Steps For Appropriate Number of Rounds
for k in self.key_schedule:
# Generate all circular shifts
ls_1_x = ((x >> (self.word_size - 1)) + (x << 1)) & self.mod_mask
ls_8_x = ((x >> (self.word_size - 8)) + (x << 8)) & self.mod_mask
ls_2_x = ((x >> (self.word_size - 2)) + (x << 2)) & self.mod_mask
# XOR Chain
xor_1 = (ls_1_x & ls_8_x) ^ y
xor_2 = xor_1 ^ ls_2_x
y = x
x = k ^ xor_2
        return x, y
def decrypt_function(self, upper_word, lower_word):
"""
        Complete the appropriate number of Simon Feistel rounds to decrypt the provided words.
        The round count is based on the number of elements in the key schedule.
        :param upper_word: Int of upper bytes of ciphertext input,
            limited by the word size of the currently configured cipher
        :param lower_word: Int of lower bytes of ciphertext input,
            limited by the word size of the currently configured cipher
        :return: x, y - Ints of the upper and lower plaintext words
"""
x = upper_word
y = lower_word
        # Run Decryption Steps For Appropriate Number of Rounds
for k in reversed(self.key_schedule):
# Generate all circular shifts
ls_1_x = ((x >> (self.word_size - 1)) + (x << 1)) & self.mod_mask
ls_8_x = ((x >> (self.word_size - 8)) + (x << 8)) & self.mod_mask
ls_2_x = ((x >> (self.word_size - 2)) + (x << 2)) & self.mod_mask
# XOR Chain
xor_1 = (ls_1_x & ls_8_x) ^ y
xor_2 = xor_1 ^ ls_2_x
y = x
x = k ^ xor_2
        return x, y
def update_iv(self, new_iv):
if new_iv:
try:
self.iv = new_iv & ((2 ** self.block_size) - 1)
self.iv_upper = self.iv >> self.word_size
self.iv_lower = self.iv & self.mod_mask
except TypeError:
print('Invalid Initialization Vector!')
print('Please provide IV as int')
raise
return self.iv
if __name__ == "__main__":
w = SimonCipher(0x1918111009080100, key_size=64, block_size=32)
t = w.encrypt(0x65656877)
print(hex(t))
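    # Round-trip sketch: with the same cipher object in ECB mode, decrypting
    # the ciphertext should recover the plaintext passed to encrypt() above.
    assert w.decrypt(t) == 0x65656877
    print('round-trip OK')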
| mit |
saisaizhang/Food | flask/lib/python2.7/site-packages/wtforms/ext/django/orm.py | 177 | 6096 | """
Tools for generating forms based on Django models.
"""
from wtforms import fields as f
from wtforms import Form
from wtforms import validators
from wtforms.compat import iteritems
from wtforms.ext.django.fields import ModelSelectField
__all__ = (
'model_fields', 'model_form',
)
class ModelConverterBase(object):
def __init__(self, converters):
self.converters = converters
def convert(self, model, field, field_args):
kwargs = {
'label': field.verbose_name,
'description': field.help_text,
'validators': [],
'filters': [],
'default': field.default,
}
if field_args:
kwargs.update(field_args)
if field.blank:
kwargs['validators'].append(validators.Optional())
if field.max_length is not None and field.max_length > 0:
kwargs['validators'].append(validators.Length(max=field.max_length))
ftype = type(field).__name__
if field.choices:
kwargs['choices'] = field.choices
return f.SelectField(**kwargs)
elif ftype in self.converters:
return self.converters[ftype](model, field, kwargs)
else:
converter = getattr(self, 'conv_%s' % ftype, None)
if converter is not None:
return converter(model, field, kwargs)
class ModelConverter(ModelConverterBase):
DEFAULT_SIMPLE_CONVERSIONS = {
f.IntegerField: ['AutoField', 'IntegerField', 'SmallIntegerField', 'PositiveIntegerField', 'PositiveSmallIntegerField'],
f.DecimalField: ['DecimalField', 'FloatField'],
f.FileField: ['FileField', 'FilePathField', 'ImageField'],
f.DateTimeField: ['DateTimeField'],
f.DateField: ['DateField'],
f.BooleanField: ['BooleanField'],
f.TextField: ['CharField', 'PhoneNumberField', 'SlugField'],
f.TextAreaField: ['TextField', 'XMLField'],
}
def __init__(self, extra_converters=None, simple_conversions=None):
converters = {}
if simple_conversions is None:
simple_conversions = self.DEFAULT_SIMPLE_CONVERSIONS
for field_type, django_fields in iteritems(simple_conversions):
converter = self.make_simple_converter(field_type)
for name in django_fields:
converters[name] = converter
if extra_converters:
converters.update(extra_converters)
super(ModelConverter, self).__init__(converters)
def make_simple_converter(self, field_type):
def _converter(model, field, kwargs):
return field_type(**kwargs)
return _converter
def conv_ForeignKey(self, model, field, kwargs):
return ModelSelectField(model=field.rel.to, **kwargs)
def conv_TimeField(self, model, field, kwargs):
def time_only(obj):
try:
return obj.time()
except AttributeError:
return obj
kwargs['filters'].append(time_only)
return f.DateTimeField(format='%H:%M:%S', **kwargs)
def conv_EmailField(self, model, field, kwargs):
kwargs['validators'].append(validators.email())
return f.TextField(**kwargs)
def conv_IPAddressField(self, model, field, kwargs):
kwargs['validators'].append(validators.ip_address())
return f.TextField(**kwargs)
def conv_URLField(self, model, field, kwargs):
kwargs['validators'].append(validators.url())
return f.TextField(**kwargs)
def conv_NullBooleanField(self, model, field, kwargs):
from django.db.models.fields import NOT_PROVIDED
def coerce_nullbool(value):
d = {'None': None, None: None, 'True': True, 'False': False}
if isinstance(value, NOT_PROVIDED):
return None
elif value in d:
return d[value]
else:
return bool(int(value))
choices = ((None, 'Unknown'), (True, 'Yes'), (False, 'No'))
return f.SelectField(choices=choices, coerce=coerce_nullbool, **kwargs)
def model_fields(model, only=None, exclude=None, field_args=None, converter=None):
"""
Generate a dictionary of fields for a given Django model.
See `model_form` docstring for description of parameters.
"""
converter = converter or ModelConverter()
field_args = field_args or {}
model_fields = ((f.attname, f) for f in model._meta.fields)
if only:
model_fields = (x for x in model_fields if x[0] in only)
elif exclude:
model_fields = (x for x in model_fields if x[0] not in exclude)
field_dict = {}
for name, model_field in model_fields:
field = converter.convert(model, model_field, field_args.get(name))
if field is not None:
field_dict[name] = field
return field_dict
def model_form(model, base_class=Form, only=None, exclude=None, field_args=None, converter=None):
"""
Create a wtforms Form for a given Django model class::
from wtforms.ext.django.orm import model_form
from myproject.myapp.models import User
UserForm = model_form(User)
:param model:
A Django ORM model class
:param base_class:
Base form class to extend from. Must be a ``wtforms.Form`` subclass.
:param only:
An optional iterable with the property names that should be included in
the form. Only these properties will have fields.
:param exclude:
An optional iterable with the property names that should be excluded
from the form. All other properties will have fields.
:param field_args:
An optional dictionary of field names mapping to keyword arguments used
to construct each field object.
:param converter:
A converter to generate the fields based on the model properties. If
not set, ``ModelConverter`` is used.
"""
field_dict = model_fields(model, only, exclude, field_args, converter)
return type(model._meta.object_name + 'Form', (base_class, ), field_dict)
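# Usage sketch (illustrative; assumes a Django project with configured settings
# and a User model at the path shown in the docstring above):
#
#     from myproject.myapp.models import User
#     UserForm = model_form(User, only=['username', 'email'])
#     form = UserForm()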
| bsd-3-clause |
bakerlover/project4 | lib/flask/blueprints.py | 773 | 16320 | # -*- coding: utf-8 -*-
"""
flask.blueprints
~~~~~~~~~~~~~~~~
Blueprints are the recommended way to implement larger or more
pluggable applications in Flask 0.7 and later.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from .helpers import _PackageBoundObject, _endpoint_from_view_func
class BlueprintSetupState(object):
"""Temporary holder object for registering a blueprint with the
application. An instance of this class is created by the
:meth:`~flask.Blueprint.make_setup_state` method and later passed
to all register callback functions.
"""
def __init__(self, blueprint, app, options, first_registration):
#: a reference to the current application
self.app = app
#: a reference to the blueprint that created this setup state.
self.blueprint = blueprint
#: a dictionary with all options that were passed to the
#: :meth:`~flask.Flask.register_blueprint` method.
self.options = options
#: as blueprints can be registered multiple times with the
#: application and not everything wants to be registered
#: multiple times on it, this attribute can be used to figure
#: out if the blueprint was registered in the past already.
self.first_registration = first_registration
subdomain = self.options.get('subdomain')
if subdomain is None:
subdomain = self.blueprint.subdomain
#: The subdomain that the blueprint should be active for, `None`
#: otherwise.
self.subdomain = subdomain
url_prefix = self.options.get('url_prefix')
if url_prefix is None:
url_prefix = self.blueprint.url_prefix
#: The prefix that should be used for all URLs defined on the
#: blueprint.
self.url_prefix = url_prefix
#: A dictionary with URL defaults that is added to each and every
#: URL that was defined with the blueprint.
self.url_defaults = dict(self.blueprint.url_values_defaults)
self.url_defaults.update(self.options.get('url_defaults', ()))
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""A helper method to register a rule (and optionally a view function)
to the application. The endpoint is automatically prefixed with the
blueprint's name.
"""
if self.url_prefix:
rule = self.url_prefix + rule
options.setdefault('subdomain', self.subdomain)
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
defaults = self.url_defaults
if 'defaults' in options:
defaults = dict(defaults, **options.pop('defaults'))
self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint),
view_func, defaults=defaults, **options)
class Blueprint(_PackageBoundObject):
"""Represents a blueprint. A blueprint is an object that records
functions that will be called with the
:class:`~flask.blueprint.BlueprintSetupState` later to register functions
or other things on the main application. See :ref:`blueprints` for more
information.
.. versionadded:: 0.7
"""
warn_on_modifications = False
_got_registered_once = False
def __init__(self, name, import_name, static_folder=None,
static_url_path=None, template_folder=None,
url_prefix=None, subdomain=None, url_defaults=None):
_PackageBoundObject.__init__(self, import_name, template_folder)
self.name = name
self.url_prefix = url_prefix
self.subdomain = subdomain
self.static_folder = static_folder
self.static_url_path = static_url_path
self.deferred_functions = []
self.view_functions = {}
if url_defaults is None:
url_defaults = {}
self.url_values_defaults = url_defaults
def record(self, func):
"""Registers a function that is called when the blueprint is
registered on the application. This function is called with the
state as argument as returned by the :meth:`make_setup_state`
method.
"""
if self._got_registered_once and self.warn_on_modifications:
from warnings import warn
warn(Warning('The blueprint was already registered once '
'but is getting modified now. These changes '
'will not show up.'))
self.deferred_functions.append(func)
def record_once(self, func):
"""Works like :meth:`record` but wraps the function in another
function that will ensure the function is only called once. If the
blueprint is registered a second time on the application, the
function passed is not called.
"""
def wrapper(state):
if state.first_registration:
func(state)
return self.record(update_wrapper(wrapper, func))
def make_setup_state(self, app, options, first_registration=False):
"""Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
object that is later passed to the register callback functions.
Subclasses can override this to return a subclass of the setup state.
"""
return BlueprintSetupState(self, app, options, first_registration)
def register(self, app, options, first_registration=False):
"""Called by :meth:`Flask.register_blueprint` to register a blueprint
on the application. This can be overridden to customize the register
behavior. Keyword arguments from
:func:`~flask.Flask.register_blueprint` are directly forwarded to this
method in the `options` dictionary.
"""
self._got_registered_once = True
state = self.make_setup_state(app, options, first_registration)
if self.has_static_folder:
state.add_url_rule(self.static_url_path + '/<path:filename>',
view_func=self.send_static_file,
endpoint='static')
for deferred in self.deferred_functions:
deferred(state)
def route(self, rule, **options):
"""Like :meth:`Flask.route` but for a blueprint. The endpoint for the
:func:`url_for` function is prefixed with the name of the blueprint.
"""
def decorator(f):
endpoint = options.pop("endpoint", f.__name__)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
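    # Usage sketch (illustrative; `bp` is a hypothetical blueprint instance):
    #
    #     bp = Blueprint('admin', __name__, url_prefix='/admin')
    #
    #     @bp.route('/users')
    #     def list_users():
    #         return 'users'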
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for
the :func:`url_for` function is prefixed with the name of the blueprint.
"""
if endpoint:
            assert '.' not in endpoint, "Blueprint endpoints should not contain dots"
self.record(lambda s:
s.add_url_rule(rule, endpoint, view_func, **options))
def endpoint(self, endpoint):
"""Like :meth:`Flask.endpoint` but for a blueprint. This does not
prefix the endpoint with the blueprint name, this has to be done
explicitly by the user of this method. If the endpoint is prefixed
with a `.` it will be registered to the current blueprint, otherwise
it's an application independent endpoint.
"""
def decorator(f):
def register_endpoint(state):
state.app.view_functions[endpoint] = f
self.record_once(register_endpoint)
return f
return decorator
def app_template_filter(self, name=None):
"""Register a custom template filter, available application wide. Like
:meth:`Flask.template_filter` but for a blueprint.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_filter(f, name=name)
return f
return decorator
def add_app_template_filter(self, f, name=None):
"""Register a custom template filter, available application wide. Like
:meth:`Flask.add_template_filter` but for a blueprint. Works exactly
like the :meth:`app_template_filter` decorator.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.filters[name or f.__name__] = f
self.record_once(register_template)
def app_template_test(self, name=None):
"""Register a custom template test, available application wide. Like
:meth:`Flask.template_test` but for a blueprint.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_test(f, name=name)
return f
return decorator
def add_app_template_test(self, f, name=None):
"""Register a custom template test, available application wide. Like
:meth:`Flask.add_template_test` but for a blueprint. Works exactly
like the :meth:`app_template_test` decorator.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.tests[name or f.__name__] = f
self.record_once(register_template)
def app_template_global(self, name=None):
"""Register a custom template global, available application wide. Like
:meth:`Flask.template_global` but for a blueprint.
.. versionadded:: 0.10
:param name: the optional name of the global, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_global(f, name=name)
return f
return decorator
def add_app_template_global(self, f, name=None):
"""Register a custom template global, available application wide. Like
:meth:`Flask.add_template_global` but for a blueprint. Works exactly
like the :meth:`app_template_global` decorator.
.. versionadded:: 0.10
:param name: the optional name of the global, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.globals[name or f.__name__] = f
self.record_once(register_template)
def before_request(self, f):
"""Like :meth:`Flask.before_request` but for a blueprint. This function
is only executed before each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.before_request_funcs
.setdefault(self.name, []).append(f))
return f
def before_app_request(self, f):
"""Like :meth:`Flask.before_request`. Such a function is executed
before each request, even if outside of a blueprint.
"""
self.record_once(lambda s: s.app.before_request_funcs
.setdefault(None, []).append(f))
return f
def before_app_first_request(self, f):
"""Like :meth:`Flask.before_first_request`. Such a function is
executed before the first request to the application.
"""
self.record_once(lambda s: s.app.before_first_request_funcs.append(f))
return f
def after_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. This function
is only executed after each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(self.name, []).append(f))
return f
def after_app_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. Such a function
is executed after each request, even if outside of the blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(None, []).append(f))
return f
def teardown_request(self, f):
"""Like :meth:`Flask.teardown_request` but for a blueprint. This
function is only executed when tearing down requests handled by a
function of that blueprint. Teardown request functions are executed
when the request context is popped, even when no actual request was
performed.
"""
self.record_once(lambda s: s.app.teardown_request_funcs
.setdefault(self.name, []).append(f))
return f
def teardown_app_request(self, f):
"""Like :meth:`Flask.teardown_request` but for a blueprint. Such a
function is executed when tearing down each request, even if outside of
the blueprint.
"""
self.record_once(lambda s: s.app.teardown_request_funcs
.setdefault(None, []).append(f))
return f
def context_processor(self, f):
"""Like :meth:`Flask.context_processor` but for a blueprint. This
function is only executed for requests handled by a blueprint.
"""
self.record_once(lambda s: s.app.template_context_processors
.setdefault(self.name, []).append(f))
return f
def app_context_processor(self, f):
"""Like :meth:`Flask.context_processor` but for a blueprint. Such a
function is executed each request, even if outside of the blueprint.
"""
self.record_once(lambda s: s.app.template_context_processors
.setdefault(None, []).append(f))
return f
def app_errorhandler(self, code):
"""Like :meth:`Flask.errorhandler` but for a blueprint. This
handler is used for all requests, even if outside of the blueprint.
"""
def decorator(f):
self.record_once(lambda s: s.app.errorhandler(code)(f))
return f
return decorator
def url_value_preprocessor(self, f):
"""Registers a function as URL value preprocessor for this
blueprint. It's called before the view functions are called and
can modify the url values provided.
"""
self.record_once(lambda s: s.app.url_value_preprocessors
.setdefault(self.name, []).append(f))
return f
def url_defaults(self, f):
"""Callback function for URL defaults for this blueprint. It's called
with the endpoint and values and should update the values passed
in place.
"""
self.record_once(lambda s: s.app.url_default_functions
.setdefault(self.name, []).append(f))
return f
def app_url_value_preprocessor(self, f):
"""Same as :meth:`url_value_preprocessor` but application wide.
"""
self.record_once(lambda s: s.app.url_value_preprocessors
.setdefault(None, []).append(f))
return f
def app_url_defaults(self, f):
"""Same as :meth:`url_defaults` but application wide.
"""
self.record_once(lambda s: s.app.url_default_functions
.setdefault(None, []).append(f))
return f
def errorhandler(self, code_or_exception):
"""Registers an error handler that becomes active for this blueprint
only. Please be aware that routing does not happen local to a
blueprint so an error handler for 404 usually is not handled by
a blueprint unless it is caused inside a view function. Another
special case is the 500 internal server error which is always looked
up from the application.
Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
of the :class:`~flask.Flask` object.
"""
def decorator(f):
self.record_once(lambda s: s.app._register_error_handler(
self.name, code_or_exception, f))
return f
return decorator
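    # Usage sketch (illustrative; `bp` is a hypothetical blueprint instance):
    #
    #     @bp.errorhandler(404)
    #     def page_not_found(error):
    #         return 'This page does not exist', 404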
| apache-2.0 |
sxjscience/tvm | apps/topi_recipe/rnn/lstm.py | 4 | 7638 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""LSTM Example, still work in progress.."""
import tvm
from tvm import te
import os
from tvm.contrib import nvcc
import numpy as np
# Quick knobs
TASK = "lstm"
USE_MANUAL_CODE = False
PERSIST_KERNEL = True
DETECT_GLOBAL_BARRIER = PERSIST_KERNEL
SKIP_CHECK = False
UNROLL_WLOAD = True
@tvm.register_func
def tvm_callback_cuda_compile(code):
"""Use nvcc compiler for better perf."""
ptx = nvcc.compile_cuda(code, target="ptx")
return ptx
def write_code(code, fname):
with open(fname, "w") as f:
f.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code):
if not os.path.exists("perf"):
os.mkdir("perf")
write_code(code, "perf/%s_generated.cu" % TASK)
if USE_MANUAL_CODE:
code = open("perf/%s_manual.cu" % TASK).read()
return code
def lstm():
if not PERSIST_KERNEL:
raise ValueError("Non persist LSTM not yet supported")
num_thread_y = 8
num_thread_x = 16 * 3 // 2
num_sm = 24
n_num_step = 128
num_step = te.var("num_step")
num_hidden = 1152 // 2
batch_size = 1
# Global transition matrix
    # Input hidden channel can be pre-calculated by a gemm
Xi2h = te.placeholder((num_step, batch_size, 4, num_hidden), name="Xi2h")
# Only handle hidden transition, saves space.
Wh2h = te.placeholder((4, num_hidden, num_hidden), name="Wh2h")
# h: output hidden state, c: cell state.
s_state_h = te.placeholder((num_step, batch_size, num_hidden))
s_state_c = te.placeholder((num_step, batch_size, num_hidden))
s_init_c = te.compute((1, batch_size, num_hidden), lambda *i: 0.0, name="init_c")
s_init_h = te.compute((1, batch_size, num_hidden), lambda *i: 0.0, name="init_h")
# LSTM transition
k = te.reduce_axis((0, num_hidden), name="ki2h")
s_h2h = te.compute(
(num_step, batch_size, 4, num_hidden),
lambda t, i, x, j: te.sum(s_state_h[t - 1, i, k] * Wh2h[x, j, k], axis=k),
name="s_h2h",
)
# Gate rules
gates = te.compute(Xi2h.shape, lambda *i: Xi2h(*i) + s_h2h(*i), name="gates")
gshape = (num_step, batch_size, num_hidden)
in_gate = te.compute(gshape, lambda t, i, j: te.sigmoid(gates[t, i, 0, j]), name="in_gate")
in_transform = te.compute(
gshape, lambda t, i, j: te.tanh(gates[t, i, 1, j]), name="in_transform"
)
forget_gate = te.compute(
gshape, lambda t, i, j: te.sigmoid(gates[t, i, 2, j]), name="forget_gate"
)
out_gate = te.compute(gshape, lambda t, i, j: te.sigmoid(gates[t, i, 3, j]), name="out_gate")
next_c = te.compute(
gshape,
lambda t, i, j: forget_gate[t, i, j] * s_state_c[t - 1, i, j]
+ in_gate[t, i, j] * in_transform[t, i, j],
name="next_c",
)
next_h = te.compute(
gshape, lambda t, i, j: out_gate[t, i, j] * te.tanh(next_c[t, i, j]), name="next_h"
)
update_c = te.compute(gshape, lambda *i: next_c(*i), name="update_c")
update_h = te.compute(gshape, lambda *i: next_h(*i), name="update_h")
# schedule
scan_h, scan_c = tvm.te.scan(
[s_init_h, s_init_c],
[update_h, update_c],
[s_state_h, s_state_c],
inputs=[Xi2h],
name="lstm_scan",
)
# schedule
s = te.create_schedule(scan_h.op)
# Inline gate computations
s[gates].compute_inline()
s[in_gate].compute_inline()
s[in_transform].compute_inline()
s[forget_gate].compute_inline()
s[out_gate].compute_inline()
block_x = te.thread_axis((0, num_sm), "blockIdx.x")
thread_x = te.thread_axis((0, num_thread_x), "threadIdx.x")
thread_y = te.thread_axis((0, num_thread_y), "threadIdx.y")
s_state_h_S = s.cache_read(s_state_h, "shared", [s_h2h])
s_state_c_S = s.cache_read(s_state_c, "shared", [next_c])
Wh2hL = s.cache_read(Wh2h, "local", [s_h2h])
ko, ki = s[s_h2h].split(s[s_h2h].op.reduce_axis[0], nparts=num_thread_y)
s_h2h_rf = s.rfactor(s_h2h, ko)
s[s_h2h].bind(s[s_h2h].op.reduce_axis[0], thread_y)
s[s_h2h_rf].compute_at(s[s_h2h], s[s_h2h].op.reduce_axis[0])
if PERSIST_KERNEL:
s[scan_h.op].env_threads([block_x, thread_y, thread_x])
s[Wh2hL].compute_at(s[scan_h.op], thread_x)
else:
s[Wh2hL].compute_at(s[s_h2h], s[s_h2h].op.axis[3])
if UNROLL_WLOAD:
s[Wh2hL].unroll(Wh2hL.op.axis[0])
s[Wh2hL].unroll(Wh2hL.op.axis[2])
s[s_state_h_S].compute_at(s[s_h2h_rf], s[s_h2h_rf].op.axis[3])
s[s_state_c_S].compute_at(s[scan_h.op], s[scan_h].op.scan_axis)
for ss in [s_state_h_S]:
xo, xi = s[ss].split(ss.op.axis[2], factor=num_thread_x * num_thread_y)
ty, xi = s[ss].split(xi, nparts=num_thread_y)
tx, xi = s[ss].split(xi, nparts=num_thread_x)
s[ss].bind(ty, thread_y)
s[ss].bind(tx, thread_x)
for init in [s_init_c, s_init_h]:
bx, xi = s[init].split(init.op.axis[2], nparts=num_sm)
tx, xi = s[init].split(xi, nparts=num_thread_x)
s[init].bind(bx, block_x)
s[init].bind(tx, thread_x)
s[next_c].set_store_predicate(thread_y.equal(0))
s[next_h].set_store_predicate(thread_y.equal(0))
for update in [update_c, update_h]:
bx, xi = s[update].split(s[update].op.axis[2], nparts=num_sm)
tx, xi = s[update].split(xi, nparts=num_thread_x)
s[update].bind(bx, block_x)
s[update].bind(tx, thread_x)
s[update].set_store_predicate(thread_y.equal(0))
# verify we can lower correctly
def check_device(target):
num_step = n_num_step
flstm = tvm.build(s, [Xi2h, Wh2h, scan_h, scan_c], target)
ctx = tvm.gpu(0) if target == "cuda" else tvm.cl(0)
# launch the kernel.
scan_h_np = np.zeros((num_step, batch_size, num_hidden)).astype("float32")
scan_c_np = np.zeros((num_step, batch_size, num_hidden)).astype("float32")
Xi2h_np = np.random.normal(size=(num_step, batch_size, 4, num_hidden)).astype("float32")
Wh2h_np = np.random.normal(size=(4, num_hidden, num_hidden)).astype("float32")
scan_h_a = tvm.nd.array(scan_h_np, ctx)
scan_c_a = tvm.nd.array(scan_c_np, ctx)
Xi2h_a = tvm.nd.array(Xi2h_np, ctx)
Wh2h_a = tvm.nd.array(Wh2h_np, ctx)
flstm(Xi2h_a, Wh2h_a, scan_h_a, scan_c_a)
ctx.sync()
# measure time cost of second step.
evaluator = flstm.time_evaluator(flstm.entry_name, ctx, 1, repeat=1000)
eval_result = evaluator(Xi2h_a, Wh2h_a, scan_h_a, scan_c_a)
print("Time cost=%g" % eval_result.mean)
# set unroll_explicit for more readable code.
with tvm.transform.PassContext(
config={
"tir.UnrollLoop": {
"auto_max_step": 128,
},
"tir.detect_global_barrier": DETECT_GLOBAL_BARRIER,
}
):
check_device("cuda")
if __name__ == "__main__":
lstm()
| apache-2.0 |
rohit12/opencog | opencog/python/spatiotemporal/temporal_events/composition/depth_first_search_composition.py | 33 | 7461 | from copy import deepcopy
from math import fabs, sqrt
from spatiotemporal.temporal_events.composition.railway_framework import RailwaySystem, EPSILON
from spatiotemporal.temporal_events.trapezium import TemporalEventTrapezium
from utility.functions import almost_equals
def unpack(relation, start_reference=0, length_reference=1):
before, same, after = relation
similarity = same / (1 - fabs(before - after))
solutions = []
lengths = length_reference / similarity**2, length_reference * similarity**2
for i in xrange(2):
length_solution = lengths[i]
starts = [
start_reference - (length_solution*before - 0.5*length_reference),
start_reference + length_reference - (length_reference*before + 0.5*length_solution)
]
start_solution = starts[i]
comparison_operand = 0.5 * (length_reference / length_solution)**((-1)**i)
if before <= comparison_operand:
start_solution = start_reference + length_reference - sqrt(2*before*length_solution*length_reference)
elif before > 1 - comparison_operand:
start_solution = start_reference - length_solution + sqrt(2*after*length_solution*length_reference)
if i == 0 or i == 1 and solutions[0] != (start_solution, start_solution + length_solution):
solution_a, solution_b = round(start_solution, 15), round(start_solution + length_solution, 15)
solutions.append((solution_a, solution_b))
return solutions
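# Example sketch (illustrative values): given a (before, same, after) degree
# vector and a reference interval, unpack() returns up to two candidate
# (start, end) intervals realizing that relation against the reference:
#
#     candidates = unpack((0.25, 0.5, 0.25), start_reference=0.0,
#                         length_reference=1.0)
#     for start, end in candidates:
#         print start, end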
class Node(object):
def __init__(self, rails, unpack_states):
self.rails = rails
self.unpack_states = unpack_states
def is_solution(node):
for state in node.unpack_states.values():
if not state:
return False
return True
def expand(node, relations):
successors = []
rails = node.rails
new_unpack_states = deepcopy(node.unpack_states)
relation = None
for key, unpacked in node.unpack_states.items():
if not unpacked:
relation = key
new_unpack_states[relation] = True
break
if relation is None:
return []
temporal_event_1_key, portion_index_1, temporal_event_2_key, portion_index_2 = relation
relation = relations[relation]
start_reference = rails[temporal_event_2_key][portion_index_2].a
length_reference = rails[temporal_event_2_key][portion_index_2].length
if relation == (1.0, 0.0, 0.0):
new_rails = deepcopy(rails)
new_rails.bind_wagons_before_horizontal(temporal_event_1_key, portion_index_1, temporal_event_2_key,
portion_index_2)
return [Node(new_rails, new_unpack_states)]
if relation == (0.0, 0.0, 1.0):
new_rails = deepcopy(rails)
new_rails.bind_wagons_after_horizontal(temporal_event_1_key, portion_index_1, temporal_event_2_key,
portion_index_2)
return [Node(new_rails, new_unpack_states)]
candidates = unpack(relation, start_reference, length_reference)
for a, b in candidates:
reference = rails[temporal_event_2_key][portion_index_2]
new_rails = deepcopy(rails)
new_rails.move_and_bind_vertical(temporal_event_1_key, portion_index_1, temporal_event_2_key, portion_index_2,
a, b)
new_reference = new_rails[temporal_event_2_key][portion_index_2]
if not almost_equals(new_reference.a, reference.a, EPSILON) or\
not almost_equals(new_reference.b, reference.b, EPSILON):
continue
successors.append(Node(new_rails, deepcopy(new_unpack_states)))
return successors
class DepthFirstSearchComposition(object):
def __init__(self):
self.stack = []
self.relations = {}
self.events = set()
def add_relation(self, temporal_event_1_key, portion_index_1,
temporal_event_2_key, portion_index_2, relation):
self.events.add(temporal_event_1_key)
self.events.add(temporal_event_2_key)
self.relations[temporal_event_1_key, portion_index_1, temporal_event_2_key, portion_index_2] = relation
def find_solutions(self, initial_system):
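        """Run a depth-first search over the registered relations, unpacking
        one relation per expansion; returns every railway system in which
        all relations have been applied."""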
solutions = []
initial_unpack_states = {}
for key in self.relations:
initial_unpack_states[key] = False
start = Node(initial_system, initial_unpack_states)
stack = [start]
while stack:
node = stack.pop()
if is_solution(node):
solutions.append(node.rails)
stack += expand(node, self.relations)
return solutions
def convert_rail_to_trapezium_event(railway_system, rail_key):
a = railway_system[rail_key][0].a
beginning = railway_system[rail_key][0].b
ending = railway_system[rail_key][1].a
b = railway_system[rail_key][1].b
return TemporalEventTrapezium(a, b, beginning, ending)
if __name__ == '__main__':
from spatiotemporal.temporal_events.trapezium import generate_random_events
from spatiotemporal.temporal_events.util import compute_railway_strength
import numpy
from spatiotemporal.temporal_events import RelationFormulaConvolution
search_tree = DepthFirstSearchComposition()
formula = RelationFormulaConvolution()
A, B, C = generate_random_events(3)
for event in [A, B, C]:
p = ''
for point in [event.a, event.b, event.beginning, event.ending]:
p += str((point - A.a) / (A.beginning - A.a)) + ', '
print p
# A = TemporalEventTrapezium(0, 30, 10, 20)
# B = TemporalEventTrapezium(1, 9, 2, 8)
# C = TemporalEventTrapezium(0, 30, 10, 20)
actual_solution = (A * C).to_vector()
print 'Actual\n', actual_solution
goal = []
events = {'A': A, 'B': B, 'C': C}
for a_key, b_key in [('A', 'B'), ('B', 'C')]:
a, b = events[a_key], events[b_key]
for portion_index_a in [0, 1]:
for portion_index_b in [0, 1]:
relation = formula.compare(a[portion_index_a], b[portion_index_b])
goal.append(relation)
search_tree.add_relation(a_key, portion_index_a, b_key, portion_index_b, relation)
goal = numpy.array(goal)
rails = RailwaySystem()
for event_key in events:
rails.add_rail(event_key)
solutions = search_tree.find_solutions(rails)
average_solution = numpy.zeros(13)
print 'Solutions'
for railway_system in solutions:
railway_system.compress()
estimate = []
A = convert_rail_to_trapezium_event(railway_system, 'A')
B = convert_rail_to_trapezium_event(railway_system, 'B')
C = convert_rail_to_trapezium_event(railway_system, 'C')
solution = (A * C).to_vector()
print solution
average_solution += solution / len(solutions)
events = {'A': A, 'B': B, 'C': C}
for a_key, b_key in [('A', 'B'), ('B', 'C')]:
a, b = events[a_key], events[b_key]
for portion_index_a in [0, 1]:
for portion_index_b in [0, 1]:
relation = formula.compare(a[portion_index_a], b[portion_index_b])
estimate.append(relation)
# print goal - numpy.array(estimate)
print compute_railway_strength(solutions, goals=[('A', 'C')])
print 'Average\n', average_solution
print 'Error\n', actual_solution - average_solution
| agpl-3.0 |
dsacre/mididings | mididings/util.py | 2 | 10818 | # -*- coding: utf-8 -*-
#
# mididings
#
# Copyright (C) 2008-2014 Dominic Sacré <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
import mididings.misc as _misc
import mididings.constants as _constants
import mididings.setup as _setup
import sys as _sys
_NOTE_NUMBERS = {
'c': 0,
'c#': 1, 'db': 1,
'd': 2,
'd#': 3, 'eb': 3,
'e': 4,
'f': 5,
'f#': 6, 'gb': 6,
'g': 7,
'g#': 8, 'ab': 8,
'a': 9,
'a#': 10, 'bb': 10,
'b': 11,
}
_NOTE_NAMES = {
0: 'c',
1: 'c#',
2: 'd',
3: 'd#',
4: 'e',
5: 'f',
6: 'f#',
7: 'g',
8: 'g#',
9: 'a',
10: 'a#',
11: 'b',
}
_CONTROLLER_NAMES = {
0: 'Bank select (MSB)',
1: 'Modulation',
6: 'Data entry (MSB)',
7: 'Volume',
10: 'Pan',
11: 'Expression',
32: 'Bank select (LSB)',
38: 'Data entry (LSB)',
64: 'Sustain',
65: 'Portamento',
66: 'Sostenuto',
67: 'Soft pedal',
68: 'Legato pedal',
98: 'NRPN (LSB)',
99: 'NRPN (MSB)',
100: 'RPN (LSB)',
101: 'RPN (MSB)',
121: 'Reset all controllers',
123: 'All notes off',
}
def note_number(note, allow_end=False):
"""
note_number(note)
Convert note name to note number.
:param note:
any valid :ref:`note description <notes>` (name or number).
:return:
MIDI note number.
"""
if isinstance(note, int):
r = note
elif isinstance(note, str):
note = note.lower()
# find first digit
for i in range(len(note)):
if note[i].isdigit() or note[i] == '-':
break
try:
name = note[:i]
octave = int(note[i:])
r = (_NOTE_NUMBERS[name] +
(octave + _setup.get_config('octave_offset')) * 12)
except Exception:
raise ValueError("invalid note name '%s'" % note)
else:
raise TypeError("note must be an integer or string")
end = 128 if not allow_end else 129
if not (0 <= r < end):
raise ValueError("note number %d is out of range" % r)
return r
def note_limit(note):
return note_number(note, allow_end=True)
def note_range(notes):
"""
Convert note range to note numbers.
:param notes:
any valid :ref:`note range <notes>`
(names or numbers, tuple or string).
If this is a single note, the range containing only that note is
returned.
:return:
tuple of two MIDI note numbers.
"""
try:
# single note
n = note_number(notes)
return (n, n + 1)
except Exception:
try:
if isinstance(notes, tuple):
# tuple of note numbers
return note_limit(notes[0]), note_limit(notes[1])
elif isinstance(notes, str):
# note range string
nn = notes.split(':', 1)
lower = note_limit(nn[0]) if nn[0] else 0
upper = note_limit(nn[1]) if nn[1] else 0
return lower, upper
else:
raise TypeError("note range must be a tuple"
" of integers or a string")
except (ValueError, IndexError):
raise ValueError("invalid note range %r" % notes)
def note_name(note):
"""
Get note name from note number.
:param note:
a MIDI note number.
:return:
note name as a string.
"""
if not isinstance(note, int):
raise TypeError("note must be an integer")
return _NOTE_NAMES[note % 12] + str(
(note // 12) - _setup.get_config('octave_offset'))
def tonic_note_number(key):
return _NOTE_NUMBERS[key]
def controller_name(ctrl):
"""
Get controller description.
"""
if ctrl in _CONTROLLER_NAMES:
return _CONTROLLER_NAMES[ctrl]
else:
return None
def event_type(type):
if type not in _constants._EVENT_TYPES:
raise ValueError("invalid event type %r" % type)
return type
def port_number(port):
"""
Convert port description to port number.
:param port:
a :ref:`port name <ports>` or number.
:return:
the port's number.
"""
if isinstance(port, int):
if actual(port) < 0:
raise ValueError("invalid port number %d" % port)
return port
elif isinstance(port, str):
in_ports = _setup._in_portnames
out_ports = _setup._out_portnames
if (port in in_ports and port in out_ports and
in_ports.index(port) != out_ports.index(port)):
raise ValueError("port name '%s' is ambiguous" % port)
elif port in in_ports:
return offset(in_ports.index(port))
elif port in out_ports:
return offset(out_ports.index(port))
else:
raise ValueError("invalid port name '%s'" % port)
else:
raise TypeError("port must be an integer or string")
def channel_number(channel):
if not isinstance(channel, int):
raise TypeError("channel must be an integer")
if not (0 <= actual(channel) < 16):
raise ValueError("channel number %d is out of range" % channel)
return channel
def program_number(program):
if not isinstance(program, int):
raise TypeError("program must be an integer")
if not (0 <= actual(program) < 128):
raise ValueError("program number %d is out of range" % program)
return program
def ctrl_number(ctrl):
if not isinstance(ctrl, int):
raise TypeError("controller must be an integer")
if not (0 <= ctrl < 128):
raise ValueError("controller number %d is out of range" % ctrl)
return ctrl
def ctrl_value(value, allow_end=False):
if not isinstance(value, int):
raise TypeError("controller value must be an integer")
end = 128 if not allow_end else 129
if not (0 <= value < end):
raise ValueError("controller value %d is out of range" % value)
return value
def ctrl_limit(value):
return ctrl_value(value, allow_end=True)
def ctrl_range(value):
try:
n = ctrl_value(value)
return (n, n + 1)
except Exception:
if isinstance(value, tuple) and len(value) == 2:
return (ctrl_limit(value[0]), ctrl_limit(value[1]))
raise ValueError("invalid controller value range %r" % value)
def velocity_value(velocity, allow_end=False):
if not isinstance(velocity, int):
raise TypeError("velocity must be an integer")
end = 128 if not allow_end else 129
if not (0 <= velocity < end):
raise ValueError("velocity %d is out of range" % velocity)
return velocity
def velocity_limit(velocity):
return velocity_value(velocity, allow_end=True)
def velocity_range(velocity):
try:
n = velocity_value(velocity)
return (n, n + 1)
except Exception:
if isinstance(velocity, tuple) and len(velocity) == 2:
return (velocity_limit(velocity[0]), velocity_limit(velocity[1]))
raise ValueError("invalid velocity range %r" % velocity)
def scene_number(scene):
if not isinstance(scene, int):
raise TypeError("scene number must be an integer")
if actual(scene) < 0:
raise ValueError("scene number %d is out of range" % scene)
return scene
def subscene_number(subscene):
if not isinstance(subscene, int):
raise TypeError("subscene number must be an integer")
if actual(subscene) < 0:
raise ValueError("subscene number %d is out of range" % subscene)
return subscene
def sysex_to_bytearray(sysex):
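    """Normalize a sysex message given as str or bytearray to a bytearray.

    A hex string starting with 'F0'/'f0' is split on its third character,
    so e.g. 'F0 7E 7F F7' and 'F0,7E,7F,F7' are both accepted; any other
    string is converted byte-by-byte.
    """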
if isinstance(sysex, str):
if sysex.startswith('F0') or sysex.startswith('f0'):
return bytearray(int(x, 16) for x in sysex.split(sysex[2]))
else:
return bytearray(map(ord, sysex))
elif isinstance(sysex, bytearray):
return sysex
else:
return bytearray(sysex)
def sysex_data(sysex, allow_partial=False):
sysex = sysex_to_bytearray(sysex)
if len(sysex) < 2:
raise ValueError("sysex too short")
elif sysex[0] != 0xf0:
raise ValueError("sysex doesn't start with F0")
elif sysex[-1] != 0xf7 and not allow_partial:
raise ValueError("sysex doesn't end with F7")
else:
for c in sysex[1:-1]:
if c > 0x7f:
raise ValueError("sysex data byte %#x is out of range" % c)
return sysex
def sysex_manufacturer(manufacturer):
if not _misc.issequence(manufacturer, True):
manufacturer = [manufacturer]
manid = sysex_to_bytearray(manufacturer)
if len(manid) not in (1, 3):
raise ValueError("manufacturer id must be either one or three bytes")
elif len(manid) == 3 and manid[0] != 0x00:
raise ValueError("three-byte manufacturer id must start with 0x00")
else:
for c in manid:
if c > 0x7f:
raise ValueError("manufacturer id byte %#x is out of range" % c)
return manid
class NoDataOffset(int):
"""
An integer type that's unaffected by data offset conversions.
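    For example, ``actual(NoDataOffset(1))`` returns 1 unchanged, while
    ``actual(1)`` would subtract the configured ``data_offset``.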
"""
def __new__(cls, value):
return int.__new__(cls, value)
def __repr__(self):
return 'NoDataOffset(%d)' % int(self)
def __str__(self):
return 'NoDataOffset(%d)' % int(self)
def offset(n):
"""
Add current data offset.
"""
return n + _setup.get_config('data_offset')
def actual(n):
"""
Subtract current data offset to get the "real" value used on the C++ side.
"""
if isinstance(n, NoDataOffset):
return int(n)
else:
return n - _setup.get_config('data_offset')
# define wrappers around parameter check functions that will also accept
# event attribute references
def _allow_event_attribute(f):
def func(first, *args, **kwargs):
if isinstance(first, _constants._EventAttribute):
return first
else:
return f(first, *args, **kwargs)
return func
port_number_ref = _allow_event_attribute(port_number)
channel_number_ref = _allow_event_attribute(channel_number)
note_number_ref = _allow_event_attribute(note_number)
velocity_value_ref = _allow_event_attribute(velocity_value)
ctrl_number_ref = _allow_event_attribute(ctrl_number)
ctrl_value_ref = _allow_event_attribute(ctrl_value)
program_number_ref = _allow_event_attribute(program_number)
scene_number_ref = _allow_event_attribute(scene_number)
subscene_number_ref = _allow_event_attribute(subscene_number)
actual_ref = _allow_event_attribute(actual)
| gpl-2.0 |
AndKe/ardupilot | Tools/LogAnalyzer/tests/TestAutotune.py | 21 | 4797 | from LogAnalyzer import Test, TestResult
import DataflashLog
from VehicleType import VehicleType
# from ArduCopter/defines.h
AUTOTUNE_INITIALISED = 30
AUTOTUNE_OFF = 31
AUTOTUNE_RESTART = 32
AUTOTUNE_SUCCESS = 33
AUTOTUNE_FAILED = 34
AUTOTUNE_REACHED_LIMIT = 35
AUTOTUNE_PILOT_TESTING = 36
AUTOTUNE_SAVEDGAINS = 37
AUTOTUNE_EVENTS = frozenset([AUTOTUNE_INITIALISED,
AUTOTUNE_OFF,
AUTOTUNE_RESTART,
AUTOTUNE_SUCCESS,
AUTOTUNE_FAILED,
AUTOTUNE_REACHED_LIMIT,
AUTOTUNE_PILOT_TESTING,
AUTOTUNE_SAVEDGAINS])
class TestAutotune(Test):
'''test for autotune success (copter only)'''
class AutotuneSession(object):
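        """One autotune attempt, reconstructed from EV log events.

        ``events`` is a list of (line_number, event_id) tuples; the
        properties below summarize where the attempt ran and how it ended.
        """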
def __init__(self, events):
self.events = events
@property
def linestart(self):
return self.events[0][0]
@property
def linestop(self):
return self.events[-1][0]
@property
def success(self):
return AUTOTUNE_SUCCESS in [i for _,i in self.events]
@property
def failure(self):
return AUTOTUNE_FAILED in [i for _,i in self.events]
@property
def limit(self):
return AUTOTUNE_REACHED_LIMIT in [i for _,i in self.events]
def __repr__(self):
return "<AutotuneSession {}-{}>".format(self.linestart,self.linestop)
def __init__(self):
Test.__init__(self)
self.name = "Autotune"
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
if logdata.vehicleType != VehicleType.Copter:
self.result.status = TestResult.StatusType.NA
return
for i in ['EV','ATDE','ATUN']:
r = False
if not i in logdata.channels:
self.result.status = TestResult.StatusType.UNKNOWN
self.result.statusMessage = "No {} log data".format(i)
r = True
if r:
return
events = list(filter(lambda x: x[1] in AUTOTUNE_EVENTS, logdata.channels["EV"]["Id"].listData))
attempts = []
j = None
for i in range(0,len(events)):
line,ev = events[i]
if ev == AUTOTUNE_INITIALISED:
if j is not None:
attempts.append(TestAutotune.AutotuneSession(events[j:i]))
j = i
# last attempt
if j is not None:
attempts.append(TestAutotune.AutotuneSession(events[j:]))
for a in attempts:
# this should not be necessary!
def class_from_channel(c):
members = dict({'__init__':lambda x: setattr(x,i,None) for i in logdata.channels[c]})
cls = type(\
'Channel__{:s}'.format(c),
(object,),
members
)
return cls
# last wins
if a.success:
self.result.status = TestResult.StatusType.GOOD
s = "[+]"
elif a.failure:
self.result.status = TestResult.StatusType.FAIL
s = "[-]"
else:
self.result.status = TestResult.StatusType.UNKNOWN
s = "[?]"
s += " Autotune {}-{}\n".format(a.linestart,a.linestop)
self.result.statusMessage += s
if verbose:
linenext = a.linestart + 1
while linenext < a.linestop:
try:
line = logdata.channels['ATUN']['RateMax'].getNearestValueFwd(linenext)[1]
if line > a.linestop:
break
except:
break
atun = class_from_channel('ATUN')()
for key in logdata.channels['ATUN']:
setattr(atun, key, logdata.channels['ATUN'][key].getNearestValueFwd(linenext)[0])
linenext = logdata.channels['ATUN'][key].getNearestValueFwd(linenext)[1] + 1
self.result.statusMessage += 'ATUN Axis:{atun.Axis} TuneStep:{atun.TuneStep} RateMin:{atun.RateMin:5.0f} RateMax:{atun.RateMax:5.0f} RPGain:{atun.RPGain:1.4f} RDGain:{atun.RDGain:1.4f} SPGain:{atun.SPGain:1.1f} (@line:{l})\n'.format(l=linenext,s=s, atun=atun)
self.result.statusMessage += '\n'
| gpl-3.0 |
nanolearningllc/edx-platform-cypress-2 | common/test/acceptance/tests/lms/test_lms_acid_xblock.py | 122 | 5837 | # -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS.
"""
from unittest import expectedFailure
from ..helpers import UniqueCourseTest
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.course_info import CourseInfoPage
from ...pages.lms.tab_nav import TabNavPage
from ...pages.xblock.acid import AcidView
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
class XBlockAcidBase(UniqueCourseTest):
"""
Base class for tests that verify that XBlock integration is working correctly
"""
__test__ = False
def setUp(self):
"""
Create a unique identifier for the course used in this test.
"""
# Ensure that the superclass sets up
super(XBlockAcidBase, self).setUp()
self.setup_fixtures()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
def validate_acid_block_view(self, acid_block):
"""
Verify that the LMS view for the Acid Block is correct
"""
self.assertTrue(acid_block.init_fn_passed)
self.assertTrue(acid_block.resource_url_passed)
self.assertTrue(acid_block.scope_passed('user_state'))
self.assertTrue(acid_block.scope_passed('user_state_summary'))
self.assertTrue(acid_block.scope_passed('preferences'))
self.assertTrue(acid_block.scope_passed('user_info'))
class XBlockAcidNoChildTest(XBlockAcidBase):
"""
Tests of an AcidBlock with no children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid', 'Acid Block')
)
)
)
).install()
def test_acid_block(self):
"""
Verify that all expected acid block tests pass in the lms.
"""
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
acid_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid]')
self.validate_acid_block_view(acid_block)
class XBlockAcidChildTest(XBlockAcidBase):
"""
Tests of an AcidBlock with children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
XBlockFixtureDesc('acid', 'First Acid Child', metadata={'name': 'first'}),
XBlockFixtureDesc('acid', 'Second Acid Child', metadata={'name': 'second'}),
XBlockFixtureDesc('html', 'Html Child', data="<html>Contents</html>"),
)
)
)
)
).install()
def validate_acid_parent_block_view(self, acid_parent_block):
super(XBlockAcidChildTest, self).validate_acid_block_view(acid_parent_block)
self.assertTrue(acid_parent_block.child_tests_passed)
def test_acid_block(self):
"""
Verify that all expected acid block tests pass in the lms.
"""
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
acid_parent_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid_parent]')
self.validate_acid_parent_block_view(acid_parent_block)
acid_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid]')
self.validate_acid_block_view(acid_block)
class XBlockAcidAsideTest(XBlockAcidBase):
"""
    Tests of an AcidBlock with an acid aside applied
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid', 'Acid Block')
)
)
)
).install()
@expectedFailure
def test_acid_block(self):
"""
Verify that all expected acid block tests pass in the lms.
"""
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
acid_aside = AcidView(self.browser, '.xblock_asides-v1-student_view[data-block-type=acid_aside]')
self.validate_acid_aside_view(acid_aside)
acid_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid]')
self.validate_acid_block_view(acid_block)
def validate_acid_aside_view(self, acid_aside):
self.validate_acid_block_view(acid_aside)
| agpl-3.0 |
themiken/mtasa-blue | vendor/google-breakpad/src/third_party/protobuf/protobuf/gtest/test/gtest_nc_test.py | 277 | 3758 | #!/usr/bin/env python
#
# Copyright 2007, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Negative compilation test for Google Test."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
import unittest
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
if not IS_LINUX:
sys.exit(0) # Negative compilation tests are not supported on Windows & Mac.
class GTestNCTest(unittest.TestCase):
"""Negative compilation test for Google Test."""
def testCompilerError(self):
"""Verifies that erroneous code leads to expected compiler
messages."""
# Defines a list of test specs, where each element is a tuple
# (test name, list of regexes for matching the compiler errors).
test_specs = [
('CANNOT_IGNORE_RUN_ALL_TESTS_RESULT',
[r'ignoring return value']),
('USER_CANNOT_INCLUDE_GTEST_INTERNAL_INL_H',
[r'must not be included except by Google Test itself']),
('CATCHES_DECLARING_SETUP_IN_TEST_FIXTURE_WITH_TYPO',
[r'Setup_should_be_spelled_SetUp']),
('CATCHES_CALLING_SETUP_IN_TEST_WITH_TYPO',
[r'Setup_should_be_spelled_SetUp']),
('CATCHES_DECLARING_SETUP_IN_ENVIRONMENT_WITH_TYPO',
[r'Setup_should_be_spelled_SetUp']),
('CATCHES_CALLING_SETUP_IN_ENVIRONMENT_WITH_TYPO',
[r'Setup_should_be_spelled_SetUp']),
('CATCHES_WRONG_CASE_IN_TYPED_TEST_P',
[r'BarTest.*was not declared']),
('CATCHES_WRONG_CASE_IN_REGISTER_TYPED_TEST_CASE_P',
[r'BarTest.*was not declared']),
('CATCHES_WRONG_CASE_IN_INSTANTIATE_TYPED_TEST_CASE_P',
[r'BarTest.*not declared']),
('CATCHES_INSTANTIATE_TYPED_TESET_CASE_P_WITH_SAME_NAME_PREFIX',
[r'redefinition of.*My.*FooTest']),
('STATIC_ASSERT_TYPE_EQ_IS_NOT_A_TYPE',
[r'StaticAssertTypeEq.* does not name a type']),
('STATIC_ASSERT_TYPE_EQ_WORKS_IN_NAMESPACE',
[r'StaticAssertTypeEq.*int.*const int']),
('STATIC_ASSERT_TYPE_EQ_WORKS_IN_CLASS',
[r'StaticAssertTypeEq.*int.*bool']),
('STATIC_ASSERT_TYPE_EQ_WORKS_IN_FUNCTION',
[r'StaticAssertTypeEq.*const int.*int']),
('SANITY',
None)
]
# TODO([email protected]): verify that the test specs are satisfied.
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
noironetworks/neutron | neutron/common/profiler.py | 5 | 1934 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import context
from oslo_config import cfg
from oslo_log import log as logging
import osprofiler.initializer
from osprofiler import opts as profiler_opts
import osprofiler.web
CONF = cfg.CONF
profiler_opts.set_defaults(CONF)
LOG = logging.getLogger(__name__)
def setup(name, host='0.0.0.0'): # nosec
"""Setup OSprofiler notifier and enable profiling.
    :param name: name of the service that will be profiled
    :param host: host (either host name or host address) the service will be
                 running on. By default the host is set to 0.0.0.0, but a
                 more specific host name / address is highly recommended.
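
    Illustrative call (service name and host below are placeholders):
        setup('neutron-server', host='192.0.2.10')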
"""
if CONF.profiler.enabled:
osprofiler.initializer.init_from_conf(
conf=CONF,
context=context.get_admin_context().to_dict(),
project="neutron",
service=name,
host=host
)
LOG.info("OSProfiler is enabled.\n"
"Traces provided from the profiler "
"can only be subscribed to using the same HMAC keys that "
"are configured in Neutron's configuration file "
"under the [profiler] section.\n To disable OSprofiler "
"set in /etc/neutron/neutron.conf:\n"
"[profiler]\n"
"enabled=false")
| apache-2.0 |
inmcm/Simon_Speck_Ciphers | Python/simonspeckciphers/speck/speck.py | 1 | 10094 | from __future__ import print_function
class SpeckCipher(object):
"""Speck Block Cipher Object"""
# valid cipher configurations stored:
# block_size:{key_size:number_rounds}
__valid_setups = {32: {64: 22},
48: {72: 22, 96: 23},
64: {96: 26, 128: 27},
96: {96: 28, 144: 29},
128: {128: 32, 192: 33, 256: 34}}
__valid_modes = ['ECB', 'CTR', 'CBC', 'PCBC', 'CFB', 'OFB']
def encrypt_round(self, x, y, k):
"""Complete One Round of Feistel Operation"""
rs_x = ((x << (self.word_size - self.alpha_shift)) + (x >> self.alpha_shift)) & self.mod_mask
add_sxy = (rs_x + y) & self.mod_mask
new_x = k ^ add_sxy
ls_y = ((y >> (self.word_size - self.beta_shift)) + (y << self.beta_shift)) & self.mod_mask
new_y = new_x ^ ls_y
return new_x, new_y
def decrypt_round(self, x, y, k):
"""Complete One Round of Inverse Feistel Operation"""
xor_xy = x ^ y
new_y = ((xor_xy << (self.word_size - self.beta_shift)) + (xor_xy >> self.beta_shift)) & self.mod_mask
xor_xk = x ^ k
msub = ((xor_xk - new_y) + self.mod_mask_sub) % self.mod_mask_sub
new_x = ((msub >> (self.word_size - self.alpha_shift)) + (msub << self.alpha_shift)) & self.mod_mask
return new_x, new_y
def __init__(self, key, key_size=128, block_size=128, mode='ECB', init=0, counter=0):
# Setup block/word size
try:
self.possible_setups = self.__valid_setups[block_size]
self.block_size = block_size
self.word_size = self.block_size >> 1
except KeyError:
print('Invalid block size!')
print('Please use one of the following block sizes:', [x for x in self.__valid_setups.keys()])
raise
# Setup Number of Rounds and Key Size
try:
self.rounds = self.possible_setups[key_size]
self.key_size = key_size
except KeyError:
print('Invalid key size for selected block size!!')
print('Please use one of the following key sizes:', [x for x in self.possible_setups.keys()])
raise
# Create Properly Sized bit mask for truncating addition and left shift outputs
self.mod_mask = (2 ** self.word_size) - 1
# Mod mask for modular subtraction
self.mod_mask_sub = (2 ** self.word_size)
# Setup Circular Shift Parameters
if self.block_size == 32:
self.beta_shift = 2
self.alpha_shift = 7
else:
self.beta_shift = 3
self.alpha_shift = 8
# Parse the given iv and truncate it to the block length
try:
self.iv = init & ((2 ** self.block_size) - 1)
self.iv_upper = self.iv >> self.word_size
self.iv_lower = self.iv & self.mod_mask
except (ValueError, TypeError):
print('Invalid IV Value!')
print('Please Provide IV as int')
raise
# Parse the given Counter and truncate it to the block length
try:
self.counter = counter & ((2 ** self.block_size) - 1)
except (ValueError, TypeError):
print('Invalid Counter Value!')
print('Please Provide Counter as int')
raise
# Check Cipher Mode
try:
position = self.__valid_modes.index(mode)
self.mode = self.__valid_modes[position]
except ValueError:
print('Invalid cipher mode!')
print('Please use one of the following block cipher modes:', self.__valid_modes)
raise
# Parse the given key and truncate it to the key length
try:
self.key = key & ((2 ** self.key_size) - 1)
except (ValueError, TypeError):
print('Invalid Key Value!')
print('Please Provide Key as int')
raise
# Pre-compile key schedule
self.key_schedule = [self.key & self.mod_mask]
l_schedule = [(self.key >> (x * self.word_size)) & self.mod_mask for x in
range(1, self.key_size // self.word_size)]
for x in range(self.rounds - 1):
new_l_k = self.encrypt_round(l_schedule[x], self.key_schedule[x], x)
l_schedule.append(new_l_k[0])
self.key_schedule.append(new_l_k[1])
def encrypt(self, plaintext):
try:
b = (plaintext >> self.word_size) & self.mod_mask
a = plaintext & self.mod_mask
except TypeError:
print('Invalid plaintext!')
print('Please provide plaintext as int')
raise
if self.mode == 'ECB':
b, a = self.encrypt_function(b, a)
elif self.mode == 'CTR':
true_counter = self.iv + self.counter
d = (true_counter >> self.word_size) & self.mod_mask
c = true_counter & self.mod_mask
d, c = self.encrypt_function(d, c)
b ^= d
a ^= c
self.counter += 1
elif self.mode == 'CBC':
b ^= self.iv_upper
a ^= self.iv_lower
b, a = self.encrypt_function(b, a)
self.iv_upper = b
self.iv_lower = a
self.iv = (b << self.word_size) + a
elif self.mode == 'PCBC':
f, e = b, a
b ^= self.iv_upper
a ^= self.iv_lower
b, a = self.encrypt_function(b, a)
self.iv_upper = (b ^ f)
self.iv_lower = (a ^ e)
self.iv = (self.iv_upper << self.word_size) + self.iv_lower
elif self.mode == 'CFB':
d = self.iv_upper
c = self.iv_lower
d, c = self.encrypt_function(d, c)
b ^= d
a ^= c
self.iv_upper = b
self.iv_lower = a
self.iv = (b << self.word_size) + a
elif self.mode == 'OFB':
d = self.iv_upper
c = self.iv_lower
d, c = self.encrypt_function(d, c)
self.iv_upper = d
self.iv_lower = c
self.iv = (d << self.word_size) + c
b ^= d
a ^= c
ciphertext = (b << self.word_size) + a
return ciphertext
def decrypt(self, ciphertext):
try:
b = (ciphertext >> self.word_size) & self.mod_mask
a = ciphertext & self.mod_mask
except TypeError:
print('Invalid ciphertext!')
            print('Please provide ciphertext as int')
raise
if self.mode == 'ECB':
b, a = self.decrypt_function(b, a)
elif self.mode == 'CTR':
true_counter = self.iv + self.counter
d = (true_counter >> self.word_size) & self.mod_mask
c = true_counter & self.mod_mask
d, c = self.encrypt_function(d, c)
b ^= d
a ^= c
self.counter += 1
elif self.mode == 'CBC':
f, e = b, a
b, a = self.decrypt_function(b, a)
b ^= self.iv_upper
a ^= self.iv_lower
self.iv_upper = f
self.iv_lower = e
self.iv = (f << self.word_size) + e
elif self.mode == 'PCBC':
f, e = b, a
b, a = self.decrypt_function(b, a)
b ^= self.iv_upper
a ^= self.iv_lower
self.iv_upper = (b ^ f)
self.iv_lower = (a ^ e)
self.iv = (self.iv_upper << self.word_size) + self.iv_lower
elif self.mode == 'CFB':
d = self.iv_upper
c = self.iv_lower
self.iv_upper = b
self.iv_lower = a
self.iv = (b << self.word_size) + a
d, c = self.encrypt_function(d, c)
b ^= d
a ^= c
elif self.mode == 'OFB':
d = self.iv_upper
c = self.iv_lower
d, c = self.encrypt_function(d, c)
self.iv_upper = d
self.iv_lower = c
self.iv = (d << self.word_size) + c
b ^= d
a ^= c
plaintext = (b << self.word_size) + a
return plaintext
def encrypt_function(self, upper_word, lower_word):
x = upper_word
y = lower_word
# Run Encryption Steps For Appropriate Number of Rounds
for k in self.key_schedule:
rs_x = ((x << (self.word_size - self.alpha_shift)) + (x >> self.alpha_shift)) & self.mod_mask
add_sxy = (rs_x + y) & self.mod_mask
x = k ^ add_sxy
ls_y = ((y >> (self.word_size - self.beta_shift)) + (y << self.beta_shift)) & self.mod_mask
y = x ^ ls_y
        return x, y
def decrypt_function(self, upper_word, lower_word):
x = upper_word
y = lower_word
        # Run Decryption Steps For Appropriate Number of Rounds
for k in reversed(self.key_schedule):
xor_xy = x ^ y
y = ((xor_xy << (self.word_size - self.beta_shift)) + (xor_xy >> self.beta_shift)) & self.mod_mask
xor_xk = x ^ k
msub = ((xor_xk - y) + self.mod_mask_sub) % self.mod_mask_sub
x = ((msub >> (self.word_size - self.alpha_shift)) + (msub << self.alpha_shift)) & self.mod_mask
        return x, y
def update_iv(self, new_iv=None):
if new_iv:
try:
self.iv = new_iv & ((2 ** self.block_size) - 1)
self.iv_upper = self.iv >> self.word_size
self.iv_lower = self.iv & self.mod_mask
except TypeError:
print('Invalid Initialization Vector!')
print('Please provide IV as int')
raise
return self.iv
if __name__ == "__main__":
cipher = SpeckCipher(0x1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100, 256, 128, 'ECB')
g = cipher.encrypt(0x65736f6874206e49202e72656e6f6f70)
print(hex(g))
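    # Round-trip sanity check (sketch): ECB mode keeps no chaining state,
    # so decrypt() should exactly invert encrypt() for the block above.
    recovered = cipher.decrypt(g)
    assert recovered == 0x65736f6874206e49202e72656e6f6f70
    print(hex(recovered))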
| mit |
keelhaule/alfanous | src/alfanous/Support/whoosh/lang/porter2.py | 11 | 9418 | # Copyright (c) 2008 Michael Dirolf (mike at dirolf dot com)
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""An implementation of the Porter2 stemming algorithm.
See http://snowball.tartarus.org/algorithms/english/stemmer.html
Adapted from pyporter2 by Michael Dirolf.
This algorithm is more correct but (at least in this implementation)
several times slower than the original porter algorithm as implemented
in whoosh.lang.porter.
"""
import re
r_exp = re.compile(r"[^aeiouy]*[aeiouy]+[^aeiouy](\w*)")
ewss_exp1 = re.compile(r"^[aeiouy][^aeiouy]$")
ewss_exp2 = re.compile(r".*[^aeiouy][aeiouy][^aeiouywxY]$")
ccy_exp = re.compile(r"([aeiouy])y")
s1a_exp = re.compile(r"[aeiouy].")
s1b_exp = re.compile(r"[aeiouy]")
def get_r1(word):
# exceptional forms
if word.startswith('gener') or word.startswith('arsen'):
return 5
if word.startswith('commun'):
return 6
# normal form
match = r_exp.match(word)
if match:
return match.start(1)
return len(word)
def get_r2(word):
match = r_exp.match(word, get_r1(word))
if match:
return match.start(1)
return len(word)
def ends_with_short_syllable(word):
if len(word) == 2:
if ewss_exp1.match(word):
return True
if ewss_exp2.match(word):
return True
return False
def is_short_word(word):
if ends_with_short_syllable(word):
if get_r1(word) == len(word):
return True
return False
def remove_initial_apostrophe(word):
if word.startswith("'"):
return word[1:]
return word
def capitalize_consonant_ys(word):
if word.startswith('y'):
word = 'Y' + word[1:]
    return ccy_exp.sub(r'\g<1>Y', word)
def step_0(word):
if word.endswith("'s'"):
return word[:-3]
if word.endswith("'s"):
return word[:-2]
if word.endswith("'"):
return word[:-1]
return word
def step_1a(word):
if word.endswith('sses'):
return word[:-4] + 'ss'
if word.endswith('ied') or word.endswith('ies'):
if len(word) > 4:
return word[:-3] + 'i'
else:
return word[:-3] + 'ie'
if word.endswith('us') or word.endswith('ss'):
return word
if word.endswith('s'):
preceding = word[:-1]
if s1a_exp.search(preceding):
return preceding
return word
return word
doubles = ('bb', 'dd', 'ff', 'gg', 'mm', 'nn', 'pp', 'rr', 'tt')
def ends_with_double(word):
for double in doubles:
if word.endswith(double):
return True
return False
def step_1b_helper(word):
if word.endswith('at') or word.endswith('bl') or word.endswith('iz'):
return word + 'e'
if ends_with_double(word):
return word[:-1]
if is_short_word(word):
return word + 'e'
return word
s1b_suffixes = ('ed', 'edly', 'ing', 'ingly')
def step_1b(word, r1):
if word.endswith('eedly'):
if len(word) - 5 >= r1:
return word[:-3]
return word
if word.endswith('eed'):
if len(word) - 3 >= r1:
return word[:-1]
return word
for suffix in s1b_suffixes:
if word.endswith(suffix):
preceding = word[:-len(suffix)]
if s1b_exp.search(preceding):
return step_1b_helper(preceding)
return word
return word
def step_1c(word):
if word.endswith('y') or word.endswith('Y'):
if word[-2] not in 'aeiouy':
if len(word) > 2:
return word[:-1] + 'i'
return word
def step_2_helper(word, r1, end, repl, prev):
if word.endswith(end):
if len(word) - len(end) >= r1:
if prev == []:
return word[:-len(end)] + repl
for p in prev:
if word[:-len(end)].endswith(p):
return word[:-len(end)] + repl
return word
return None
s2_triples = (('ization', 'ize', []),
('ational', 'ate', []),
('fulness', 'ful', []),
('ousness', 'ous', []),
('iveness', 'ive', []),
('tional', 'tion', []),
('biliti', 'ble', []),
('lessli', 'less', []),
('entli', 'ent', []),
('ation', 'ate', []),
('alism', 'al', []),
('aliti', 'al', []),
('ousli', 'ous', []),
('iviti', 'ive', []),
('fulli', 'ful', []),
('enci', 'ence', []),
('anci', 'ance', []),
('abli', 'able', []),
('izer', 'ize', []),
('ator', 'ate', []),
('alli', 'al', []),
('bli', 'ble', []),
('ogi', 'og', ['l']),
('li', '', ['c', 'd', 'e', 'g', 'h', 'k', 'm', 'n', 'r', 't']))
def step_2(word, r1):
for trip in s2_triples:
attempt = step_2_helper(word, r1, trip[0], trip[1], trip[2])
if attempt:
return attempt
return word
def step_3_helper(word, r1, r2, end, repl, r2_necessary):
if word.endswith(end):
if len(word) - len(end) >= r1:
if not r2_necessary:
return word[:-len(end)] + repl
else:
if len(word) - len(end) >= r2:
return word[:-len(end)] + repl
return word
return None
s3_triples = (('ational', 'ate', False),
('tional', 'tion', False),
('alize', 'al', False),
('icate', 'ic', False),
('iciti', 'ic', False),
('ative', '', True),
('ical', 'ic', False),
('ness', '', False),
('ful', '', False))
def step_3(word, r1, r2):
for trip in s3_triples:
attempt = step_3_helper(word, r1, r2, trip[0], trip[1], trip[2])
if attempt:
return attempt
return word
s4_delete_list = ('al', 'ance', 'ence', 'er', 'ic', 'able', 'ible', 'ant', 'ement',
'ment', 'ent', 'ism', 'ate', 'iti', 'ous', 'ive', 'ize')
def step_4(word, r2):
for end in s4_delete_list:
if word.endswith(end):
if len(word) - len(end) >= r2:
return word[:-len(end)]
return word
if word.endswith('sion') or word.endswith('tion'):
if len(word) - 3 >= r2:
return word[:-3]
return word
def step_5(word, r1, r2):
if word.endswith('l'):
if len(word) - 1 >= r2 and word[-2] == 'l':
return word[:-1]
return word
if word.endswith('e'):
if len(word) - 1 >= r2:
return word[:-1]
if len(word) - 1 >= r1 and not ends_with_short_syllable(word[:-1]):
return word[:-1]
return word
def normalize_ys(word):
return word.replace('Y', 'y')
exceptional_forms = {'skis': 'ski',
'skies': 'sky',
'dying': 'die',
'lying': 'lie',
'tying': 'tie',
'idly': 'idl',
'gently': 'gentl',
'ugly': 'ugli',
'early': 'earli',
'only': 'onli',
'singly': 'singl',
'sky': 'sky',
'news': 'news',
'howe': 'howe',
'atlas': 'atlas',
'cosmos': 'cosmos',
'bias': 'bias',
'andes': 'andes'}
exceptional_early_exit_post_1a = frozenset(['inning', 'outing', 'canning', 'herring',
'earring', 'proceed', 'exceed', 'succeed'])
def stem(word):
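    """Return the Porter2 (English Snowball) stem of ``word``."""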
if len(word) <= 2:
return word
word = remove_initial_apostrophe(word)
# handle some exceptional forms
if word in exceptional_forms:
return exceptional_forms[word]
word = capitalize_consonant_ys(word)
r1 = get_r1(word)
r2 = get_r2(word)
word = step_0(word)
word = step_1a(word)
# handle some more exceptional forms
if word in exceptional_early_exit_post_1a:
return word
word = step_1b(word, r1)
word = step_1c(word)
word = step_2(word, r1)
word = step_3(word, r1, r2)
word = step_4(word, r2)
word = step_5(word, r1, r2)
word = normalize_ys(word)
return word
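if __name__ == '__main__':
    # Minimal smoke test (sketch); the sample words are illustrative and
    # not taken from the Snowball reference vocabulary.
    for w in ('caresses', 'ponies', 'dying', 'agreed', 'meetings'):
        print('%s -> %s' % (w, stem(w)))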
| agpl-3.0 |
neerajvashistha/pa-dude | lib/python2.7/site-packages/sphinx/builders/xml.py | 3 | 3007 | # -*- coding: utf-8 -*-
"""
sphinx.builders.xml
~~~~~~~~~~~~~~~~~~~
Docutils-native XML and pseudo-XML builders.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import codecs
from os import path
from docutils import nodes
from docutils.io import StringOutput
from sphinx.builders import Builder
from sphinx.util.osutil import ensuredir, os_path
from sphinx.writers.xml import XMLWriter, PseudoXMLWriter
class XMLBuilder(Builder):
"""
Builds Docutils-native XML.
"""
name = 'xml'
format = 'xml'
out_suffix = '.xml'
allow_parallel = True
_writer_class = XMLWriter
def init(self):
pass
def get_outdated_docs(self):
for docname in self.env.found_docs:
if docname not in self.env.all_docs:
yield docname
continue
targetname = self.env.doc2path(docname, self.outdir,
self.out_suffix)
try:
targetmtime = path.getmtime(targetname)
except Exception:
targetmtime = 0
try:
srcmtime = path.getmtime(self.env.doc2path(docname))
if srcmtime > targetmtime:
yield docname
except EnvironmentError:
# source doesn't exist anymore
pass
def get_target_uri(self, docname, typ=None):
return docname
def prepare_writing(self, docnames):
self.writer = self._writer_class(self)
def write_doc(self, docname, doctree):
# work around multiple string % tuple issues in docutils;
# replace tuples in attribute values with lists
doctree = doctree.deepcopy()
for node in doctree.traverse(nodes.Element):
for att, value in node.attributes.items():
if isinstance(value, tuple):
node.attributes[att] = list(value)
value = node.attributes[att]
if isinstance(value, list):
for i, val in enumerate(value):
if isinstance(val, tuple):
value[i] = list(val)
destination = StringOutput(encoding='utf-8')
self.writer.write(doctree, destination)
outfilename = path.join(self.outdir, os_path(docname) + self.out_suffix)
ensuredir(path.dirname(outfilename))
try:
f = codecs.open(outfilename, 'w', 'utf-8')
try:
f.write(self.writer.output)
finally:
f.close()
except (IOError, OSError) as err:
self.warn("error writing file %s: %s" % (outfilename, err))
def finish(self):
pass
class PseudoXMLBuilder(XMLBuilder):
"""
Builds pseudo-XML for display purposes.
"""
name = 'pseudoxml'
format = 'pseudoxml'
out_suffix = '.pseudoxml'
_writer_class = PseudoXMLWriter
| mit |
technologiescollege/s2a_fr | s2a/Python/Lib/bsddb/test/test_compare.py | 72 | 15142 | """
TestCases for python DB duplicate and Btree key comparison function.
"""
import sys, os, re
import test_all
from cStringIO import StringIO
import unittest
from test_all import db, dbshelve, test_support, \
get_new_environment_path, get_new_database_path
# Needed for python 3. "cmp" vanished in 3.0.1
def cmp(a, b) :
if a==b : return 0
if a<b : return -1
return 1
lexical_cmp = cmp
def lowercase_cmp(left, right) :
return cmp(left.lower(), right.lower())
def make_reverse_comparator(cmp) :
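    """Wrap a comparator so it sorts in the opposite order, e.g.
    make_reverse_comparator(lexical_cmp) orders descending."""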
def reverse(left, right, delegate=cmp) :
return - delegate(left, right)
return reverse
_expected_lexical_test_data = ['', 'CCCP', 'a', 'aaa', 'b', 'c', 'cccce', 'ccccf']
_expected_lowercase_test_data = ['', 'a', 'aaa', 'b', 'c', 'CC', 'cccce', 'ccccf', 'CCCP']
class ComparatorTests(unittest.TestCase) :
def comparator_test_helper(self, comparator, expected_data) :
data = expected_data[:]
import sys
if sys.version_info < (2, 6) :
data.sort(cmp=comparator)
else : # Insertion Sort. Please, improve
data2 = []
for i in data :
for j, k in enumerate(data2) :
r = comparator(k, i)
if r == 1 :
data2.insert(j, i)
break
else :
data2.append(i)
data = data2
self.assertEqual(data, expected_data,
"comparator `%s' is not right: %s vs. %s"
% (comparator, expected_data, data))
def test_lexical_comparator(self) :
self.comparator_test_helper(lexical_cmp, _expected_lexical_test_data)
def test_reverse_lexical_comparator(self) :
rev = _expected_lexical_test_data[:]
rev.reverse()
self.comparator_test_helper(make_reverse_comparator(lexical_cmp),
rev)
def test_lowercase_comparator(self) :
self.comparator_test_helper(lowercase_cmp,
_expected_lowercase_test_data)
class AbstractBtreeKeyCompareTestCase(unittest.TestCase) :
env = None
db = None
if (sys.version_info < (2, 7)) or ((sys.version_info >= (3,0)) and
(sys.version_info < (3, 2))) :
def assertLess(self, a, b, msg=None) :
return self.assertTrue(a<b, msg=msg)
def setUp(self) :
self.filename = self.__class__.__name__ + '.db'
self.homeDir = get_new_environment_path()
env = db.DBEnv()
env.open(self.homeDir,
db.DB_CREATE | db.DB_INIT_MPOOL
| db.DB_INIT_LOCK | db.DB_THREAD)
self.env = env
def tearDown(self) :
self.closeDB()
if self.env is not None:
self.env.close()
self.env = None
test_support.rmtree(self.homeDir)
def addDataToDB(self, data) :
i = 0
for item in data:
self.db.put(item, str(i))
i = i + 1
def createDB(self, key_comparator) :
self.db = db.DB(self.env)
self.setupDB(key_comparator)
self.db.open(self.filename, "test", db.DB_BTREE, db.DB_CREATE)
def setupDB(self, key_comparator) :
self.db.set_bt_compare(key_comparator)
def closeDB(self) :
if self.db is not None:
self.db.close()
self.db = None
def startTest(self) :
pass
def finishTest(self, expected = None) :
if expected is not None:
self.check_results(expected)
self.closeDB()
def check_results(self, expected) :
curs = self.db.cursor()
try:
index = 0
rec = curs.first()
while rec:
key, ignore = rec
self.assertLess(index, len(expected),
"to many values returned from cursor")
self.assertEqual(expected[index], key,
"expected value `%s' at %d but got `%s'"
% (expected[index], index, key))
index = index + 1
rec = curs.next()
self.assertEqual(index, len(expected),
"not enough values returned from cursor")
finally:
curs.close()
class BtreeKeyCompareTestCase(AbstractBtreeKeyCompareTestCase) :
def runCompareTest(self, comparator, data) :
self.startTest()
self.createDB(comparator)
self.addDataToDB(data)
self.finishTest(data)
def test_lexical_ordering(self) :
self.runCompareTest(lexical_cmp, _expected_lexical_test_data)
def test_reverse_lexical_ordering(self) :
expected_rev_data = _expected_lexical_test_data[:]
expected_rev_data.reverse()
self.runCompareTest(make_reverse_comparator(lexical_cmp),
expected_rev_data)
def test_compare_function_useless(self) :
self.startTest()
def socialist_comparator(l, r) :
return 0
self.createDB(socialist_comparator)
self.addDataToDB(['b', 'a', 'd'])
# all things being equal the first key will be the only key
# in the database... (with the last key's value fwiw)
self.finishTest(['b'])
class BtreeExceptionsTestCase(AbstractBtreeKeyCompareTestCase) :
def test_raises_non_callable(self) :
self.startTest()
self.assertRaises(TypeError, self.createDB, 'abc')
self.assertRaises(TypeError, self.createDB, None)
self.finishTest()
def test_set_bt_compare_with_function(self) :
self.startTest()
self.createDB(lexical_cmp)
self.finishTest()
def check_results(self, results) :
pass
def test_compare_function_incorrect(self) :
self.startTest()
def bad_comparator(l, r) :
return 1
# verify that set_bt_compare checks that comparator('', '') == 0
self.assertRaises(TypeError, self.createDB, bad_comparator)
self.finishTest()
def verifyStderr(self, method, successRe) :
"""
Call method() while capturing sys.stderr output internally and
call self.fail() if successRe.search() does not match the stderr
output. This is used to test for uncatchable exceptions.
"""
stdErr = sys.stderr
sys.stderr = StringIO()
try:
method()
finally:
temp = sys.stderr
sys.stderr = stdErr
errorOut = temp.getvalue()
if not successRe.search(errorOut) :
self.fail("unexpected stderr output:\n"+errorOut)
if sys.version_info < (3, 0) : # XXX: How to do this in Py3k ???
sys.exc_traceback = sys.last_traceback = None
def _test_compare_function_exception(self) :
self.startTest()
def bad_comparator(l, r) :
if l == r:
# pass the set_bt_compare test
return 0
raise RuntimeError, "i'm a naughty comparison function"
self.createDB(bad_comparator)
#print "\n*** test should print 2 uncatchable tracebacks ***"
self.addDataToDB(['a', 'b', 'c']) # this should raise, but...
self.finishTest()
def test_compare_function_exception(self) :
self.verifyStderr(
self._test_compare_function_exception,
re.compile('(^RuntimeError:.* naughty.*){2}', re.M|re.S)
)
def _test_compare_function_bad_return(self) :
self.startTest()
def bad_comparator(l, r) :
if l == r:
# pass the set_bt_compare test
return 0
return l
self.createDB(bad_comparator)
#print "\n*** test should print 2 errors about returning an int ***"
self.addDataToDB(['a', 'b', 'c']) # this should raise, but...
self.finishTest()
def test_compare_function_bad_return(self) :
self.verifyStderr(
self._test_compare_function_bad_return,
re.compile('(^TypeError:.* return an int.*){2}', re.M|re.S)
)
def test_cannot_assign_twice(self) :
def my_compare(a, b) :
return 0
self.startTest()
self.createDB(my_compare)
self.assertRaises(RuntimeError, self.db.set_bt_compare, my_compare)
class AbstractDuplicateCompareTestCase(unittest.TestCase) :
env = None
db = None
if (sys.version_info < (2, 7)) or ((sys.version_info >= (3,0)) and
(sys.version_info < (3, 2))) :
def assertLess(self, a, b, msg=None) :
return self.assertTrue(a<b, msg=msg)
def setUp(self) :
self.filename = self.__class__.__name__ + '.db'
self.homeDir = get_new_environment_path()
env = db.DBEnv()
env.open(self.homeDir,
db.DB_CREATE | db.DB_INIT_MPOOL
| db.DB_INIT_LOCK | db.DB_THREAD)
self.env = env
def tearDown(self) :
self.closeDB()
if self.env is not None:
self.env.close()
self.env = None
test_support.rmtree(self.homeDir)
def addDataToDB(self, data) :
for item in data:
self.db.put("key", item)
def createDB(self, dup_comparator) :
self.db = db.DB(self.env)
self.setupDB(dup_comparator)
self.db.open(self.filename, "test", db.DB_BTREE, db.DB_CREATE)
def setupDB(self, dup_comparator) :
self.db.set_flags(db.DB_DUPSORT)
self.db.set_dup_compare(dup_comparator)
def closeDB(self) :
if self.db is not None:
self.db.close()
self.db = None
def startTest(self) :
pass
def finishTest(self, expected = None) :
if expected is not None:
self.check_results(expected)
self.closeDB()
def check_results(self, expected) :
curs = self.db.cursor()
try:
index = 0
rec = curs.first()
while rec:
ignore, data = rec
self.assertLess(index, len(expected),
"to many values returned from cursor")
self.assertEqual(expected[index], data,
"expected value `%s' at %d but got `%s'"
% (expected[index], index, data))
index = index + 1
rec = curs.next()
self.assertEqual(index, len(expected),
"not enough values returned from cursor")
finally:
curs.close()
class DuplicateCompareTestCase(AbstractDuplicateCompareTestCase) :
def runCompareTest(self, comparator, data) :
self.startTest()
self.createDB(comparator)
self.addDataToDB(data)
self.finishTest(data)
def test_lexical_ordering(self) :
self.runCompareTest(lexical_cmp, _expected_lexical_test_data)
def test_reverse_lexical_ordering(self) :
expected_rev_data = _expected_lexical_test_data[:]
expected_rev_data.reverse()
self.runCompareTest(make_reverse_comparator(lexical_cmp),
expected_rev_data)
class DuplicateExceptionsTestCase(AbstractDuplicateCompareTestCase) :
def test_raises_non_callable(self) :
self.startTest()
self.assertRaises(TypeError, self.createDB, 'abc')
self.assertRaises(TypeError, self.createDB, None)
self.finishTest()
def test_set_dup_compare_with_function(self) :
self.startTest()
self.createDB(lexical_cmp)
self.finishTest()
def check_results(self, results) :
pass
def test_compare_function_incorrect(self) :
self.startTest()
def bad_comparator(l, r) :
return 1
# verify that set_dup_compare checks that comparator('', '') == 0
self.assertRaises(TypeError, self.createDB, bad_comparator)
self.finishTest()
def test_compare_function_useless(self) :
self.startTest()
def socialist_comparator(l, r) :
return 0
self.createDB(socialist_comparator)
# DUPSORT does not allow "duplicate duplicates"
self.assertRaises(db.DBKeyExistError, self.addDataToDB, ['b', 'a', 'd'])
self.finishTest()
def verifyStderr(self, method, successRe) :
"""
Call method() while capturing sys.stderr output internally and
call self.fail() if successRe.search() does not match the stderr
output. This is used to test for uncatchable exceptions.
"""
stdErr = sys.stderr
sys.stderr = StringIO()
try:
method()
finally:
temp = sys.stderr
sys.stderr = stdErr
errorOut = temp.getvalue()
if not successRe.search(errorOut) :
self.fail("unexpected stderr output:\n"+errorOut)
if sys.version_info < (3, 0) : # XXX: How to do this in Py3k ???
sys.exc_traceback = sys.last_traceback = None
def _test_compare_function_exception(self) :
self.startTest()
def bad_comparator(l, r) :
if l == r:
# pass the set_dup_compare test
return 0
raise RuntimeError, "i'm a naughty comparison function"
self.createDB(bad_comparator)
#print "\n*** test should print 2 uncatchable tracebacks ***"
self.addDataToDB(['a', 'b', 'c']) # this should raise, but...
self.finishTest()
def test_compare_function_exception(self) :
self.verifyStderr(
self._test_compare_function_exception,
re.compile('(^RuntimeError:.* naughty.*){2}', re.M|re.S)
)
def _test_compare_function_bad_return(self) :
self.startTest()
def bad_comparator(l, r) :
if l == r:
# pass the set_dup_compare test
return 0
return l
self.createDB(bad_comparator)
#print "\n*** test should print 2 errors about returning an int ***"
self.addDataToDB(['a', 'b', 'c']) # this should raise, but...
self.finishTest()
def test_compare_function_bad_return(self) :
self.verifyStderr(
self._test_compare_function_bad_return,
re.compile('(^TypeError:.* return an int.*){2}', re.M|re.S)
)
def test_cannot_assign_twice(self) :
def my_compare(a, b) :
return 0
self.startTest()
self.createDB(my_compare)
self.assertRaises(RuntimeError, self.db.set_dup_compare, my_compare)
def test_suite() :
res = unittest.TestSuite()
res.addTest(unittest.makeSuite(ComparatorTests))
res.addTest(unittest.makeSuite(BtreeExceptionsTestCase))
res.addTest(unittest.makeSuite(BtreeKeyCompareTestCase))
res.addTest(unittest.makeSuite(DuplicateExceptionsTestCase))
res.addTest(unittest.makeSuite(DuplicateCompareTestCase))
return res
if __name__ == '__main__':
unittest.main(defaultTest = 'suite')
| gpl-3.0 |
fuselock/odoo | addons/hr_expense/tests/test_journal_entries.py | 251 | 2923 | from openerp.tests.common import TransactionCase
from openerp import netsvc, workflow
class TestCheckJournalEntry(TransactionCase):
"""
Check journal entries when the expense product is having tax which is tax included.
"""
def setUp(self):
super(TestCheckJournalEntry, self).setUp()
cr, uid = self.cr, self.uid
self.expense_obj = self.registry('hr.expense.expense')
self.exp_line_obj = self.registry('hr.expense.line')
self.product_obj = self.registry('product.product')
self.tax_obj = self.registry('account.tax')
self.code_obj = self.registry('account.tax.code')
_, self.product_id = self.registry("ir.model.data").get_object_reference(cr, uid, "hr_expense", "air_ticket")
_, self.employee_id = self.registry("ir.model.data").get_object_reference(cr, uid, "hr", "employee_mit")
self.base_code_id = self.code_obj.create(cr, uid, {'name': 'Expense Base Code'})
self.tax_id = self.tax_obj.create(cr, uid, {
'name': 'Expense 10%',
'amount': 0.10,
'type': 'percent',
'type_tax_use': 'purchase',
'price_include': True,
'base_code_id': self.base_code_id,
'base_sign': -1,
})
self.product_obj.write(cr, uid, self.product_id, {'supplier_taxes_id': [(6, 0, [self.tax_id])]})
self.expense_id = self.expense_obj.create(cr, uid, {
'name': 'Expense for Minh Tran',
'employee_id': self.employee_id,
})
self.exp_line_obj.create(cr, uid, {
'name': 'Car Travel Expenses',
'product_id': self.product_id,
'unit_amount': 700.00,
'expense_id': self.expense_id
})
def test_journal_entry(self):
cr, uid = self.cr, self.uid
#Submit to Manager
workflow.trg_validate(uid, 'hr.expense.expense', self.expense_id, 'confirm', cr)
#Approve
workflow.trg_validate(uid, 'hr.expense.expense', self.expense_id, 'validate', cr)
#Create Expense Entries
workflow.trg_validate(uid, 'hr.expense.expense', self.expense_id, 'done', cr)
self.expense = self.expense_obj.browse(cr, uid, self.expense_id)
self.assertEquals(self.expense.state, 'done', 'Expense is not in Waiting Payment state')
self.assertTrue(self.expense.account_move_id.id, 'Expense Journal Entry is not created')
for line in self.expense.account_move_id.line_id:
if line.credit:
self.assertEquals(line.credit, 700.00, 'Expense Payable Amount is not matched for journal item')
else:
                if line.tax_code_id:
                    self.assertEquals(line.debit, 636.36, 'Tax Base Amount is not matched for journal item')
                else:
                    self.assertEquals(line.debit, 63.64, 'Tax Amount is not matched for journal item')
| agpl-3.0 |
pinterb/st2incubator | packs/st2cd/actions/action_run.py | 1 | 1636 | #!/usr/bin/env python
import os
import sys
import time
import json
import argparse
from st2client import models
from st2client.client import Client
END_STATES = ['succeeded', 'failed']
ST2HOST = 'localhost'
parser = argparse.ArgumentParser()
parser.add_argument('--name', action="store", dest="name", required=True)
parser.add_argument('--action', action="store", dest="action", required=True)
parser.add_argument('--params', action="store", dest="params")
parser.add_argument('--token', action="store", dest="token")
args = parser.parse_args()
runner = None
# --token is optional and os.environ values must be strings, so only
# set the variable when a token was actually supplied
if args.token:
    os.environ['ST2_AUTH_TOKEN'] = args.token
def runAction(action_ref, params):
client = Client()
action_exec_mgr = client.managers['LiveAction']
execution = models.LiveAction()
execution.action = action_ref
execution.parameters = param_parser(params)
actionexec = action_exec_mgr.create(execution)
while actionexec.status not in END_STATES:
time.sleep(2)
actionexec = action_exec_mgr.get_by_id(actionexec.id)
return actionexec
def param_parser(params):
parameters = {}
if params is not None:
param_list = params.split(' ')
for p in param_list:
if '=' in p:
k, v = p.split('=', 1)
if ',' in v:
v = filter(None, v.split(','))
else:
k = 'cmd'
v = p
parameters[k] = v
return parameters
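# Illustrative sketch of param_parser behaviour (hypothetical inputs):
# 'cmd=ls hosts=a,b' yields {'cmd': 'ls', 'hosts': ['a', 'b']}, while a
# bare token such as 'uptime' falls back to the default key and yields
# {'cmd': 'uptime'}.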
actionexec = runAction(action_ref=args.action, params=args.params)
output = {args.name: actionexec.result}
print json.dumps(output)
if actionexec.status != 'succeeded':
sys.exit(2)
| apache-2.0 |
pyKy/kivy-doc-ja | kivy/tests/test_uix_boxlayout.py | 78 | 2666 | '''
Box layout unit test
====================
Order matters.
On the screen, most of the examples must show the red->blue->green order.
'''
from kivy.tests.common import GraphicUnitTest
class UIXBoxLayoutTestcase(GraphicUnitTest):
def box(self, r, g, b):
from kivy.uix.widget import Widget
from kivy.graphics import Color, Rectangle
wid = Widget()
with wid.canvas:
Color(r, g, b)
r = Rectangle(pos=wid.pos, size=wid.size)
def linksp(instance, *largs):
r.pos = instance.pos
r.size = instance.size
wid.bind(pos=linksp, size=linksp)
return wid
def test_boxlayout_orientation(self):
from kivy.uix.boxlayout import BoxLayout
r = self.render
b = self.box
layout = BoxLayout()
layout.add_widget(b(1, 0, 0))
layout.add_widget(b(0, 1, 0))
layout.add_widget(b(0, 0, 1))
r(layout)
layout = BoxLayout(orientation='vertical')
layout.add_widget(b(1, 0, 0))
layout.add_widget(b(0, 1, 0))
layout.add_widget(b(0, 0, 1))
r(layout)
def test_boxlayout_spacing(self):
from kivy.uix.boxlayout import BoxLayout
r = self.render
b = self.box
layout = BoxLayout(spacing=20)
layout.add_widget(b(1, 0, 0))
layout.add_widget(b(0, 1, 0))
layout.add_widget(b(0, 0, 1))
r(layout)
layout = BoxLayout(spacing=20, orientation='vertical')
layout.add_widget(b(1, 0, 0))
layout.add_widget(b(0, 1, 0))
layout.add_widget(b(0, 0, 1))
r(layout)
def test_boxlayout_padding(self):
from kivy.uix.boxlayout import BoxLayout
r = self.render
b = self.box
layout = BoxLayout(padding=20)
layout.add_widget(b(1, 0, 0))
layout.add_widget(b(0, 1, 0))
layout.add_widget(b(0, 0, 1))
r(layout)
layout = BoxLayout(padding=20, orientation='vertical')
layout.add_widget(b(1, 0, 0))
layout.add_widget(b(0, 1, 0))
layout.add_widget(b(0, 0, 1))
r(layout)
def test_boxlayout_padding_spacing(self):
from kivy.uix.boxlayout import BoxLayout
r = self.render
b = self.box
layout = BoxLayout(spacing=20, padding=20)
layout.add_widget(b(1, 0, 0))
layout.add_widget(b(0, 1, 0))
layout.add_widget(b(0, 0, 1))
r(layout)
layout = BoxLayout(spacing=20, padding=20, orientation='vertical')
layout.add_widget(b(1, 0, 0))
layout.add_widget(b(0, 1, 0))
layout.add_widget(b(0, 0, 1))
r(layout)
| mit |
viewfinderco/viewfinder | backend/www/view.py | 13 | 1821 | # Copyright 2011 Viewfinder Inc. All Rights Reserved.
"""Handlers for rendering views, or streams of images.
Views are searches over the image database in the context of a
particular logged in user, browser-reported location, and current
time.
ViewHandler: Returns JSON data containing image locations based
on request parameters.
"""
__author__ = '[email protected] (Spencer Kimball)'
import base
from functools import partial
from operator import attrgetter
from tornado import auth, template
from viewfinder.backend.base import handler
from viewfinder.backend.db import contact, user
class ViewHandler(base.BaseHandler):
"""Displays the main /view page."""
@handler.authenticated(allow_prospective=True)
@handler.asynchronous(datastore=True)
def get(self):
context = base.ViewfinderContext.current()
self.render('view.html',
is_registered=context.user.IsRegistered(),
user_info={'user_id' : context.user.user_id,
'name' : context.user.name,
'email' : context.user.email,
'phone' : context.user.phone,
'default_viewpoint_id' : context.user.private_vp_id
},
viewpoint_id=context.viewpoint_id)
class ViewBetaHandler(base.BaseHandler):
"""Displays a beta version of the /view page, which may have additional features enabled for testing."""
@handler.authenticated(allow_prospective=True)
@handler.asynchronous(datastore=True)
def get(self):
context = base.ViewfinderContext.current()
self.render('view_beta.html',
is_registered=context.user.IsRegistered(),
user_id=context.user.user_id,
viewpoint_id = context.viewpoint_id)
| apache-2.0 |
powerjg/gem5-ci-test | ext/pybind11/tools/mkdoc.py | 13 | 10474 | #!/usr/bin/env python3
#
# Syntax: mkdoc.py [-I<path> ..] [.. a list of header files ..]
#
# Extract documentation from C++ header files to use it in Python bindings
#
import os
import sys
import platform
import re
import textwrap
from clang import cindex
from clang.cindex import CursorKind
from collections import OrderedDict
from threading import Thread, Semaphore
from multiprocessing import cpu_count
RECURSE_LIST = [
CursorKind.TRANSLATION_UNIT,
CursorKind.NAMESPACE,
CursorKind.CLASS_DECL,
CursorKind.STRUCT_DECL,
CursorKind.ENUM_DECL,
CursorKind.CLASS_TEMPLATE
]
PRINT_LIST = [
CursorKind.CLASS_DECL,
CursorKind.STRUCT_DECL,
CursorKind.ENUM_DECL,
CursorKind.ENUM_CONSTANT_DECL,
CursorKind.CLASS_TEMPLATE,
CursorKind.FUNCTION_DECL,
CursorKind.FUNCTION_TEMPLATE,
CursorKind.CONVERSION_FUNCTION,
CursorKind.CXX_METHOD,
CursorKind.CONSTRUCTOR,
CursorKind.FIELD_DECL
]
CPP_OPERATORS = {
'<=': 'le', '>=': 'ge', '==': 'eq', '!=': 'ne', '[]': 'array',
'+=': 'iadd', '-=': 'isub', '*=': 'imul', '/=': 'idiv', '%=':
'imod', '&=': 'iand', '|=': 'ior', '^=': 'ixor', '<<=': 'ilshift',
'>>=': 'irshift', '++': 'inc', '--': 'dec', '<<': 'lshift', '>>':
'rshift', '&&': 'land', '||': 'lor', '!': 'lnot', '~': 'bnot',
'&': 'band', '|': 'bor', '+': 'add', '-': 'sub', '*': 'mul', '/':
'div', '%': 'mod', '<': 'lt', '>': 'gt', '=': 'assign', '()': 'call'
}
CPP_OPERATORS = OrderedDict(
sorted(CPP_OPERATORS.items(), key=lambda t: -len(t[0])))
job_count = cpu_count()
job_semaphore = Semaphore(job_count)
registered_names = dict()
def d(s):
return s.decode('utf8')
def sanitize_name(name):
global registered_names
name = re.sub(r'type-parameter-0-([0-9]+)', r'T\1', name)
for k, v in CPP_OPERATORS.items():
name = name.replace('operator%s' % k, 'operator_%s' % v)
name = re.sub('<.*>', '', name)
name = ''.join([ch if ch.isalnum() else '_' for ch in name])
name = re.sub('_$', '', re.sub('_+', '_', name))
if name in registered_names:
registered_names[name] += 1
name += '_' + str(registered_names[name])
else:
registered_names[name] = 1
return '__doc_' + name
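# Illustrative (hypothetical input): sanitize_name("Foo::operator==")
# yields "__doc_Foo_operator_eq"; a second occurrence of the same name
# would be suffixed as "__doc_Foo_operator_eq_2".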
def process_comment(comment):
result = ''
# Remove C++ comment syntax
leading_spaces = float('inf')
for s in comment.expandtabs(tabsize=4).splitlines():
s = s.strip()
if s.startswith('/*'):
s = s[2:].lstrip('*')
elif s.endswith('*/'):
s = s[:-2].rstrip('*')
elif s.startswith('///'):
s = s[3:]
if s.startswith('*'):
s = s[1:]
if len(s) > 0:
leading_spaces = min(leading_spaces, len(s) - len(s.lstrip()))
result += s + '\n'
if leading_spaces != float('inf'):
result2 = ""
for s in result.splitlines():
result2 += s[leading_spaces:] + '\n'
result = result2
# Doxygen tags
cpp_group = '([\w:]+)'
param_group = '([\[\w:\]]+)'
s = result
s = re.sub(r'\\c\s+%s' % cpp_group, r'``\1``', s)
s = re.sub(r'\\a\s+%s' % cpp_group, r'*\1*', s)
s = re.sub(r'\\e\s+%s' % cpp_group, r'*\1*', s)
s = re.sub(r'\\em\s+%s' % cpp_group, r'*\1*', s)
s = re.sub(r'\\b\s+%s' % cpp_group, r'**\1**', s)
s = re.sub(r'\\ingroup\s+%s' % cpp_group, r'', s)
s = re.sub(r'\\param%s?\s+%s' % (param_group, cpp_group),
r'\n\n$Parameter ``\2``:\n\n', s)
s = re.sub(r'\\tparam%s?\s+%s' % (param_group, cpp_group),
r'\n\n$Template parameter ``\2``:\n\n', s)
for in_, out_ in {
'return': 'Returns',
'author': 'Author',
'authors': 'Authors',
'copyright': 'Copyright',
'date': 'Date',
'remark': 'Remark',
'sa': 'See also',
'see': 'See also',
'extends': 'Extends',
'throw': 'Throws',
'throws': 'Throws'
}.items():
s = re.sub(r'\\%s\s*' % in_, r'\n\n$%s:\n\n' % out_, s)
s = re.sub(r'\\details\s*', r'\n\n', s)
s = re.sub(r'\\brief\s*', r'', s)
s = re.sub(r'\\short\s*', r'', s)
s = re.sub(r'\\ref\s*', r'', s)
s = re.sub(r'\\code\s?(.*?)\s?\\endcode',
r"```\n\1\n```\n", s, flags=re.DOTALL)
# HTML/TeX tags
s = re.sub(r'<tt>(.*?)</tt>', r'``\1``', s, flags=re.DOTALL)
s = re.sub(r'<pre>(.*?)</pre>', r"```\n\1\n```\n", s, flags=re.DOTALL)
s = re.sub(r'<em>(.*?)</em>', r'*\1*', s, flags=re.DOTALL)
s = re.sub(r'<b>(.*?)</b>', r'**\1**', s, flags=re.DOTALL)
s = re.sub(r'\\f\$(.*?)\\f\$', r'$\1$', s, flags=re.DOTALL)
s = re.sub(r'<li>', r'\n\n* ', s)
s = re.sub(r'</?ul>', r'', s)
s = re.sub(r'</li>', r'\n\n', s)
s = s.replace('``true``', '``True``')
s = s.replace('``false``', '``False``')
# Re-flow text
wrapper = textwrap.TextWrapper()
wrapper.expand_tabs = True
wrapper.replace_whitespace = True
wrapper.drop_whitespace = True
wrapper.width = 70
wrapper.initial_indent = wrapper.subsequent_indent = ''
result = ''
in_code_segment = False
for x in re.split(r'(```)', s):
if x == '```':
if not in_code_segment:
result += '```\n'
else:
result += '\n```\n\n'
in_code_segment = not in_code_segment
elif in_code_segment:
result += x.strip()
else:
for y in re.split(r'(?: *\n *){2,}', x):
wrapped = wrapper.fill(re.sub(r'\s+', ' ', y).strip())
if len(wrapped) > 0 and wrapped[0] == '$':
result += wrapped[1:] + '\n'
wrapper.initial_indent = \
wrapper.subsequent_indent = ' ' * 4
else:
if len(wrapped) > 0:
result += wrapped + '\n\n'
wrapper.initial_indent = wrapper.subsequent_indent = ''
return result.rstrip().lstrip('\n')
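# Rough sketch of the transformation (hypothetical input): a comment
# block "/// \brief Adds two ints.\n/// \param a first operand" loses
# its comment markers and the \brief tag, and the \param tag becomes a
# "Parameter ``a``:" section, re-flowed to 70 columns.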
def extract(filename, node, prefix, output):
num_extracted = 0
if not (node.location.file is None or
os.path.samefile(d(node.location.file.name), filename)):
return 0
if node.kind in RECURSE_LIST:
sub_prefix = prefix
if node.kind != CursorKind.TRANSLATION_UNIT:
if len(sub_prefix) > 0:
sub_prefix += '_'
sub_prefix += d(node.spelling)
for i in node.get_children():
num_extracted += extract(filename, i, sub_prefix, output)
if num_extracted == 0:
return 0
if node.kind in PRINT_LIST:
comment = d(node.raw_comment) if node.raw_comment is not None else ''
comment = process_comment(comment)
sub_prefix = prefix
if len(sub_prefix) > 0:
sub_prefix += '_'
if len(node.spelling) > 0:
name = sanitize_name(sub_prefix + d(node.spelling))
output.append('\nstatic const char *%s =%sR"doc(%s)doc";' %
(name, '\n' if '\n' in comment else ' ', comment))
num_extracted += 1
return num_extracted
class ExtractionThread(Thread):
def __init__(self, filename, parameters, output):
Thread.__init__(self)
self.filename = filename
self.parameters = parameters
self.output = output
job_semaphore.acquire()
def run(self):
print('Processing "%s" ..' % self.filename, file=sys.stderr)
try:
index = cindex.Index(
cindex.conf.lib.clang_createIndex(False, True))
tu = index.parse(self.filename, self.parameters)
extract(self.filename, tu.cursor, '', self.output)
finally:
job_semaphore.release()
if __name__ == '__main__':
parameters = ['-x', 'c++', '-std=c++11']
filenames = []
if platform.system() == 'Darwin':
dev_path = '/Applications/Xcode.app/Contents/Developer/'
lib_dir = dev_path + 'Toolchains/XcodeDefault.xctoolchain/usr/lib/'
sdk_dir = dev_path + 'Platforms/MacOSX.platform/Developer/SDKs'
libclang = lib_dir + 'libclang.dylib'
if os.path.exists(libclang):
cindex.Config.set_library_path(os.path.dirname(libclang))
if os.path.exists(sdk_dir):
sysroot_dir = os.path.join(sdk_dir, next(os.walk(sdk_dir))[1][0])
parameters.append('-isysroot')
parameters.append(sysroot_dir)
for item in sys.argv[1:]:
if item.startswith('-'):
parameters.append(item)
else:
filenames.append(item)
if len(filenames) == 0:
print('Syntax: %s [.. a list of header files ..]' % sys.argv[0])
exit(-1)
print('''/*
This file contains docstrings for the Python bindings.
Do not edit! These were automatically extracted by mkdoc.py
*/
#define __EXPAND(x) x
#define __COUNT(_1, _2, _3, _4, _5, _6, _7, COUNT, ...) COUNT
#define __VA_SIZE(...) __EXPAND(__COUNT(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1))
#define __CAT1(a, b) a ## b
#define __CAT2(a, b) __CAT1(a, b)
#define __DOC1(n1) __doc_##n1
#define __DOC2(n1, n2) __doc_##n1##_##n2
#define __DOC3(n1, n2, n3) __doc_##n1##_##n2##_##n3
#define __DOC4(n1, n2, n3, n4) __doc_##n1##_##n2##_##n3##_##n4
#define __DOC5(n1, n2, n3, n4, n5) __doc_##n1##_##n2##_##n3##_##n4##_##n5
#define __DOC6(n1, n2, n3, n4, n5, n6) __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6
#define __DOC7(n1, n2, n3, n4, n5, n6, n7) __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6##_##n7
#define DOC(...) __EXPAND(__EXPAND(__CAT2(__DOC, __VA_SIZE(__VA_ARGS__)))(__VA_ARGS__))
#if defined(__GNUG__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-variable"
#endif
''')
output = []
for filename in filenames:
thr = ExtractionThread(filename, parameters, output)
thr.start()
print('Waiting for jobs to finish ..', file=sys.stderr)
for i in range(job_count):
job_semaphore.acquire()
output.sort()
for l in output:
print(l)
print('''
#if defined(__GNUG__)
#pragma GCC diagnostic pop
#endif
''')
| bsd-3-clause |
nexiles/odoo | addons/l10n_fr/l10n_fr.py | 336 | 2089 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class l10n_fr_report(osv.osv):
_name = 'l10n.fr.report'
_description = 'Report for l10n_fr'
_columns = {
'code': fields.char('Code', size=64),
'name': fields.char('Name'),
'line_ids': fields.one2many('l10n.fr.line', 'report_id', 'Lines', copy=True),
}
_sql_constraints = [
('code_uniq', 'unique (code)','The code report must be unique !')
]
class l10n_fr_line(osv.osv):
_name = 'l10n.fr.line'
_description = 'Report Lines for l10n_fr'
_columns = {
'code': fields.char('Variable Name', size=64),
'definition': fields.char('Definition'),
'name': fields.char('Name'),
'report_id': fields.many2one('l10n.fr.report', 'Report'),
}
_sql_constraints = [
('code_uniq', 'unique (code)', 'The variable name must be unique !')
]
class res_company(osv.osv):
_inherit = 'res.company'
_columns = {
'siret': fields.char('SIRET', size=14),
'ape': fields.char('APE'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yyny1789/WinObjC | deps/3rdparty/icu/icu/source/tools/icu-svnprops-check.py | 388 | 9251 | #! /usr/bin/python
# Copyright (C) 2009-2011, International Business Machines Corporation, Google and Others.
# All rights reserved.
#
# Script to check and fix svn property settings for ICU source files.
# Also check for the correct line endings on files with svn:eol-style = native
#
# THIS SCRIPT DOES NOT WORK ON WINDOWS
# It only works correctly on platforms where the native line ending is a plain \n
#
# usage:
# icu-svnprops-check.py [options]
#
# options:
# -f | --fix Fix any problems that are found
# -h | --help Print a usage line and exit.
#
# The tool operates recursively on the directory from which it is run.
# Only files from the svn repository are checked.
# No changes are made to the repository; only the working copy will be altered.
import sys
import os
import os.path
import re
import getopt
#
# svn autoprops definitions.
# Copy and paste here the ICU recommended auto-props from
# http://icu-project.org/docs/subversion_howto/index.html
#
# This program will parse this autoprops string, and verify that files in
# the repository have the recommended properties set.
#
svn_auto_props = """
### Section for configuring automatic properties.
[auto-props]
### The format of the entries is:
### file-name-pattern = propname[=value][;propname[=value]...]
### The file-name-pattern can contain wildcards (such as '*' and
### '?'). All entries which match will be applied to the file.
### Note that auto-props functionality must be enabled, which
### is typically done by setting the 'enable-auto-props' option.
*.c = svn:eol-style=native
*.cc = svn:eol-style=native
*.cpp = svn:eol-style=native
*.h = svn:eol-style=native
*.rc = svn:eol-style=native
*.dsp = svn:eol-style=native
*.dsw = svn:eol-style=native
*.sln = svn:eol-style=native
*.vcproj = svn:eol-style=native
configure = svn:eol-style=native;svn:executable
*.sh = svn:eol-style=native;svn:executable
*.pl = svn:eol-style=native;svn:executable
*.py = svn:eol-style=native;svn:executable
*.txt = svn:mime-type=text/plain;svn:eol-style=native
*.java = svn:eol-style=native;svn:mime-type=text/plain;;charset=utf-8
*.ucm = svn:eol-style=native
*.html = svn:eol-style=native;svn:mime-type=text/html
*.htm = svn:eol-style=native;svn:mime-type=text/html
*.xml = svn:eol-style=native
Makefile = svn:eol-style=native
*.in = svn:eol-style=native
*.mak = svn:eol-style=native
*.mk = svn:eol-style=native
*.png = svn:mime-type=image/png
*.jpeg = svn:mime-type=image/jpeg
*.jpg = svn:mime-type=image/jpeg
*.bin = svn:mime-type=application/octet-stream
*.brk = svn:mime-type=application/octet-stream
*.cnv = svn:mime-type=application/octet-stream
*.dat = svn:mime-type=application/octet-stream
*.icu = svn:mime-type=application/octet-stream
*.res = svn:mime-type=application/octet-stream
*.spp = svn:mime-type=application/octet-stream
# new additions 2007-dec-5 srl
*.rtf = mime-type=text/rtf
*.pdf = mime-type=application/pdf
# changed 2008-04-08: modified .txt, above, adding mime-type
# changed 2010-11-09: modified .java, adding mime-type
# Note: The escape syntax for semicolon (";;") is supported since subversion 1.6.1
"""
# file_types: The parsed form of the svn auto-props specification.
# A list of file types - .cc, .cpp, .txt, etc.
# each element is a [type, proplist]
# "type" is a regular expression string that will match a file name
# prop list is another list, one element per property.
# Each property item is a two element list, [prop name, prop value]
file_types = list()
def parse_auto_props():
aprops = svn_auto_props.splitlines()
for propline in aprops:
if re.match("\s*(#.*)?$", propline): # Match comment and blank lines
continue
if re.match("\s*\[auto-props\]", propline): # Match the [auto-props] line.
continue
if not re.match("\s*[^\s]+\s*=", propline): # minimal syntax check for <file-type> =
print "Bad line from autoprops definitions: " + propline
continue
file_type, string_proplist = propline.split("=", 1)
#transform the file type expression from autoprops into a normal regular expression.
# e.g. "*.cpp" ==> ".*\.cpp$"
file_type = file_type.strip()
file_type = file_type.replace(".", "\.")
file_type = file_type.replace("*", ".*")
file_type = file_type + "$"
# example string_proplist at this point: " svn:eol-style=native;svn:executable"
# split on ';' into a list of properties. The negative lookahead and lookbehind
# in the split regexp are to prevent matching on ';;', which is an escaped ';'
# within a property value.
string_proplist = re.split("(?<!;);(?!;)", string_proplist)
proplist = list()
for prop in string_proplist:
if prop.find("=") >= 0:
prop_name, prop_val = prop.split("=", 1)
else:
# properties with no explicit value, e.g. svn:executable
prop_name, prop_val = prop, ""
prop_name = prop_name.strip()
prop_val = prop_val.strip()
# unescape any ";;" in a property value, e.g. the mime-type from
# *.java = svn:eol-style=native;svn:mime-type=text/plain;;charset=utf-8
prop_val = prop_val.replace(";;", ";");
proplist.append((prop_name, prop_val))
file_types.append((file_type, proplist))
# print file_types
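# Illustrative: the autoprops line "*.cpp = svn:eol-style=native" above
# is parsed into the file_types entry
# (".*\.cpp$", [("svn:eol-style", "native")]).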
def runCommand(cmd):
output_file = os.popen(cmd);
output_text = output_file.read();
exit_status = output_file.close();
if exit_status:
print >>sys.stderr, '"', cmd, '" failed. Exiting.'
sys.exit(exit_status)
return output_text
def usage():
print "usage: " + sys.argv[0] + " [-f | --fix] [-h | --help]"
#
# UTF-8 file check. For text files, add a charset to the mime-type if their contents are UTF-8
# file_name: name of a text file.
# base_mime_type: svn:mime-type property value from the auto-props file (no charset= part)
# actual_mime_type: existing svn:mime-type property value for the file.
# return: svn:mime-type property value, with charset added when appropriate.
#
def check_utf8(file_name, base_mime_type, actual_mime_type):
# If the file already has a charset in its mime-type, don't make any change.
if actual_mime_type.find("charset=") > 0:
return actual_mime_type;
f = open(file_name, 'r')
bytes = f.read()
f.close()
if all(ord(byte) < 128 for byte in bytes):
# pure ASCII.
# print "Pure ASCII " + file_name
return base_mime_type
try:
bytes.decode("UTF-8")
except UnicodeDecodeError:
print "warning: %s: not ASCII, not UTF-8" % file_name
return base_mime_type
if ord(bytes[0]) != 0xef:
print "UTF-8 file with no BOM: " + file_name
# Append charset=utf-8.
return base_mime_type + ';charset=utf-8'
def main(argv):
fix_problems = False;
try:
opts, args = getopt.getopt(argv, "fh", ("fix", "help"))
except getopt.GetoptError:
print "unrecognized option: " + argv[0]
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
if opt in ("-f", "--fix"):
fix_problems = True
if args:
print "unexpected command line argument"
usage()
sys.exit()
parse_auto_props()
output = runCommand("svn ls -R ");
file_list = output.splitlines()
for f in file_list:
if os.path.isdir(f):
# print "Skipping dir " + f
continue
if not os.path.isfile(f):
print "Repository file not in working copy: " + f
continue;
for file_pattern, props in file_types:
if re.match(file_pattern, f):
# print "doing " + f
for propname, propval in props:
actual_propval = runCommand("svn propget --strict " + propname + " " + f)
#print propname + ": " + actual_propval
if propname == "svn:mime-type" and propval.find("text/") == 0:
# check for UTF-8 text files, should have svn:mime-type=text/something; charset=utf8
propval = check_utf8(f, propval, actual_propval)
if not (propval == actual_propval or (propval == "" and actual_propval == "*")):
print "svn propset %s '%s' %s" % (propname, propval, f)
if fix_problems:
os.system("svn propset %s '%s' %s" % (propname, propval, f))
if propname == "svn:eol-style" and propval == "native":
if os.system("grep -q -v \r " + f):
if fix_problems:
print f + ": Removing DOS CR characters."
os.system("sed -i s/\r// " + f);
else:
print f + " contains DOS CR characters."
if __name__ == "__main__":
main(sys.argv[1:])
| mit |
pandeyop/rally | tests/hacking/checks.py | 3 | 12536 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Guidelines for writing new hacking checks
- Use only for Rally specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range N3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the N3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to tests/unit/test_hacking.py
"""
import functools
import re
re_assert_true_instance = re.compile(
r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
r"(\w|\.|\'|\"|\[|\])+\)\)")
re_assert_equal_type = re.compile(
r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), "
r"(\w|\.|\'|\"|\[|\])+\)")
re_assert_equal_end_with_none = re.compile(r"assertEqual\(.*?,\s+None\)$")
re_assert_equal_start_with_none = re.compile(r"assertEqual\(None,")
re_assert_true_false_with_in_or_not_in = re.compile(
r"assert(True|False)\("
r"(\w|[][.'\"])+( not)? in (\w|[][.'\",])+(, .*)?\)")
re_assert_true_false_with_in_or_not_in_spaces = re.compile(
r"assert(True|False)\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|[][.'\", ])+"
r"[\[|'|\"](, .*)?\)")
re_assert_equal_in_end_with_true_or_false = re.compile(
r"assertEqual\((\w|[][.'\"])+( not)? in (\w|[][.'\", ])+, (True|False)\)")
re_assert_equal_in_start_with_true_or_false = re.compile(
r"assertEqual\((True|False), (\w|[][.'\"])+( not)? in (\w|[][.'\", ])+\)")
re_no_construct_dict = re.compile(
r"=\sdict\(\)")
re_no_construct_list = re.compile(
r"=\slist\(\)")
def skip_ignored_lines(func):
@functools.wraps(func)
def wrapper(logical_line, filename):
line = logical_line.strip()
if not line or line.startswith("#") or line.endswith("# noqa"):
return
yield next(func(logical_line, filename))
return wrapper
def _parse_assert_mock_str(line):
point = line.find(".assert_")
if point != -1:
end_pos = line[point:].find("(") + point
return point, line[point + 1: end_pos], line[: point]
else:
return None, None, None
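# Illustrative (hypothetical input):
# _parse_assert_mock_str("mock_obj.assert_called()") returns
# (8, "assert_called", "mock_obj"); lines without ".assert_" return
# (None, None, None).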
@skip_ignored_lines
def check_assert_methods_from_mock(logical_line, filename):
"""Ensure that ``assert_*`` methods from ``mock`` library is used correctly
N301 - base error number
N302 - related to nonexistent "assert_called"
N303 - related to nonexistent "assert_called_once"
"""
correct_names = ["assert_any_call", "assert_called_once_with",
"assert_called_with", "assert_has_calls"]
ignored_files = ["./tests/unit/test_hacking.py"]
if filename.startswith("./tests") and filename not in ignored_files:
pos, method_name, obj_name = _parse_assert_mock_str(logical_line)
if pos:
if method_name not in correct_names:
error_number = "N301"
msg = ("%(error_number)s:'%(method)s' is not present in `mock`"
" library. %(custom_msg)s For more details, visit "
"http://www.voidspace.org.uk/python/mock/ .")
if method_name == "assert_called":
error_number = "N302"
custom_msg = ("Maybe, you should try to use "
"'assertTrue(%s.called)' instead." %
obj_name)
elif method_name == "assert_called_once":
# For more details, see a bug in Rally:
# https://bugs.launchpad.net/rally/+bug/1305991
error_number = "N303"
custom_msg = ("Maybe, you should try to use "
"'assertEqual(1, %(obj_name)s.call_count)' "
"or '%(obj_name)s.assert_called_once_with()'"
" instead." % {"obj_name": obj_name})
else:
custom_msg = ("Correct 'assert_*' methods: '%s'."
% "', '".join(correct_names))
yield (pos, msg % {
"error_number": error_number,
"method": method_name,
"custom_msg": custom_msg})
@skip_ignored_lines
def check_import_of_logging(logical_line, filename):
"""Check correctness import of logging module
N310
"""
excluded_files = ["./rally/common/log.py", "./tests/unit/test_log.py"]
forbidden_imports = ["from oslo_log",
"import oslo_log",
"import logging"]
if filename not in excluded_files:
for forbidden_import in forbidden_imports:
if logical_line.startswith(forbidden_import):
yield (0, "N310 Wrong module for logging is imported. Please "
"use `rally.common.log` instead.")
@skip_ignored_lines
def no_translate_debug_logs(logical_line, filename):
"""Check for "LOG.debug(_("
As per our translation policy,
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
we shouldn't translate debug level logs.
* This check assumes that 'LOG' is a logger.
* Use filename so we can start enforcing this in specific folders instead
of needing to do so all at once.
N311
"""
if logical_line.startswith("LOG.debug(_("):
yield(0, "N311 Don't translate debug level logs")
@skip_ignored_lines
def no_use_conf_debug_check(logical_line, filename):
"""Check for "cfg.CONF.debug"
    Rally has two DEBUG levels:
    - Full DEBUG, which includes all debug messages from all OpenStack services
    - Rally DEBUG, which includes only Rally debug messages
so we should use custom check to know debug-mode, instead of CONF.debug
N312
"""
excluded_files = ["./rally/common/log.py"]
point = logical_line.find("CONF.debug")
if point != -1 and filename not in excluded_files:
yield(point, "N312 Don't use `CONF.debug`. "
"Function `rally.common.log.is_debug` "
"should be used instead.")
@skip_ignored_lines
def assert_true_instance(logical_line, filename):
"""Check for assertTrue(isinstance(a, b)) sentences
N320
"""
if re_assert_true_instance.match(logical_line):
yield (0, "N320 assertTrue(isinstance(a, b)) sentences not allowed, "
"you should use assertIsInstance(a, b) instead.")
@skip_ignored_lines
def assert_equal_type(logical_line, filename):
"""Check for assertEqual(type(A), B) sentences
N321
"""
if re_assert_equal_type.match(logical_line):
yield (0, "N321 assertEqual(type(A), B) sentences not allowed, "
"you should use assertIsInstance(a, b) instead.")
@skip_ignored_lines
def assert_equal_none(logical_line, filename):
"""Check for assertEqual(A, None) or assertEqual(None, A) sentences
N322
"""
res = (re_assert_equal_start_with_none.search(logical_line) or
re_assert_equal_end_with_none.search(logical_line))
if res:
yield (0, "N322 assertEqual(A, None) or assertEqual(None, A) "
"sentences not allowed, you should use assertIsNone(A) "
"instead.")
@skip_ignored_lines
def assert_true_or_false_with_in(logical_line, filename):
"""Check assertTrue/False(A in/not in B) with collection contents
Check for assertTrue/False(A in B), assertTrue/False(A not in B),
assertTrue/False(A in B, message) or assertTrue/False(A not in B, message)
sentences.
N323
"""
res = (re_assert_true_false_with_in_or_not_in.search(logical_line) or
re_assert_true_false_with_in_or_not_in_spaces.search(logical_line))
if res:
yield (0, "N323 assertTrue/assertFalse(A in/not in B)sentences not "
"allowed, you should use assertIn(A, B) or assertNotIn(A, B)"
" instead.")
@skip_ignored_lines
def assert_equal_in(logical_line, filename):
"""Check assertEqual(A in/not in B, True/False) with collection contents
Check for assertEqual(A in B, True/False), assertEqual(True/False, A in B),
assertEqual(A not in B, True/False) or assertEqual(True/False, A not in B)
sentences.
N324
"""
res = (re_assert_equal_in_end_with_true_or_false.search(logical_line) or
re_assert_equal_in_start_with_true_or_false.search(logical_line))
if res:
yield (0, "N324: Use assertIn/NotIn(A, B) rather than "
"assertEqual(A in/not in B, True/False) when checking "
"collection contents.")
@skip_ignored_lines
def check_no_direct_rally_objects_import(logical_line, filename):
"""Check if rally.objects are properly imported.
If you import "from rally import objects" you are able to use objects
directly like objects.Task.
N340
"""
if filename == "./rally/objects/__init__.py":
return
if (logical_line.startswith("from rally.objects")
or logical_line.startswith("import rally.objects.")):
yield (0, "N340: Import objects module: `from rally import objects`. "
"After that you can use directly objects e.g. objects.Task")
@skip_ignored_lines
def check_no_oslo_deprecated_import(logical_line, filename):
"""Check if oslo.foo packages are not imported instead of oslo_foo ones.
Libraries from oslo.foo namespace are deprecated because of namespace
problems.
N341
"""
if (logical_line.startswith("from oslo.")
or logical_line.startswith("import oslo.")):
yield (0, "N341: Import oslo module: `from oslo_xyz import ...`. "
"The oslo.xyz namespace was deprecated, use oslo_xyz "
"instead")
@skip_ignored_lines
def check_quotes(logical_line, filename):
"""Check that single quotation marks are not used
N350
"""
in_string = False
in_multiline_string = False
single_quotas_are_used = False
check_tripple = (
lambda line, i, char: (
i + 2 < len(line) and
(char == line[i] == line[i + 1] == line[i + 2])
)
)
i = 0
while i < len(logical_line):
char = logical_line[i]
if in_string:
if char == "\"":
in_string = False
if char == "\\":
i += 1 # ignore next char
elif in_multiline_string:
if check_tripple(logical_line, i, "\""):
i += 2 # skip next 2 chars
in_multiline_string = False
elif char == "#":
break
elif char == "'":
single_quotas_are_used = True
break
elif char == "\"":
if check_tripple(logical_line, i, "\""):
in_multiline_string = True
i += 3
continue
in_string = True
i += 1
if single_quotas_are_used:
yield (i, "N350 Remove Single quotes")
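# Illustrative: x = 'foo' triggers N350, while x = "foo" passes, and an
# apostrophe inside a double-quoted string is also allowed.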
@skip_ignored_lines
def check_no_constructor_data_struct(logical_line, filename):
"""Check that data structs (lists, dicts) are declared using literals
N351
"""
match = re_no_construct_dict.search(logical_line)
if match:
yield (0, "N351 Remove dict() construct and use literal {}")
match = re_no_construct_list.search(logical_line)
if match:
yield (0, "N351 Remove list() construct and use literal []")
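# Illustrative: x = dict() and y = list() are both flagged as N351; the
# literal forms x = {} and y = [] pass.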
def factory(register):
register(check_assert_methods_from_mock)
register(check_import_of_logging)
register(no_translate_debug_logs)
register(no_use_conf_debug_check)
register(assert_true_instance)
register(assert_equal_type)
register(assert_equal_none)
register(assert_true_or_false_with_in)
register(assert_equal_in)
register(check_no_direct_rally_objects_import)
register(check_no_oslo_deprecated_import)
register(check_quotes)
register(check_no_constructor_data_struct)
| apache-2.0 |
amarian12/p2pool-adaptive-drk | fpconst.py | 310 | 5754 | """Utilities for handling IEEE 754 floating point special values
This python module implements constants and functions for working with
IEEE754 double-precision special values. It provides constants for
Not-a-Number (NaN), Positive Infinity (PosInf), and Negative Infinity
(NegInf), as well as functions to test for these values.
The code is implemented in pure python by taking advantage of the
'struct' standard module. Care has been taken to generate proper
results on both big-endian and little-endian machines. Some efficiency
could be gained by translating the core routines into C.
See <http://babbage.cs.qc.edu/courses/cs341/IEEE-754references.html>
for reference material on the IEEE 754 floating point standard.
Further information on this package is available at
<http://www.analytics.washington.edu/statcomp/projects/rzope/fpconst/>.
------------------------------------------------------------------
Author: Gregory R. Warnes <[email protected]>
Date: 2005-02-24
Version: 0.7.2
Copyright: (c) 2003-2005 Pfizer, Licensed to PSF under a Contributor Agreement
License: Licensed under the Apache License, Version 2.0 (the"License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in
writing, software distributed under the License is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See
the License for the specific language governing
permissions and limitations under the License.
------------------------------------------------------------------
"""
__version__ = "0.7.2"
ident = "$Id: fpconst.py,v 1.16 2005/02/24 17:42:03 warnes Exp $"
import struct, operator
# check endianness
_big_endian = struct.pack('i',1)[0] != '\x01'
# and define appropriate constants
if(_big_endian):
NaN = struct.unpack('d', '\x7F\xF8\x00\x00\x00\x00\x00\x00')[0]
PosInf = struct.unpack('d', '\x7F\xF0\x00\x00\x00\x00\x00\x00')[0]
NegInf = -PosInf
else:
NaN = struct.unpack('d', '\x00\x00\x00\x00\x00\x00\xf8\xff')[0]
PosInf = struct.unpack('d', '\x00\x00\x00\x00\x00\x00\xf0\x7f')[0]
NegInf = -PosInf
def _double_as_bytes(dval):
"Use struct.unpack to decode a double precision float into eight bytes"
tmp = list(struct.unpack('8B',struct.pack('d', dval)))
if not _big_endian:
tmp.reverse()
return tmp
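# Illustrative: 1.0 is encoded as 0x3FF0000000000000, so
# _double_as_bytes(1.0) returns [0x3f, 0xf0, 0, 0, 0, 0, 0, 0] on any
# host (the little-endian branch reverses the byte order first).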
##
## Functions to extract components of the IEEE 754 floating point format
##
def _sign(dval):
"Extract the sign bit from a double-precision floating point value"
bb = _double_as_bytes(dval)
return bb[0] >> 7 & 0x01
def _exponent(dval):
"""Extract the exponentent bits from a double-precision floating
point value.
Note that for normalized values, the exponent bits have an offset
of 1023. As a consequence, the actual exponentent is obtained
by subtracting 1023 from the value returned by this function
"""
bb = _double_as_bytes(dval)
return (bb[0] << 4 | bb[1] >> 4) & 0x7ff
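# Illustrative: _exponent(1.0) returns 1023, i.e. the bias itself, so
# the actual exponent of 1.0 is 1023 - 1023 = 0.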
def _mantissa(dval):
"""Extract the _mantissa bits from a double-precision floating
point value."""
bb = _double_as_bytes(dval)
    mantissa = (bb[1] & 0x0f) << 48
    mantissa += bb[2] << 40
    mantissa += bb[3] << 32
    mantissa += bb[4] << 24
    mantissa += bb[5] << 16
    mantissa += bb[6] << 8
    mantissa += bb[7]
    return mantissa
def _zero_mantissa(dval):
"""Determine whether the mantissa bits of the given double are all
zero."""
bb = _double_as_bytes(dval)
return ((bb[1] & 0x0f) | reduce(operator.or_, bb[2:])) == 0
##
## Functions to test for IEEE 754 special values
##
def isNaN(value):
"Determine if the argument is a IEEE 754 NaN (Not a Number) value."
return (_exponent(value)==0x7ff and not _zero_mantissa(value))
def isInf(value):
"""Determine if the argument is an infinite IEEE 754 value (positive
or negative inifinity)"""
return (_exponent(value)==0x7ff and _zero_mantissa(value))
def isFinite(value):
"""Determine if the argument is an finite IEEE 754 value (i.e., is
not NaN, positive or negative inifinity)"""
return (_exponent(value)!=0x7ff)
def isPosInf(value):
"Determine if the argument is a IEEE 754 positive infinity value"
return (_sign(value)==0 and _exponent(value)==0x7ff and \
_zero_mantissa(value))
def isNegInf(value):
"Determine if the argument is a IEEE 754 negative infinity value"
return (_sign(value)==1 and _exponent(value)==0x7ff and \
_zero_mantissa(value))
##
## Functions to test public functions.
##
def test_isNaN():
assert( not isNaN(PosInf) )
assert( not isNaN(NegInf) )
assert( isNaN(NaN ) )
assert( not isNaN( 1.0) )
assert( not isNaN( -1.0) )
def test_isInf():
assert( isInf(PosInf) )
assert( isInf(NegInf) )
assert( not isInf(NaN ) )
assert( not isInf( 1.0) )
assert( not isInf( -1.0) )
def test_isFinite():
assert( not isFinite(PosInf) )
assert( not isFinite(NegInf) )
assert( not isFinite(NaN ) )
assert( isFinite( 1.0) )
assert( isFinite( -1.0) )
def test_isPosInf():
assert( isPosInf(PosInf) )
assert( not isPosInf(NegInf) )
assert( not isPosInf(NaN ) )
assert( not isPosInf( 1.0) )
assert( not isPosInf( -1.0) )
def test_isNegInf():
assert( not isNegInf(PosInf) )
assert( isNegInf(NegInf) )
assert( not isNegInf(NaN ) )
assert( not isNegInf( 1.0) )
assert( not isNegInf( -1.0) )
# overall test
def test():
test_isNaN()
test_isInf()
test_isFinite()
test_isPosInf()
test_isNegInf()
if __name__ == "__main__":
test()
| gpl-3.0 |
anthraxx/fips | mod/tools/git.py | 2 | 7644 | """wrapper for some git commands"""
import re
import subprocess
from mod import log
name = 'git'
platforms = ['linux', 'osx', 'win']
optional = False
not_found = "git not found in path, can't happen(?)"
# default git clone depth
clone_depth = 10
#-------------------------------------------------------------------------------
def check_exists(fips_dir=None) :
"""test if git is in the path
:returns: True if git is in the path
"""
try :
subprocess.check_output(['git', '--version'])
return True
except (OSError, subprocess.CalledProcessError) :
return False
#-------------------------------------------------------------------------------
def clone(url, branch, depth, name, cwd) :
"""git clone a remote git repo
:param url: the git url to clone from
:param branch: branch name (can be None)
:param depth: how deep to clone
:param name: the directory name to clone into
:param cwd: the directory where to run git
:returns: True if git returns successful
"""
if check_exists() :
cmd = 'git clone --recursive'
if branch :
cmd += ' --branch {} --single-branch'.format(branch)
if depth :
cmd += ' --depth {}'.format(depth)
cmd += ' {} {}'.format(url, name)
res = subprocess.call(cmd, cwd=cwd, shell=True)
return res == 0
else :
log.error("git not found, please run and fix './fips diag tools'")
return False
#-------------------------------------------------------------------------------
def get_branches(proj_dir) :
"""get a dictionary with all local branch names of a git repo as keys,
and their remote branch names as value
:param proj_dir: a git repo dir
:returns: dictionary of all local and remote branches
"""
branches = {}
try:
output = subprocess.check_output('git branch -vv', cwd=proj_dir, shell=True)
lines = output.splitlines()
for line in lines :
tokens = line[2:].split()
local_branch = tokens[0]
if re.compile("^\[.*(:|\])$").match(tokens[2]) :
remote_branch = tokens[2][1:-1]
branches[local_branch] = remote_branch
except subprocess.CalledProcessError :
log.error("failed to call 'git branch -vv'")
return branches;
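# Illustrative: a "git branch -vv" line such as
#   "* master 1a2b3c4 [origin/master] commit msg"
# produces branches["master"] == "origin/master".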
#-------------------------------------------------------------------------------
def checkout(proj_dir, revision) :
"""checkout a specific revision hash of a repository
:param proj_dir: a git repo dir
:param revision: SHA1 hash of the commit
:returns: True if git returns successful
"""
try :
output = subprocess.check_output('git checkout {}'.format(revision), cwd=proj_dir, shell=True)
return output.split(':')[0] != 'error'
except subprocess.CalledProcessError :
log.error("failed to call 'git checkout'")
return None
#-------------------------------------------------------------------------------
def has_uncommitted_files(proj_dir) :
"""check whether a git repo has uncommitted files
:param proj_dir: a git repo dir
:returns: True/False and output string
"""
try :
output = subprocess.check_output('git status -s', cwd=proj_dir, shell=True)
if len(output) > 0 :
return True, output
else :
return False, output
except subprocess.CalledProcessError :
log.error("failed to call 'git status -s'")
return False, ''
#-------------------------------------------------------------------------------
def get_remote_rev(proj_dir, remote_branch) :
"""get the head rev of a remote branch
:param proj_dir: a git repo dir
:param remote_branch: remote branch (e.g. origin/master)
:returns: the revision string of the remote branch head or None
"""
tokens = remote_branch.split('/')
try :
output = subprocess.check_output('git ls-remote {} {}'.format(tokens[0], tokens[1]), cwd=proj_dir, shell=True)
# can return an empty string if the remote branch doesn't exist
if output != '':
return output.split()[0]
else :
return None
except subprocess.CalledProcessError :
log.error("failed to call 'git ls-remote'")
return None
#-------------------------------------------------------------------------------
def get_local_rev(proj_dir, local_branch) :
"""get the head rev of a local branch
:param proj_dir: a git repo dir
:param local_branch: local branch name (e.g. master)
:returns: the revision string of the local branch head or None
"""
try :
output = subprocess.check_output('git rev-parse {}'.format(local_branch), cwd=proj_dir, shell=True)
return output.rstrip()
except subprocess.CalledProcessError :
log.error("failed to call 'git rev-parse'")
return None
#-------------------------------------------------------------------------------
def check_out_of_sync(proj_dir) :
"""check through all branches of the git repo in proj_dir and
returns an array of all branches that are out-of-sync with their
remote branches (either have unpushed local changes, or un-pulled
remote changes)
:param proj_dir: a git repo directory
:returns: array with branch names that are out-of-sync
"""
if not check_exists() :
log.error("git not found, please run and fix './fips diag tools'")
return False
out_of_sync = False
# first check whether there are uncommitted changes
status, status_output = has_uncommitted_files(proj_dir)
if status :
out_of_sync = True
log.warn("'{}' has uncommitted changes:".format(proj_dir))
log.info(status_output)
# check whether local and remote branch are out of sync
branches_out_of_sync = False
branches = get_branches(proj_dir)
if not branches :
log.warn("'{}' no remote branches found".format(proj_dir))
for local_branch in branches :
remote_branch = branches[local_branch]
remote_rev = get_remote_rev(proj_dir, remote_branch)
# remote_rev can be None if the remote branch doesn't exists,
# this is not an error
if remote_rev :
local_rev = get_local_rev(proj_dir, local_branch)
if remote_rev != local_rev :
out_of_sync = True
if not branches_out_of_sync:
# only show this once
log.warn("'{}' branches out of sync:".format(proj_dir))
branches_out_of_sync = True
log.info(" {}: {}".format(local_branch, local_rev))
log.info(" {}: {}".format(remote_branch, remote_rev))
return out_of_sync
#-------------------------------------------------------------------------------
def check_branch_out_of_sync(proj_dir, branch) :
"""check if a single branch is out of sync with remote repo"""
if not check_exists() :
log.error("git not found, please run and fix './fips diag tools'")
return False
out_of_sync = False
remote_branches = get_branches(proj_dir)
local_rev = get_local_rev(proj_dir, branch)
if branch in remote_branches :
remote_rev = get_remote_rev(proj_dir, remote_branches[branch])
out_of_sync = remote_rev != local_rev
else :
log.warn("'{}' no remote branch found for '{}'".format(proj_dir, branch))
return out_of_sync
| mit |
mxamin/youtube-dl | youtube_dl/extractor/ustream.py | 1 | 5485 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
)
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
)
class UstreamIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<id>\d+)'
IE_NAME = 'ustream'
_TESTS = [{
'url': 'http://www.ustream.tv/recorded/20274954',
'md5': '088f151799e8f572f84eb62f17d73e5c',
'info_dict': {
'id': '20274954',
'ext': 'flv',
'title': 'Young Americans for Liberty February 7, 2012 2:28 AM',
'description': 'Young Americans for Liberty February 7, 2012 2:28 AM',
'timestamp': 1328577035,
'upload_date': '20120207',
'uploader': 'yaliberty',
'uploader_id': '6780869',
},
}, {
# From http://sportscanada.tv/canadagames/index.php/week2/figure-skating/444
# Title and uploader available only from params JSON
'url': 'http://www.ustream.tv/embed/recorded/59307601?ub=ff0000&lc=ff0000&oc=ffffff&uc=ffffff&v=3&wmode=direct',
'md5': '5a2abf40babeac9812ed20ae12d34e10',
'info_dict': {
'id': '59307601',
'ext': 'flv',
'title': '-CG11- Canada Games Figure Skating',
'uploader': 'sportscanadatv',
},
'skip': 'This Pro Broadcaster has chosen to remove this video from the ustream.tv site.',
}, {
'url': 'http://www.ustream.tv/embed/10299409',
'info_dict': {
'id': '10299409',
},
'playlist_count': 3,
}]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
# some sites use this embed format (see: https://github.com/rg3/youtube-dl/issues/2990)
if m.group('type') == 'embed/recorded':
video_id = m.group('id')
desktop_url = 'http://www.ustream.tv/recorded/' + video_id
return self.url_result(desktop_url, 'Ustream')
if m.group('type') == 'embed':
video_id = m.group('id')
webpage = self._download_webpage(url, video_id)
content_video_ids = self._parse_json(self._search_regex(
r'ustream\.vars\.offAirContentVideoIds=([^;]+);', webpage,
'content video IDs'), video_id)
return self.playlist_result(
map(lambda u: self.url_result('http://www.ustream.tv/recorded/' + u, 'Ustream'), content_video_ids),
video_id)
params = self._download_json(
'https://api.ustream.tv/videos/%s.json' % video_id, video_id)
error = params.get('error')
if error:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, error), expected=True)
video = params['video']
title = video['title']
filesize = float_or_none(video.get('file_size'))
formats = [{
'id': video_id,
'url': video_url,
'ext': format_id,
'filesize': filesize,
} for format_id, video_url in video['media_urls'].items()]
self._sort_formats(formats)
description = video.get('description')
timestamp = int_or_none(video.get('created_at'))
duration = float_or_none(video.get('length'))
view_count = int_or_none(video.get('views'))
uploader = video.get('owner', {}).get('username')
uploader_id = video.get('owner', {}).get('id')
thumbnails = [{
'id': thumbnail_id,
'url': thumbnail_url,
} for thumbnail_id, thumbnail_url in video.get('thumbnail', {}).items()]
return {
'id': video_id,
'title': title,
'description': description,
'thumbnails': thumbnails,
'timestamp': timestamp,
'duration': duration,
'view_count': view_count,
'uploader': uploader,
'uploader_id': uploader_id,
'formats': formats,
}
class UstreamChannelIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ustream\.tv/channel/(?P<slug>.+)'
IE_NAME = 'ustream:channel'
_TEST = {
'url': 'http://www.ustream.tv/channel/channeljapan',
'info_dict': {
'id': '10874166',
},
'playlist_mincount': 17,
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
display_id = m.group('slug')
webpage = self._download_webpage(url, display_id)
channel_id = self._html_search_meta('ustream:channel_id', webpage)
BASE = 'http://www.ustream.tv'
next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id
video_ids = []
while next_url:
reply = self._download_json(
compat_urlparse.urljoin(BASE, next_url), display_id,
note='Downloading video information (next: %d)' % (len(video_ids) + 1))
video_ids.extend(re.findall(r'data-content-id="(\d.*)"', reply['data']))
next_url = reply['nextUrl']
entries = [
self.url_result('http://www.ustream.tv/recorded/' + vid, 'Ustream')
for vid in video_ids]
return {
'_type': 'playlist',
'id': channel_id,
'display_id': display_id,
'entries': entries,
}
| unlicense |
ThibaultReuille/graphiti | Scripts/console/graph.py | 1 | 6448 | import script
from script import *
class Topology(script.Script):
def neighbors(self, args):
new_neighbors = list()
if 'nodes' in self.console.query:
graph = std.load_nx_graph()
for nid in self.console.query['nodes']:
for neighbor in graph.neighbors(nid):
if neighbor not in self.console.query['nodes']:
new_neighbors.append(neighbor)
for nn in new_neighbors:
self.console.query['nodes'].append(nn)
self.console.print_query()
def connected_components(self, args):
og.set_attribute("graphiti:space:edgemode", "string", "node_color")
graph = std.load_nx_graph()
cc = nx.connected_components(graph)
for component in cc:
r = random.random()
g = random.random()
b = random.random()
color = str(r) + " " + str(g) + " " + str(b)
for node in component:
og.set_node_attribute(node, "graphiti:space:color", "vec3", color)
def directions(self, args):
for id in og.get_edge_ids():
og.set_edge_attribute(id, "og:space:icon", "string", "styles/triangles")
def connections(self, args):
og.set_attribute("graphiti:space:edgemode", "string", "node_color")
# Building node degree table ...
edges = og.get_edge_ids()
degree_table = dict()
for eid in edges:
nid1 = og.get_edge_node1(eid)
nid2 = og.get_edge_node2(eid)
if nid1 not in degree_table:
degree_table[nid1] = { "in" : 0, "out" : 0 }
if nid2 not in degree_table:
degree_table[nid2] = { "in" : 0, "out" : 0 }
degree_table[nid1]["out"] += 1
degree_table[nid2]["in"] += 1
# Randomizing color map
m = dict()
m["isolated"] = [0.95, 0.98, 0.36, 1.0]
m["leaf"] = [0.06, 0.94, 0.61, 1.0]
m["source"] = [0.91, 0.18, 0.17, 1.0]
m["sink"] = [0.03, 0.65, 0.94, 1.0]
m["other"] = [0.77, 0.78, 0.75, 1.0]
# Coloring
for nid in og.get_node_ids():
if nid not in degree_table:
t = "isolated"
else:
deg = degree_table[nid]
if deg["in"] == 0 and deg["out"] == 1:
t = "leaf"
elif deg["in"] == 0 and deg["out"] > 1:
t = "source"
elif deg["in"] > 0 and deg["out"] == 0:
t = "sink"
else:
t = "other"
og.set_node_attribute(nid, "graphiti:space:color", "vec4", std.vec4_to_str(m[t]))
def degrees_high(self):
og.set_attribute("graphiti:space:edgemode", "string", "node_color")
graph = std.load_nx_graph()
max_degree = max(nx.degree(graph).values())
for n in graph.nodes(data = True):
deg = nx.degree(graph, n[0])
tint = 0.3 + 0.9 * float(deg) / float(max_degree)
color = og.get_node_attribute(n[0], "graphiti:space:color")
color[0] = tint * color[0]
color[1] = tint * color[1]
color[2] = tint * color[2]
color[3] = 1.0
c = str(color[0]) + " " + str(color[1]) + " " + str(color[2])
og.set_node_attribute(n[0], "graphiti:space:color", "vec3", c)
def degrees_low(self):
og.set_attribute("graphiti:space:edgemode", "string", "node_color")
graph = std.load_nx_graph()
max_degree = max(nx.degree(graph).values())
for n in graph.nodes(data = True):
deg = nx.degree(graph, n[0])
tint = 0.3 + 0.9 * (1.0 - float(deg) / float(max_degree))
color = og.get_node_attribute(n[0], "graphiti:space:color")
color[0] = tint * color[0]
color[1] = tint * color[1]
color[2] = tint * color[2]
c = str(color[0]) + " " + str(color[1]) + " " + str(color[2])
og.set_node_attribute(n[0], "graphiti:space:color", "vec3", c)
def degrees(self, args):
if len(args) == 1:
self.degrees_high()
elif len(args) == 2 and args[1] == "high":
self.degrees_high()
elif len(args) == 2 and args[1] == "low":
self.degrees_low()
else:
self.console.log("Error: {0}: Wrong arguments!".format(args[0]))
def get_degree_map(self):
degrees = dict()
for eid in og.get_edge_ids():
bi = False
e_type = og.get_edge_attribute(eid, "type")
if e_type is not None and "<->" in e_type:
bi = True
nid1 = og.get_edge_node1(eid)
nid2 = og.get_edge_node2(eid)
if nid1 not in degrees:
degrees[nid1] = { "in" : 0, "out" : 0 }
if nid2 not in degrees:
degrees[nid2] = { "in" : 0, "out" : 0 }
if bi:
degrees[nid1]["in"] += 1
degrees[nid1]["out"] += 1
degrees[nid2]["in"] += 1
degrees[nid2]["out"] += 1
else:
degrees[nid1]["out"] += 1
degrees[nid2]["in"] += 1
return degrees
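    # Illustrative: an edge a->b adds one to a's "out" count and one to
    # b's "in" count; an edge whose "type" contains "<->" counts in both
    # directions for both endpoints.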
def spn(self, args):
degree_map = self.get_degree_map()
source_map = dict()
for eid in og.get_edge_ids():
src = og.get_edge_node1(eid)
if src not in degree_map:
continue
if degree_map[src]["in"] == 0 and degree_map[src]["out"] >= 0:
dst = og.get_edge_node2(eid)
if src not in source_map:
source_map[src] = [(dst, eid)]
elif dst not in source_map[src]:
source_map[src].append((dst, eid))
for nid in og.get_node_ids():
og.set_node_attribute(nid, "og:space:lod", "float", "0.0")
for eid in og.get_edge_ids():
og.set_edge_attribute(eid, "og:space:lod", "float", "0.0")
for source in source_map.keys():
og.set_node_attribute(source, "og:space:lod", "float", "1.0")
for successor in source_map[source]:
og.set_node_attribute(successor[0], "og:space:lod", "float", "1.0")
og.set_edge_attribute(successor[1], "og:space:lod", "float", "1.0")
def run(self, args):
if len(args) == 2 and args[1] == "neighbors":
self.neighbors(args)
elif len(args) == 2 and args[1] == "cc":
self.connected_components(args)
elif len(args) == 2 and args[1] == "directions":
self.directions(args)
elif len(args) == 2 and args[1] == "connections":
self.connections(args)
elif len(args) >= 2 and args[1] == "degrees":
self.degrees(args[1:])
elif len(args) == 2 and args[1] == "spn":
self.spn(args)
else:
self.console.log("Error: {0}: Wrong arguments!".format(args[0]))
| bsd-2-clause |
elbeardmorez/quodlibet | quodlibet/tests/quality/util.py | 1 | 1751 | # -*- coding: utf-8 -*-
# Copyright 2017 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
from collections import namedtuple
try:
import configparser
except ImportError:
import ConfigParser as configparser
import quodlibet
from quodlibet.util import get_module_dir
SetupConfig = namedtuple("SetupConfig", ["ignore", "builtins", "exclude"])
def parse_setup_cfg():
"""Parses the flake8 config from the setup.cfg file in the root dir
Returns:
SetupConfig
"""
base_dir = os.path.dirname(get_module_dir(quodlibet))
cfg = os.path.join(base_dir, "setup.cfg")
config = configparser.RawConfigParser()
config.read(cfg)
ignore = str(config.get("flake8", "ignore")).split(",")
builtins = str(config.get("flake8", "builtins")).split(",")
exclude = str(config.get("flake8", "exclude")).split(",")
exclude = [
os.path.join(base_dir, e.replace("/", os.sep)) for e in exclude]
return SetupConfig(ignore, builtins, exclude)
setup_cfg = parse_setup_cfg()
def iter_py_files(root):
for base, dirs, files in os.walk(root):
for file_ in files:
path = os.path.join(base, file_)
if os.path.splitext(path)[1] == ".py":
yield path
def iter_project_py_files():
root = os.path.dirname(get_module_dir(quodlibet))
skip = setup_cfg.exclude
for path in iter_py_files(root):
if any((path.startswith(s + os.sep) or s == path)
for s in skip):
continue
yield path
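# A hedged usage sketch: how the helpers above would typically drive a quality
# check over the project's Python files. `run_flake8` is a hypothetical stand-in
# for whatever consumes SetupConfig.ignore/builtins, not a real quodlibet API.
if __name__ == "__main__":
    print("flake8 ignore list: %s" % ", ".join(setup_cfg.ignore))
    for path in iter_project_py_files():
        print(path)  # e.g. run_flake8(path, ignore=setup_cfg.ignore)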
| gpl-2.0 |
erwin00776/comment_label_worm | bow/comment_download_cut.py | 1 | 10195 | __author__ = 'erwin'
#coding=utf-8
import codecs
import urllib2
import re
import threading
import time
import os
import jieba
query_done_set = {}
def cut_line(line):
seg_list = jieba.cut(line, cut_all=False)
return " ".join(seg_list)
def cut_words(src, dst):
fin = codecs.open(src, 'r', 'utf-8')
fout = codecs.open(dst, 'w', 'utf-8')
for line in fin.readlines():
line = line.strip()
# length of the surrounding markup to strip: 9 for content lines, 15 for contenttitle lines
prefix = 9
if line.find("contenttitle") > 0:
prefix = 15
l = len(line)
if l <= prefix * 2 + 1:
continue
# keep only the inner text between the opening and closing markup
line = line[prefix:l - prefix]
fout.write(cut_line(line))
fin.close()
fout.close()
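# Illustrative use of the segmentation helpers above, assuming jieba is installed.
# The sample sentence is made up for the demo; the exact token split depends on
# the jieba version and its default dictionary, so the output shown in the
# comment is approximate.
def _demo_cut_line():
    tokens = cut_line(u"这双鞋子质量很好")
    print(tokens)  # roughly: 这 双 鞋子 质量 很 好
    return tokens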
def download_comments(itemid, sellerid, query='', class_name=None):
if class_name is None:
file_name = "/users/erwin/tmp/tmall_comments_%s_%s_%s" % (query, itemid, sellerid)
else:
file_name = ""  # class_name branch left unfinished here; see CommentDownloader.download_comments for the class-aware path
fout = open(file_name, 'w')
prev_line = None
for pageid in range(1, 50):
url_prefix = "http://rate.tmall.com/list_detail_rate.htm?itemId=%s&spuId=&sellerId=%s&order=1¤tPage=%d" % (itemid, sellerid, pageid)
url = url_prefix + '&append=0&content=1&tagId=&posi=&picture=&ua=248YlJgThc3UTYWOBgrdllqXG9aa1liUGtefiF%2B%7CYVJ8T3lKekp5Q3FGdUN2RRo%3D%7CYFB%2BJwdWNVEyQikJJwc9HTMTXD9efiF%2B%7CZ1RkSmpZb1xsXG9VZ1BjVWBTa1l1RnZBcUp%2FRXJFdENzSHJCeEJiPQ%3D%3D%7CZlVuQBkoBjUHNgIsHDISIBc3aDc%3D%7CZVR6IxMiDD4QIxMpGzUOOQ48EiERKxk3DT8NIxAgGigGMwM2aTY%3D%7CZFJ8JQUlCzELPRMiEigSPAk7DThnOA%3D%3D%7Ca11zKgoqBDEBNRsrHS4eMAs%2BBD9gPw%3D%3D%7CalxyKwsrBTIJMx0uFSUQPg03BzIJVgk%3D%7CaV9xKAgoBj0NOhQnECAXOQk7CDkNUg0%3D%7CaF9xKAgoBl9kUWRKeU5%2FTBM9DyEBIQ8%2FDTgJO2Q7%7Cb1l3Lg4uADUCMx0uGiofMQEyAzEBXgE%3D%7Cbll3Lg4uAFlsXm9BckZwQR4wAiwMLAIyATYAM2wz%7CbVt1LAwsAjkNPRMgESESPAw9DjkJVgk%3D%7CbFt1LAwsAltgVW9BckNyRxg2BCoKKgQ0BTIGMG8w%7Cc0VrMhIyHCkZLgAxADYNIxMmEyEWSRY%3D%7CckRqMxMzHSYRJwk4CToPIRErGCkcQxw%3D%7CcUZoMRExH0Z9SnxSY1JhVHpBckNvW2tcckR1R2lSYU9%2BRRo0BigIKAY2DD0KO2Q7%7CcEZ0RmhadERqWW9cckN0QW9Zd0R2WGNNd1lsQnFGaF9xQnNdbEJxQmxfcUNtVwg%3D&_ksTS=1412218079834_3853&callback=jsonp3854'
try:
page = None
try_times = 0
line = None
while page is None and try_times < 5:
page = urllib2.urlopen(url, timeout=3)
try_times += 1
line = page.read()
if len(line) <= 256:
continue
if line is not None:
if prev_line is not None and prev_line == line:
break
fout.write(line)
prev_line = line
if pageid % 10 == 0:
time.sleep(1)
except:
print("exception %s" % url)
pass
page.close()
print('download comment %s page %d' % (query, pageid))
fout.close()
def get_item_seller_id(file_path, query=''):
fin = open(file_path, 'r')
is_tmall = False
for line in fin.readlines():
line = line.strip()
if line.find('data-pid') > 0:
if line.find('tmall') > 0:
is_tmall = True
else:
is_tmall = False
if is_tmall and line.find('col seller feature-dsi-tgr') > 0:
sid = re.search('sid=\d+', line).group()
bid = re.search('bid=\d+', line).group()
sid = sid.split('=')[1] # seller id
bid = bid.split('=')[1] # item id
download_comments(itemid=bid, sellerid=sid, query=query)
print("download comment %s %s %s done." % (query, bid, sid))
fin.close()
class CommentDownloader(threading.Thread):
def __init__(self, query, class_name=None):
threading.Thread.__init__(self)
self.query = query
self.max_retries = 3
self.commodity_per_query = 10
self.class_name = class_name
self.base_dir = '/Users/erwin/work/comment_labeled/raw_comments'
def run(self):
retries = 0
ret = 1
while retries < self.max_retries and ret != 0:
ret = self.try_to_download()
retries += 1
def try_to_download(self):
try:
if self.class_name is None:
file_path = '/users/erwin/tmp/tmall_search_result_' + self.query
else:
file_path = os.path.join(self.base_dir, self.class_name, 'tmall_search_result_' + self.query)
fout = open(file_path, 'w')
url = "http://s.taobao.com/search?spm=a230r.1.8.3.VcgCfO&sort=sale-desc&initiative_id=staobaoz_20141002&tab=all&q=%s" % self.query
#url = url_encode(url+'&stats_click=search_radio_all%253A1#J_relative')
url = "http://list.tmall.com/search_product.htm?q=%s" % urllib2.quote(url_encode(self.query))
page = urllib2.urlopen(url, timeout=3)
fout.write(page.read())
fout.close()
self.get_item_seller_id(file_path, query=self.query)
page.close()
print("[%s] download query %s done." % (self.getName(), self.query))
return 0
except Exception:  # any failure (network error, bad page) should trigger the retry loop in run()
print("[%s] download query %s failed." % (self.getName(), self.query))
return -1
def get_item_seller_id(self, file_path, query):
fin = open(file_path, 'r')
commodity_count = 0
for line in fin.readlines():
line = line.strip()
if line.find('//detail.tmall.com/item.htm') > 0 and line.find('abbucket') > 0:
tmp_sid = re.search('id=\d+', line).group()
tmp_uid = re.search('user_id=\d+', line).group()
sid = tmp_sid.split('=')[1]
uid = tmp_uid.split('=')[1]
self.download_comments(itemid=sid, sellerid=uid, query=query)
print("[%s] download comment %s %s %s done." % (self.getName(), query, uid, sid))
time.sleep(1)
commodity_count += 1
if commodity_count > self.commodity_per_query:
return
def download_comments(self, itemid, sellerid, query=''):
if self.class_name is None:
file_name = "/users/erwin/tmp/tmall_comments_%s_%s_%s" % (query, itemid, sellerid)
else:
file_name = os.path.join(self.base_dir, self.class_name, 'tmall_comments_%s_%s_%s' % (query, itemid, sellerid))
fout = open(file_name, 'w')
prev_line = None
for pageid in range(1, 100):
url = "http://rate.tmall.com/list_detail_rate.htm?itemId=%s&spuId=&sellerId=%s&order=1" \
"¤tPage=%d" % (itemid, sellerid, pageid)
try:
page = None
try_times = 0
line = None
while page is None and try_times < 5:
page = urllib2.urlopen(url, timeout=3)
try_times += 1
line = page.read()
if len(line) <= 256:
continue
if line is not None:
if prev_line is not None and prev_line == line:
break
fout.write(line)
prev_line = line
except:
print("exception %s" % url)
pass
page.close()
print('download comment %s page %d' % (query, pageid))
fout.close()
def download_search_result(query=""):
try:
file_path = '/users/erwin/tmp/tmall_search_result_' + query
fout = open(file_path, 'w')
url = "http://s.taobao.com/search?spm=a230r.1.8.3.VcgCfO&sort=sale-desc&initiative_id=staobaoz_20141002&tab=all&q=%s" % query
url = url_encode(url+'&stats_click=search_radio_all%253A1#J_relative')
page = urllib2.urlopen(url, timeout=3)
fout.write(page.read())
fout.close()
get_item_seller_id(file_path, query=query)
page.close()
global query_done_set
query_done_set[query] = 1
print("download query %s done." % query)
except:
print("download query %s failed." % query)
def url_encode(url):
x = url.decode('UTF-8')
y = x.encode('GB2312')
return y
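# Why GB2312 in url_encode(): the Taobao/Tmall search endpoints of this era
# expected GB2312-encoded query strings rather than UTF-8. A hedged sketch of the
# full transcode-then-quote round trip (Python 2 semantics, like the rest of this
# file); the query value is an arbitrary example.
def _demo_quote_query(query):
    gb_bytes = url_encode(query)    # UTF-8 bytes -> unicode -> GB2312 bytes
    return urllib2.quote(gb_bytes)  # percent-encoded GB2312, ready for the URL
# e.g. "http://list.tmall.com/search_product.htm?q=" + _demo_quote_query("短靴")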
def download_all_comments(query_list, class_name=None):
global query_done_set
worker_set = set()
for query in query_list:
try:
while len(worker_set) > 3:
time.sleep(1)
worker_set_clone = worker_set.copy()
for t in worker_set_clone:
if t.isAlive():
t.join(timeout=1)
else:
worker_set.remove(t)
break
foo = CommentDownloader(query, class_name)
worker_set.add(foo)
foo.start()
except Exception:  # skip queries whose worker setup fails, as the log message below indicates
print("except %s, skip this." % query)
pass
for t in worker_set:
t.join()
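# The loop above throttles by hand: it refuses to start another CommentDownloader
# while more than three workers are alive, polling with join(timeout=1). A hedged,
# generic sketch of the same bounded-worker pattern; `make_worker` is a
# hypothetical thread factory, not part of this module.
def run_bounded(tasks, make_worker, max_workers=3):
    alive = set()
    for task in tasks:
        while len(alive) >= max_workers:
            time.sleep(1)
            alive = set(t for t in alive if t.isAlive())
        worker = make_worker(task)
        alive.add(worker)
        worker.start()
    for worker in alive:
        worker.join()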
if __name__ == '__main__':
query_list = ["中跟女鞋","短靴","英伦 复古 绒", "平底", "女式鞋单鞋","秋季女式鞋","单鞋","马丁靴","高跟鞋","头层牛皮"]
query_list = ["短靴", "中跟女鞋", "平跟 蝴蝶结", "马丁靴", "单鞋", "短筒", "高跟鞋", "女式鞋单鞋", "皮鞋", "皮鞋男", "商务休闲", "滑板鞋男款", "帆布鞋男款", "滑板鞋男", "马丁靴男", "男式鞋休闲鞋", "运动鞋男", "豆豆鞋男"]
query_list = ['茶叶', '食品', '水果', '牛奶', '零食', '饼干', '饮料', '牛肉干', '猪肉脯']
query_list = ['铁观音', '红茶', '绿茶', '普洱', '花茶', '龙井']
query_list = ['护肤套装', '面膜', '乳液', '面霜', '眼霜', '身体护理', '男士护理', '洁面', '化妆水', '精油芳疗', '丰胸', 'T区护理', '防晒',
'唇部护理', '去角化', '按摩霜', '敏感修护', '补水']
#query_list = ['化妆品']
for q in query_list:
download_all_comments([q], class_name='cosmetic')
#cut_words("/Users/erwin/tmp/all_tmall_comments", "/Users/erwin/tmp/all_tmall_comments.cut")
#download_comments(itemid='40887946035', sellerid='1579139371', query='clothes')
| mit |
oihane/odoo | addons/l10n_it/__init__.py | 447 | 1161 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010
# OpenERP Italian Community (<http://www.openerp-italia.org>)
# Servabit srl
# Agile Business Group sagl
# Domsense srl
# Albatos srl
#
# Copyright (C) 2011
# Associazione OpenERP Italia (<http://www.openerp-italia.org>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
| agpl-3.0 |
Eric89GXL/vispy | vispy/geometry/curves.py | 21 | 13145 | #
# Anti-Grain Geometry - Version 2.4
# Copyright (C) 2002-2005 Maxim Shemanarev (McSeem)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# Python translation by Nicolas P. Rougier
# Copyright (C) 2013 Nicolas P. Rougier. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY NICOLAS P. ROUGIER ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL NICOLAS P. ROUGIER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Nicolas P. Rougier.
#
import math
import numpy as np
curve_distance_epsilon = 1e-30
curve_collinearity_epsilon = 1e-30
curve_angle_tolerance_epsilon = 0.01
curve_recursion_limit = 32
m_cusp_limit = 0.0
m_angle_tolerance = 10 * math.pi / 180.0
m_approximation_scale = 1.0
m_distance_tolerance_square = (0.5 / m_approximation_scale)**2
def calc_sq_distance(x1, y1, x2, y2):
dx = x2 - x1
dy = y2 - y1
return dx * dx + dy * dy
def _curve3_recursive_bezier(points, x1, y1, x2, y2, x3, y3, level=0):
if level > curve_recursion_limit:
return
# Calculate all the mid-points of the line segments
x12 = (x1 + x2) / 2.
y12 = (y1 + y2) / 2.
x23 = (x2 + x3) / 2.
y23 = (y2 + y3) / 2.
x123 = (x12 + x23) / 2.
y123 = (y12 + y23) / 2.
dx = x3 - x1
dy = y3 - y1
d = math.fabs((x2 - x3) * dy - (y2 - y3) * dx)
if d > curve_collinearity_epsilon:
# Regular case
if d * d <= m_distance_tolerance_square * (dx * dx + dy * dy):
# If the curvature doesn't exceed the distance_tolerance value
# we tend to finish subdivisions.
if m_angle_tolerance < curve_angle_tolerance_epsilon:
points.append((x123, y123))
return
# Angle & Cusp Condition
da = math.fabs(
math.atan2(y3 - y2, x3 - x2) - math.atan2(y2 - y1, x2 - x1))
if da >= math.pi:
da = 2 * math.pi - da
if da < m_angle_tolerance:
# Finally we can stop the recursion
points.append((x123, y123))
return
else:
# Collinear case
da = dx * dx + dy * dy
if da == 0:
d = calc_sq_distance(x1, y1, x2, y2)
else:
d = ((x2 - x1) * dx + (y2 - y1) * dy) / da
if d > 0 and d < 1:
# Simple collinear case, 1---2---3, we can leave just two
# endpoints
return
if d <= 0:
d = calc_sq_distance(x2, y2, x1, y1)
elif d >= 1:
d = calc_sq_distance(x2, y2, x3, y3)
else:
d = calc_sq_distance(x2, y2, x1 + d * dx, y1 + d * dy)
if d < m_distance_tolerance_square:
points.append((x2, y2))
return
# Continue subdivision
_curve3_recursive_bezier(points, x1, y1, x12, y12, x123, y123, level + 1)
_curve3_recursive_bezier(points, x123, y123, x23, y23, x3, y3, level + 1)
def _curve4_recursive_bezier(points, x1, y1, x2, y2, x3, y3, x4, y4, level=0):
if level > curve_recursion_limit:
return
# Calculate all the mid-points of the line segments
x12 = (x1 + x2) / 2.
y12 = (y1 + y2) / 2.
x23 = (x2 + x3) / 2.
y23 = (y2 + y3) / 2.
x34 = (x3 + x4) / 2.
y34 = (y3 + y4) / 2.
x123 = (x12 + x23) / 2.
y123 = (y12 + y23) / 2.
x234 = (x23 + x34) / 2.
y234 = (y23 + y34) / 2.
x1234 = (x123 + x234) / 2.
y1234 = (y123 + y234) / 2.
# Try to approximate the full cubic curve by a single straight line
dx = x4 - x1
dy = y4 - y1
d2 = math.fabs(((x2 - x4) * dy - (y2 - y4) * dx))
d3 = math.fabs(((x3 - x4) * dy - (y3 - y4) * dx))
s = int((d2 > curve_collinearity_epsilon) << 1) + \
int(d3 > curve_collinearity_epsilon)
if s == 0:
# All collinear OR p1==p4
k = dx * dx + dy * dy
if k == 0:
d2 = calc_sq_distance(x1, y1, x2, y2)
d3 = calc_sq_distance(x4, y4, x3, y3)
else:
k = 1. / k
da1 = x2 - x1
da2 = y2 - y1
d2 = k * (da1 * dx + da2 * dy)
da1 = x3 - x1
da2 = y3 - y1
d3 = k * (da1 * dx + da2 * dy)
if d2 > 0 and d2 < 1 and d3 > 0 and d3 < 1:
# Simple collinear case, 1---2---3---4
# We can leave just two endpoints
return
if d2 <= 0:
d2 = calc_sq_distance(x2, y2, x1, y1)
elif d2 >= 1:
d2 = calc_sq_distance(x2, y2, x4, y4)
else:
d2 = calc_sq_distance(x2, y2, x1 + d2 * dx, y1 + d2 * dy)
if d3 <= 0:
d3 = calc_sq_distance(x3, y3, x1, y1)
elif d3 >= 1:
d3 = calc_sq_distance(x3, y3, x4, y4)
else:
d3 = calc_sq_distance(x3, y3, x1 + d3 * dx, y1 + d3 * dy)
if d2 > d3:
if d2 < m_distance_tolerance_square:
points.append((x2, y2))
return
else:
if d3 < m_distance_tolerance_square:
points.append((x3, y3))
return
elif s == 1:
# p1,p2,p4 are collinear, p3 is significant
if d3 * d3 <= m_distance_tolerance_square * (dx * dx + dy * dy):
if m_angle_tolerance < curve_angle_tolerance_epsilon:
points.append((x23, y23))
return
# Angle Condition
da1 = math.fabs(
math.atan2(y4 - y3, x4 - x3) - math.atan2(y3 - y2, x3 - x2))
if da1 >= math.pi:
da1 = 2 * math.pi - da1
if da1 < m_angle_tolerance:
points.extend([(x2, y2), (x3, y3)])
return
if m_cusp_limit != 0.0:
if da1 > m_cusp_limit:
points.append((x3, y3))
return
elif s == 2:
# p1,p3,p4 are collinear, p2 is significant
if d2 * d2 <= m_distance_tolerance_square * (dx * dx + dy * dy):
if m_angle_tolerance < curve_angle_tolerance_epsilon:
points.append((x23, y23))
return
# Angle Condition
# ---------------
da1 = math.fabs(
math.atan2(y3 - y2, x3 - x2) - math.atan2(y2 - y1, x2 - x1))
if da1 >= math.pi:
da1 = 2 * math.pi - da1
if da1 < m_angle_tolerance:
points.extend([(x2, y2), (x3, y3)])
return
if m_cusp_limit != 0.0:
if da1 > m_cusp_limit:
points.append((x2, y2))
return
elif s == 3:
# Regular case
if (d2 + d3) * (d2 + d3) <= m_distance_tolerance_square * (
dx * dx + dy * dy):
# If the curvature doesn't exceed the distance_tolerance value
# we tend to finish subdivisions.
if m_angle_tolerance < curve_angle_tolerance_epsilon:
points.append((x23, y23))
return
# Angle & Cusp Condition
k = math.atan2(y3 - y2, x3 - x2)
da1 = math.fabs(k - math.atan2(y2 - y1, x2 - x1))
da2 = math.fabs(math.atan2(y4 - y3, x4 - x3) - k)
if da1 >= math.pi:
da1 = 2 * math.pi - da1
if da2 >= math.pi:
da2 = 2 * math.pi - da2
if da1 + da2 < m_angle_tolerance:
# Finally we can stop the recursion
points.append((x23, y23))
return
if m_cusp_limit != 0.0:
if da1 > m_cusp_limit:
points.append((x2, y2))
return
if da2 > m_cusp_limit:
points.append((x3, y3))
return
# Continue subdivision
_curve4_recursive_bezier(
points, x1, y1, x12, y12, x123, y123, x1234, y1234, level + 1)
_curve4_recursive_bezier(
points, x1234, y1234, x234, y234, x34, y34, x4, y4, level + 1)
def curve3_bezier(p1, p2, p3):
"""
Generate the vertices for a quadratic Bezier curve.
The vertices returned by this function can be passed to a LineVisual or
ArrowVisual.
Parameters
----------
p1 : array
2D coordinates of the start point
p2 : array
2D coordinates of the first curve point
p3 : array
2D coordinates of the end point
Returns
-------
coords : list
Vertices for the Bezier curve.
See Also
--------
curve4_bezier
Notes
-----
For more information about Bezier curves please refer to the `Wikipedia`_
page.
.. _Wikipedia: https://en.wikipedia.org/wiki/B%C3%A9zier_curve
"""
x1, y1 = p1
x2, y2 = p2
x3, y3 = p3
points = []
_curve3_recursive_bezier(points, x1, y1, x2, y2, x3, y3)
dx, dy = points[0][0] - x1, points[0][1] - y1
if (dx * dx + dy * dy) > 1e-10:
points.insert(0, (x1, y1))
dx, dy = points[-1][0] - x3, points[-1][1] - y3
if (dx * dx + dy * dy) > 1e-10:
points.append((x3, y3))
return np.array(points).reshape(len(points), 2)
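# A hedged usage sketch for curve3_bezier(): flatten one quadratic Bezier into a
# polyline. The control points are arbitrary demo values; the vertex count depends
# on the module-level tolerance globals above (m_approximation_scale and friends).
def _demo_curve3():
    pts = curve3_bezier((0.0, 0.0), (50.0, 100.0), (100.0, 0.0))
    # pts is an (N, 2) array whose first and last rows are the two endpoints
    assert tuple(pts[0]) == (0.0, 0.0) and tuple(pts[-1]) == (100.0, 0.0)
    return pts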
def curve4_bezier(p1, p2, p3, p4):
"""
Generate the vertices for a third order Bezier curve.
The vertices returned by this function can be passed to a LineVisual or
ArrowVisual.
Parameters
----------
p1 : array
2D coordinates of the start point
p2 : array
2D coordinates of the first curve point
p3 : array
2D coordinates of the second curve point
p4 : array
2D coordinates of the end point
Returns
-------
coords : list
Vertices for the Bezier curve.
See Also
--------
curve3_bezier
Notes
-----
For more information about Bezier curves please refer to the `Wikipedia`_
page.
.. _Wikipedia: https://en.wikipedia.org/wiki/B%C3%A9zier_curve
"""
x1, y1 = p1
x2, y2 = p2
x3, y3 = p3
x4, y4 = p4
points = []
_curve4_recursive_bezier(points, x1, y1, x2, y2, x3, y3, x4, y4)
dx, dy = points[0][0] - x1, points[0][1] - y1
if (dx * dx + dy * dy) > 1e-10:
points.insert(0, (x1, y1))
dx, dy = points[-1][0] - x4, points[-1][1] - y4
if (dx * dx + dy * dy) > 1e-10:
points.append((x4, y4))
return np.array(points).reshape(len(points), 2)
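# A hedged demo of the cubic flattener: an S-shaped curve with arbitrary control
# points. Note that the tolerances (m_approximation_scale,
# m_distance_tolerance_square, ...) are shared module-level state rather than
# per-call parameters, so tightening them affects every subsequent call.
def _demo_curve4():
    pts = curve4_bezier((0.0, 0.0), (0.0, 100.0), (100.0, -100.0), (100.0, 0.0))
    assert tuple(pts[0]) == (0.0, 0.0) and tuple(pts[-1]) == (100.0, 0.0)
    return pts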
| bsd-3-clause |
DBCDK/OpenSearch-webservice | script/analyze-timings.py | 1 | 10413 | #!/usr/bin/env python3
# This script reads a json file with timing information from OpenSearch, and
# checks that the timing information from all log lines "adds up", that is,
# that non-overlapping measurements cover at least a target percentage of the
# time spent in Total (98% by default, configurable with --percentage).
import argparse
import sys
import traceback
import json
import datetime
import re
################################################################################
# GLOBAL STUFF
################################################################################
# Some global variables that mostly exist to handle default values.
# The name of our script - used in output.
script_name = "analyze-timings"
# These are convenient to not have to pass to all functions, etc.
# Could have been wrapped in a class, though.
do_debug = False
do_trace = False  # consulted by trace(); defined here so trace() does not raise a NameError
################################################################################
# LOG AND OUTPUT STUFF
################################################################################
# I can't figure out how to make this a (static) method in Colors that can be called during the attribute initialization below.
def build_color(num):
return '\033[' + str(num) + 'm'
class Colors:
# Control
NORMAL = build_color(0)
BOLD = build_color(1)
UNDERLINE = build_color(4)
# Colors
GREEN = build_color(92)
BLUE = build_color(34)
YELLOW = build_color(93)
RED = build_color(91)
CYAN = build_color(96)
MAGENTA = build_color(95)
# Name is script name, rest is levels
NAME = GREEN
INFO = GREEN
WARN = YELLOW
DRYRUN = YELLOW
ERROR = RED
DEBUG = CYAN
TRACE = MAGENTA
UNKNOWN = RED
STAGENAME = BLUE
CHECKNAME = GREEN
@staticmethod
def remove_colors(string: str):
"""
Remove any color codes from a string, making it suitable for output to file, instead of terminal.
:param string: The string to remove color codes from.
:return: The input string, with color codes removed.
"""
return re.sub('\\033\\[\\d{1,2}m', '', string)
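# A small sanity sketch of the ANSI helpers above (demo values only): build_color()
# yields escape sequences like "\033[92m", and remove_colors() strips any such
# sequence back out, which is what non-tty (file) output relies on.
def _demo_colors():
    colored = Colors.GREEN + "ok" + Colors.NORMAL
    assert Colors.remove_colors(colored) == "ok"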
def output_log_msg(msg: str) -> None:
print(msg, flush=True)
def format_log_msg(level: str, msg: str) -> str:
"""
Format a string for log output. The level is colorized if we are in a tty.
The datetime added is local time.
:param level: The level (INFO, WARN, DEBUG, ...)
:param msg: The msg to output_msg.
:return: A formatted string.
"""
output = Colors.NAME + "[" + script_name + "] " + Colors.NORMAL + datetime.datetime.now().strftime("%T.%f") + " "
if level == "DEBUG":
output += Colors.DEBUG
elif level == "TRACE":
output += Colors.TRACE
elif level == "INFO":
output += Colors.INFO
elif level == "DRYRUN":
output += Colors.DRYRUN
elif level == "WARN":
output += Colors.WARN
elif level == "ERROR":
output += Colors.ERROR
elif level == "TODO":
output += Colors.YELLOW
else:
output += Colors.UNKNOWN
output += level + Colors.NORMAL + ": " + msg
if sys.stdout.isatty():
return output
else:
return Colors.remove_colors(output)
def info(msg: str) -> None:
"""
Output a msg at INFO level.
:param msg: The message to output.
"""
output_log_msg(format_log_msg("INFO", msg))
def warn(msg: str) -> None:
"""
Output a msg at WARN level.
:param msg: The message to output.
"""
output_log_msg(format_log_msg("WARN", msg))
def dryrun(msg: str) -> None:
"""
Output a msg at DRYRUN level.
:param msg: The message to output.
"""
output_log_msg(format_log_msg("DRYRUN", msg))
def error(msg: str) -> None:
"""
Output a msg at ERROR level.
:param msg: The message to output.
"""
output_log_msg(format_log_msg("ERROR", msg))
def trace(prefix="") -> None:
"""
Output a trace at TRACE level, if the global variable "do_trace" is True
:param prefix: Optional prefix set before the function name. This can be used by e.g. classes.
"""
global do_trace
if do_trace:
top = traceback.extract_stack(None, 2)[0]
func_name = top[2]
output_log_msg(format_log_msg("TRACE", "Entering " + prefix + func_name))
def todo(msg: str) -> None:
"""
Output a msg at TODO level, if the global variable "do_debug" is True
:param msg: The message to output.
"""
global do_debug
if do_debug:
output_log_msg(format_log_msg("TODO", msg))
def debug(msg: str) -> None:
"""
Output a msg at DEBUG level, if the global variable "do_debug" is True
:param msg: The message to output.
"""
global do_debug
if do_debug:
output_log_msg(format_log_msg("DEBUG", msg))
################################################################################
# PARSE ARGS AND MAIN
################################################################################
def get_args() -> argparse.Namespace:
"""
Configure the argument parsing system, and run it, to obtain the arguments given on the commandline.
:return: The parsed arguments.
"""
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("filename", help="The name of the file containing json data.")
parser.add_argument("-d", "--debug", action="store_true",
help="Output extra debug information")
parser.add_argument("-p", "--percentage", default=98,
type=float,
help="Check that the sums of non-overlapping measurements"
+ " covers at least this percentage of Total.")
parser.description = "Checks that the sums of non-overlapping measurements covers a percentage of Total"
parser.epilog = """
Examples:
Check a json measurement array using a file:
""" + sys.argv[0] + """ measurement.json
"""
args = parser.parse_args()
return args
def analyze_timing(request, target_percentage) -> bool:
"""
Analyze a single timing entry.
:param request: Information about a request, including "timing" information.
:param target_percentage: The target percentage for the coverage of Total
:return: True if the non-overlapping durations cover at least target_percentage of Total.
"""
# Get the total
action = request['action']
timestamp = request['timestamp']
timing = request['timing']
total = timing['Total']
debug("Total for request for action " + action + ": " + str(total))
# The approach is to get all durations (except Total) and remove any overlapping ones. This is done by keeping
# the duration with the earliest start and dropping any durations that start inside it, then repeating.
# This is a heuristic to see whether we cover most of the Total timing or not.
# It is not foolproof, but a best effort. Measurements that do not live up to the percentage will be flagged.
subkeys = [m for m in timing.keys() if '.durations' in m and 'Total.durations' not in m]
# Create list of all durations. First, add the k to the timings, for debugging / output purposes
for k in subkeys:
for t in timing[k]:
t['timer'] = k
durations = []
for k in subkeys:
durations.extend(timing[k])
# Make sure we do not have an empty list
if len(durations) == 0:
debug("Found empty list of measurements")
error("Request for action " + action + " at time " + str(timestamp) + " has no submeasurements.")
return False
# Sort by relstart, reverse to use pop
durations.sort(key=lambda e: e['relstart'], reverse=True)
debug("Durations, potentially overlapping, reversed: " + str(durations))
# Get the first measurement, use as starting point
non_overlapping_durations = []
d = durations.pop()
non_overlapping_durations.append(d)
relstop = d['relstop']
timer = d['timer']
while True:
if len(durations) == 0:
break
d = durations.pop()
# Ignore this measurement, if it was before current ended
if d['relstart'] < relstop:
debug("Dropping duration because of overlap with timer " + timer + ": " + str(d))
continue
# Use this measurement instead
non_overlapping_durations.append(d)
relstop = d['relstop']
timer = d['timer']
debug("Durations, non overlapping: " + str(non_overlapping_durations))
# Now, sum the durations, and compare to total
durations_sum = sum([e['duration'] for e in non_overlapping_durations])
debug("Total for this timer: " + str(total) + ", durations_sum: " + str(durations_sum)
+ ", percentage: " + str(durations_sum/total*100))
result = durations_sum/total*100 >= target_percentage
if result:
info("Request for action " + action + " at time " + str(timestamp)
+ " has " + str(durations_sum/total*100) + "% durations, which is sufficient")
else:
error("Request for action " + action + " at time " + str(timestamp)
+ " has " + str(durations_sum/total*100) + "% durations, which is less than required")
info("Total: " + json.dumps(timing['Total']) + ", durations, non overlapping: "
+ json.dumps(non_overlapping_durations))
info("Request: " + json.dumps(request))
return result
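def _select_non_overlapping(durations):
    # A hedged sketch of the greedy selection implemented inline above, factored
    # out for clarity: sort by relstart, keep the earliest measurement, then keep
    # each later one only if it starts at or after the previously kept
    # measurement's relstop. Input dicts need "relstart"/"relstop" keys, matching
    # the OpenSearch timing format consumed by analyze_timing().
    kept = []
    for d in sorted(durations, key=lambda e: e["relstart"]):
        if not kept or d["relstart"] >= kept[-1]["relstop"]:
            kept.append(d)
    return kept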
def main():
start_time = datetime.datetime.now()
try:
global script_name
args = get_args()
global do_debug
do_debug = args.debug
debug("cli options: debug:" + str(args.debug))
# Read the json file
final_result = True
with open(args.filename) as json_file:
data = json.load(json_file)
for r in data:
final_result = analyze_timing(r, args.percentage) and final_result
if final_result:
info("All measurements higher than target percentage of " + str(args.percentage))
sys.exit(0)
else:
error("At least one measurements less than target percentage of " + str(args.percentage))
sys.exit(1)
except Exception:
output_log_msg(traceback.format_exc())
stop_time = datetime.datetime.now()
info("Time passed: " + str(stop_time - start_time))
error("Verification " + Colors.RED + "FAILED" + Colors.NORMAL +
" due to internal error (unhandled exception). Please file a bug report.")
sys.exit(2)
main()
| agpl-3.0 |
lumig242/Hue-Integration-with-CDAP | desktop/core/ext-py/elementtree/elementtree/HTMLTreeBuilder.py | 103 | 7826 | #
# ElementTree
# $Id: HTMLTreeBuilder.py 2325 2005-03-16 15:50:43Z fredrik $
#
# a simple tree builder, for HTML input
#
# history:
# 2002-04-06 fl created
# 2002-04-07 fl ignore IMG and HR end tags
# 2002-04-07 fl added support for 1.5.2 and later
# 2003-04-13 fl added HTMLTreeBuilder alias
# 2004-12-02 fl don't feed non-ASCII charrefs/entities as 8-bit strings
# 2004-12-05 fl don't feed non-ASCII CDATA as 8-bit strings
#
# Copyright (c) 1999-2004 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
##
# Tools to build element trees from HTML files.
##
import htmlentitydefs
import re, string, sys
import mimetools, StringIO
import ElementTree
AUTOCLOSE = "p", "li", "tr", "th", "td", "head", "body"
IGNOREEND = "img", "hr", "meta", "link", "br"
if sys.version[:3] == "1.5":
is_not_ascii = re.compile(r"[\x80-\xff]").search # 1.5.2
else:
is_not_ascii = re.compile(eval(r'u"[\u0080-\uffff]"')).search
try:
from HTMLParser import HTMLParser
except ImportError:
from sgmllib import SGMLParser
# hack to use sgmllib's SGMLParser to emulate 2.2's HTMLParser
class HTMLParser(SGMLParser):
# the following only works as long as this class doesn't
# provide any do, start, or end handlers
def unknown_starttag(self, tag, attrs):
self.handle_starttag(tag, attrs)
def unknown_endtag(self, tag):
self.handle_endtag(tag)
##
# ElementTree builder for HTML source code. This builder converts an
# HTML document or fragment to an ElementTree.
# <p>
# The parser is relatively picky, and requires balanced tags for most
# elements. However, elements belonging to the following group are
# automatically closed: P, LI, TR, TH, and TD. In addition, the
# parser automatically inserts end tags immediately after the start
# tag, and ignores any end tags for the following group: IMG, HR,
# META, and LINK.
#
# @keyparam builder Optional builder object. If omitted, the parser
# uses the standard <b>elementtree</b> builder.
# @keyparam encoding Optional character encoding, if known. If omitted,
# the parser looks for META tags inside the document. If no tags
# are found, the parser defaults to ISO-8859-1. Note that if your
# document uses a non-ASCII compatible encoding, you must decode
# the document before parsing.
#
# @see elementtree.ElementTree
class HTMLTreeBuilder(HTMLParser):
# FIXME: shouldn't this class be named Parser, not Builder?
def __init__(self, builder=None, encoding=None):
self.__stack = []
if builder is None:
builder = ElementTree.TreeBuilder()
self.__builder = builder
self.encoding = encoding or "iso-8859-1"
HTMLParser.__init__(self)
##
# Flushes parser buffers, and return the root element.
#
# @return An Element instance.
def close(self):
HTMLParser.close(self)
return self.__builder.close()
##
# (Internal) Handles start tags.
def handle_starttag(self, tag, attrs):
if tag == "meta":
# look for encoding directives
http_equiv = content = None
for k, v in attrs:
if k == "http-equiv":
http_equiv = string.lower(v)
elif k == "content":
content = v
if http_equiv == "content-type" and content:
# use mimetools to parse the http header
header = mimetools.Message(
StringIO.StringIO("%s: %s\n\n" % (http_equiv, content))
)
encoding = header.getparam("charset")
if encoding:
self.encoding = encoding
if tag in AUTOCLOSE:
if self.__stack and self.__stack[-1] == tag:
self.handle_endtag(tag)
self.__stack.append(tag)
attrib = {}
if attrs:
for k, v in attrs:
attrib[string.lower(k)] = v
self.__builder.start(tag, attrib)
if tag in IGNOREEND:
self.__stack.pop()
self.__builder.end(tag)
##
# (Internal) Handles end tags.
def handle_endtag(self, tag):
if tag in IGNOREEND:
return
lasttag = self.__stack.pop()
if tag != lasttag and lasttag in AUTOCLOSE:
self.handle_endtag(lasttag)
self.__builder.end(tag)
##
# (Internal) Handles character references.
def handle_charref(self, char):
if char[:1] == "x":
char = int(char[1:], 16)
else:
char = int(char)
if 0 <= char < 128:
self.__builder.data(chr(char))
else:
self.__builder.data(unichr(char))
##
# (Internal) Handles entity references.
def handle_entityref(self, name):
entity = htmlentitydefs.entitydefs.get(name)
if entity:
if len(entity) == 1:
entity = ord(entity)
else:
entity = int(entity[2:-1])
if 0 <= entity < 128:
self.__builder.data(chr(entity))
else:
self.__builder.data(unichr(entity))
else:
self.unknown_entityref(name)
##
# (Internal) Handles character data.
def handle_data(self, data):
if isinstance(data, type('')) and is_not_ascii(data):
# convert to unicode, but only if necessary
data = unicode(data, self.encoding, "ignore")
self.__builder.data(data)
##
# (Hook) Handles unknown entity references. The default action
# is to ignore unknown entities.
def unknown_entityref(self, name):
pass # ignore by default; override if necessary
##
# An alias for the <b>HTMLTreeBuilder</b> class.
TreeBuilder = HTMLTreeBuilder
##
# Parse an HTML document or document fragment.
#
# @param source A filename or file object containing HTML data.
# @param encoding Optional character encoding, if known. If omitted,
# the parser looks for META tags inside the document. If no tags
# are found, the parser defaults to ISO-8859-1.
# @return An ElementTree instance
def parse(source, encoding=None):
return ElementTree.parse(source, HTMLTreeBuilder(encoding=encoding))
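##
# A hedged usage sketch: the first <p> below is never closed explicitly, but
# because P is in AUTOCLOSE the second <p> closes it automatically, so the tree
# still comes out with two sibling <p> elements. The markup is made up for
# illustration.
def _demo_autoclose():
    tree = parse(StringIO.StringIO("<html><body><p>one<p>two</p></body></html>"))
    return len(tree.getroot().find("body").findall("p"))  # == 2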
if __name__ == "__main__":
import sys
ElementTree.dump(parse(open(sys.argv[1])))
| apache-2.0 |
pexip/os-python-colorama | colorama/tests/initialise_test.py | 3 | 4244 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import os
import sys
from unittest import TestCase, main, skipUnless
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from ..ansitowin32 import StreamWrapper
from ..initialise import init
from .utils import osname, redirected_output, replace_by
orig_stdout = sys.stdout
orig_stderr = sys.stderr
class InitTest(TestCase):
@skipUnless(sys.stdout.isatty(), "sys.stdout is not a tty")
def setUp(self):
# sanity check
self.assertNotWrapped()
def tearDown(self):
sys.stdout = orig_stdout
sys.stderr = orig_stderr
def assertWrapped(self):
self.assertIsNot(sys.stdout, orig_stdout, 'stdout should be wrapped')
self.assertIsNot(sys.stderr, orig_stderr, 'stderr should be wrapped')
self.assertTrue(isinstance(sys.stdout, StreamWrapper),
'bad stdout wrapper')
self.assertTrue(isinstance(sys.stderr, StreamWrapper),
'bad stderr wrapper')
def assertNotWrapped(self):
self.assertIs(sys.stdout, orig_stdout, 'stdout should not be wrapped')
self.assertIs(sys.stderr, orig_stderr, 'stderr should not be wrapped')
@patch('colorama.initialise.reset_all')
@patch('colorama.ansitowin32.winapi_test', lambda *_: True)
def testInitWrapsOnWindows(self, _):
with osname("nt"):
init()
self.assertWrapped()
@patch('colorama.initialise.reset_all')
@patch('colorama.ansitowin32.winapi_test', lambda *_: False)
def testInitDoesntWrapOnEmulatedWindows(self, _):
with osname("nt"):
init()
self.assertNotWrapped()
def testInitDoesntWrapOnNonWindows(self):
with osname("posix"):
init()
self.assertNotWrapped()
def testInitDoesntWrapIfNone(self):
with replace_by(None):
init()
# We can't use assertNotWrapped here because replace_by(None)
# changes stdout/stderr already.
self.assertIsNone(sys.stdout)
self.assertIsNone(sys.stderr)
def testInitAutoresetOnWrapsOnAllPlatforms(self):
with osname("posix"):
init(autoreset=True)
self.assertWrapped()
def testInitWrapOffDoesntWrapOnWindows(self):
with osname("nt"):
init(wrap=False)
self.assertNotWrapped()
def testInitWrapOffIncompatibleWithAutoresetOn(self):
self.assertRaises(ValueError, lambda: init(autoreset=True, wrap=False))
@patch('colorama.ansitowin32.winterm', None)
@patch('colorama.ansitowin32.winapi_test', lambda *_: True)
def testInitOnlyWrapsOnce(self):
with osname("nt"):
init()
init()
self.assertWrapped()
@patch('colorama.win32.SetConsoleTextAttribute')
@patch('colorama.initialise.AnsiToWin32')
def testAutoResetPassedOn(self, mockATW32, _):
with osname("nt"):
init(autoreset=True)
self.assertEqual(len(mockATW32.call_args_list), 2)
self.assertEqual(mockATW32.call_args_list[1][1]['autoreset'], True)
self.assertEqual(mockATW32.call_args_list[0][1]['autoreset'], True)
@patch('colorama.initialise.AnsiToWin32')
def testAutoResetChangeable(self, mockATW32):
with osname("nt"):
init()
init(autoreset=True)
self.assertEqual(len(mockATW32.call_args_list), 4)
self.assertEqual(mockATW32.call_args_list[2][1]['autoreset'], True)
self.assertEqual(mockATW32.call_args_list[3][1]['autoreset'], True)
init()
self.assertEqual(len(mockATW32.call_args_list), 6)
self.assertEqual(
mockATW32.call_args_list[4][1]['autoreset'], False)
self.assertEqual(
mockATW32.call_args_list[5][1]['autoreset'], False)
@patch('colorama.initialise.atexit.register')
def testAtexitRegisteredOnlyOnce(self, mockRegister):
init()
self.assertTrue(mockRegister.called)
mockRegister.reset_mock()
init()
self.assertFalse(mockRegister.called)
if __name__ == '__main__':
main()
| bsd-3-clause |
etos/django | tests/annotations/tests.py | 6 | 21667 | import datetime
from decimal import Decimal
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db.models import (
BooleanField, CharField, Count, DateTimeField, ExpressionWrapper, F, Func,
IntegerField, NullBooleanField, Q, Sum, Value,
)
from django.db.models.functions import Length, Lower
from django.test import TestCase, skipUnlessDBFeature
from .models import (
Author, Book, Company, DepartmentStore, Employee, Publisher, Store, Ticket,
)
def cxOracle_py3_bug(func):
"""
There's a bug in Django/cx_Oracle with respect to string handling under
Python 3 (essentially, they treat Python 3 strings as Python 2 strings
rather than unicode). This makes some tests here fail under Python 3, so
we mark them as expected failures until someone fixes them in #23843.
"""
from unittest import expectedFailure
from django.db import connection
return expectedFailure(func) if connection.vendor == 'oracle' else func
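def expected_failure_on_vendor(vendor):
    """
    A hedged sketch generalizing the conditional-decorator pattern above: mark a
    test as an expected failure only when running against the given database
    vendor, and leave it untouched everywhere else. Illustrative helper, not part
    of Django's public API.
    """
    from unittest import expectedFailure
    from django.db import connection
    def decorator(func):
        return expectedFailure(func) if connection.vendor == vendor else func
    return decorator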
class NonAggregateAnnotationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = Book.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15)
)
cls.b6 = Book.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15)
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_basic_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()))
for book in books:
self.assertEqual(book.is_book, 1)
def test_basic_f_annotation(self):
books = Book.objects.annotate(another_rating=F('rating'))
for book in books:
self.assertEqual(book.another_rating, book.rating)
def test_joined_annotation(self):
books = Book.objects.select_related('publisher').annotate(
num_awards=F('publisher__num_awards'))
for book in books:
self.assertEqual(book.num_awards, book.publisher.num_awards)
def test_mixed_type_annotation_date_interval(self):
active = datetime.datetime(2015, 3, 20, 14, 0, 0)
duration = datetime.timedelta(hours=1)
expires = datetime.datetime(2015, 3, 20, 14, 0, 0) + duration
Ticket.objects.create(active_at=active, duration=duration)
t = Ticket.objects.annotate(
expires=ExpressionWrapper(F('active_at') + F('duration'), output_field=DateTimeField())
).first()
self.assertEqual(t.expires, expires)
def test_mixed_type_annotation_numbers(self):
test = self.b1
b = Book.objects.annotate(
combined=ExpressionWrapper(F('pages') + F('rating'), output_field=IntegerField())
).get(isbn=test.isbn)
combined = int(test.pages + test.rating)
self.assertEqual(b.combined, combined)
def test_empty_expression_annotation(self):
books = Book.objects.annotate(
selected=ExpressionWrapper(Q(pk__in=[]), output_field=BooleanField())
)
self.assertEqual(len(books), Book.objects.count())
self.assertTrue(all(not book.selected for book in books))
books = Book.objects.annotate(
selected=ExpressionWrapper(Q(pk__in=Book.objects.none()), output_field=BooleanField())
)
self.assertEqual(len(books), Book.objects.count())
self.assertTrue(all(not book.selected for book in books))
def test_annotate_with_aggregation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()),
rating_count=Count('rating'))
for book in books:
self.assertEqual(book.is_book, 1)
self.assertEqual(book.rating_count, 1)
def test_aggregate_over_annotation(self):
agg = Author.objects.annotate(other_age=F('age')).aggregate(otherage_sum=Sum('other_age'))
other_agg = Author.objects.aggregate(age_sum=Sum('age'))
self.assertEqual(agg['otherage_sum'], other_agg['age_sum'])
@skipUnlessDBFeature('can_distinct_on_fields')
def test_distinct_on_with_annotation(self):
store = Store.objects.create(
name='test store',
original_opening=datetime.datetime.now(),
friday_night_closing=datetime.time(21, 00, 00),
)
names = [
'Theodore Roosevelt',
'Eleanor Roosevelt',
'Franklin Roosevelt',
'Ned Stark',
'Catelyn Stark',
]
for name in names:
Employee.objects.create(
store=store,
first_name=name.split()[0],
last_name=name.split()[1],
age=30, salary=2000,
)
people = Employee.objects.annotate(
name_lower=Lower('last_name'),
).distinct('name_lower')
self.assertEqual({p.last_name for p in people}, {'Stark', 'Roosevelt'})
self.assertEqual(len(people), 2)
people2 = Employee.objects.annotate(
test_alias=F('store__name'),
).distinct('test_alias')
self.assertEqual(len(people2), 1)
lengths = Employee.objects.annotate(
name_len=Length('first_name'),
).distinct('name_len').values_list('name_len', flat=True)
self.assertSequenceEqual(lengths, [3, 7, 8])
def test_filter_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField())
).filter(is_book=1)
for book in books:
self.assertEqual(book.is_book, 1)
def test_filter_annotation_with_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=3.5)
for book in books:
self.assertEqual(book.other_rating, 3.5)
def test_filter_annotation_with_double_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=F('rating'))
for book in books:
self.assertEqual(book.other_rating, book.rating)
def test_filter_agg_with_double_f(self):
books = Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('sum_rating'))
for book in books:
self.assertEqual(book.sum_rating, book.rating)
def test_filter_wrong_annotation(self):
with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."):
list(Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('nope')))
def test_combined_annotation_commutative(self):
book1 = Book.objects.annotate(adjusted_rating=F('rating') + 2).get(pk=self.b1.pk)
book2 = Book.objects.annotate(adjusted_rating=2 + F('rating')).get(pk=self.b1.pk)
self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
book1 = Book.objects.annotate(adjusted_rating=F('rating') + None).get(pk=self.b1.pk)
book2 = Book.objects.annotate(adjusted_rating=None + F('rating')).get(pk=self.b1.pk)
self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
def test_update_with_annotation(self):
book_preupdate = Book.objects.get(pk=self.b2.pk)
Book.objects.annotate(other_rating=F('rating') - 1).update(rating=F('other_rating'))
book_postupdate = Book.objects.get(pk=self.b2.pk)
self.assertEqual(book_preupdate.rating - 1, book_postupdate.rating)
def test_annotation_with_m2m(self):
books = Book.objects.annotate(author_age=F('authors__age')).filter(pk=self.b1.pk).order_by('author_age')
self.assertEqual(books[0].author_age, 34)
self.assertEqual(books[1].author_age, 35)
def test_annotation_reverse_m2m(self):
books = Book.objects.annotate(
store_name=F('store__name')).filter(
name='Practical Django Projects').order_by(
'store_name')
self.assertQuerysetEqual(
books, [
'Amazon.com',
'Books.com',
'Mamma and Pappa\'s Books'
],
lambda b: b.store_name
)
def test_values_annotation(self):
"""
Annotations can reference fields in a values clause,
and contribute to an existing values clause.
"""
# annotate references a field in values()
qs = Book.objects.values('rating').annotate(other_rating=F('rating') - 1)
book = qs.get(pk=self.b1.pk)
self.assertEqual(book['rating'] - 1, book['other_rating'])
# filter refs the annotated value
book = qs.get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
# can annotate an existing values with a new field
book = qs.annotate(other_isbn=F('isbn')).get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
self.assertEqual(book['other_isbn'], '155860191')
def test_values_with_pk_annotation(self):
# annotate references a field in values() with pk
publishers = Publisher.objects.values('id', 'book__rating').annotate(total=Sum('book__rating'))
for publisher in publishers.filter(pk=self.p1.pk):
self.assertEqual(publisher['book__rating'], publisher['total'])
def test_defer_annotation(self):
"""
Deferred attributes can be referenced by an annotation,
but they are not themselves deferred, and cannot be deferred.
"""
qs = Book.objects.defer('rating').annotate(other_rating=F('rating') - 1)
with self.assertNumQueries(2):
book = qs.get(other_rating=4)
self.assertEqual(book.rating, 5)
self.assertEqual(book.other_rating, 4)
with self.assertRaisesMessage(FieldDoesNotExist, "Book has no field named 'other_rating'"):
book = qs.defer('other_rating').get(other_rating=4)
def test_mti_annotations(self):
"""
Fields on an inherited model can be referenced by an
annotated field.
"""
d = DepartmentStore.objects.create(
name='Angus & Robinson',
original_opening=datetime.date(2014, 3, 8),
friday_night_closing=datetime.time(21, 00, 00),
chain='Westfield'
)
books = Book.objects.filter(rating__gt=4)
for b in books:
d.books.add(b)
qs = DepartmentStore.objects.annotate(
other_name=F('name'),
other_chain=F('chain'),
is_open=Value(True, BooleanField()),
book_isbn=F('books__isbn')
).order_by('book_isbn').filter(chain='Westfield')
self.assertQuerysetEqual(
qs, [
('Angus & Robinson', 'Westfield', True, '155860191'),
('Angus & Robinson', 'Westfield', True, '159059725')
],
lambda d: (d.other_name, d.other_chain, d.is_open, d.book_isbn)
)
def test_null_annotation(self):
"""
Annotating None onto a model round-trips
"""
book = Book.objects.annotate(no_value=Value(None, output_field=IntegerField())).first()
self.assertIsNone(book.no_value)
def test_order_by_annotation(self):
authors = Author.objects.annotate(other_age=F('age')).order_by('other_age')
self.assertQuerysetEqual(
authors, [
25, 29, 29, 34, 35, 37, 45, 46, 57,
],
lambda a: a.other_age
)
def test_order_by_aggregate(self):
authors = Author.objects.values('age').annotate(age_count=Count('age')).order_by('age_count', 'age')
self.assertQuerysetEqual(
authors, [
(25, 1), (34, 1), (35, 1), (37, 1), (45, 1), (46, 1), (57, 1), (29, 2),
],
lambda a: (a['age'], a['age_count'])
)
def test_annotate_exists(self):
authors = Author.objects.annotate(c=Count('id')).filter(c__gt=1)
self.assertFalse(authors.exists())
def test_column_field_ordering(self):
"""
Columns are aligned in the correct order for resolve_columns. This test
will fail on MySQL if column ordering is out. Column fields should be
aligned as:
1. extra_select
2. model_fields
3. annotation_fields
4. model_related_fields
"""
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
self.assertQuerysetEqual(
qs.order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
def test_column_field_ordering_with_deferred(self):
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
# and we respect deferred columns!
self.assertQuerysetEqual(
qs.defer('age').order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
@cxOracle_py3_bug
def test_custom_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE'
)
).order_by('name')
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'),
('Django Software Foundation', 'No Tag'),
('Google', 'Do No Evil'),
('Yahoo', 'Internet Company')
],
lambda c: (c.name, c.tagline)
)
@cxOracle_py3_bug
def test_custom_functions_can_ref_other_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
class Lower(Func):
function = 'LOWER'
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE')
).annotate(
tagline_lower=Lower(F('tagline'), output_field=CharField())
).order_by('name')
# LOWER function supported by:
# oracle, postgres, mysql, sqlite, sqlserver
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'.lower()),
('Django Software Foundation', 'No Tag'.lower()),
('Google', 'Do No Evil'.lower()),
('Yahoo', 'Internet Company'.lower())
],
lambda c: (c.name, c.tagline_lower)
)
def test_boolean_value_annotation(self):
books = Book.objects.annotate(
is_book=Value(True, output_field=BooleanField()),
is_pony=Value(False, output_field=BooleanField()),
is_none=Value(None, output_field=NullBooleanField()),
)
self.assertGreater(len(books), 0)
for book in books:
self.assertIs(book.is_book, True)
self.assertIs(book.is_pony, False)
self.assertIsNone(book.is_none)
def test_arguments_must_be_expressions(self):
msg = 'QuerySet.annotate() received non-expression(s): %s.'
with self.assertRaisesMessage(TypeError, msg % BooleanField()):
Book.objects.annotate(BooleanField())
with self.assertRaisesMessage(TypeError, msg % True):
Book.objects.annotate(is_book=True)
with self.assertRaisesMessage(TypeError, msg % ', '.join([str(BooleanField()), 'True'])):
Book.objects.annotate(BooleanField(), Value(False), is_book=True)
| bsd-3-clause |
McNetic/CouchPotatoServer-de | libs/CodernityDB/rr_cache.py | 82 | 3673 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from random import choice
def cache1lvl(maxsize=100):
def decorating_function(user_function):
cache1lvl = {}
@functools.wraps(user_function)
def wrapper(key, *args, **kwargs):
try:
result = cache1lvl[key]
except KeyError:
if len(cache1lvl) == maxsize:
for i in xrange(maxsize // 10 or 1):
del cache1lvl[choice(cache1lvl.keys())]
cache1lvl[key] = user_function(key, *args, **kwargs)
result = cache1lvl[key]
# result = user_function(obj, key, *args, **kwargs)
return result
def clear():
cache1lvl.clear()
def delete(key):
try:
del cache1lvl[key]
return True
except KeyError:
return False
wrapper.clear = clear
wrapper.cache = cache1lvl
wrapper.delete = delete
return wrapper
return decorating_function
def cache2lvl(maxsize=100):
def decorating_function(user_function):
cache = {}
@functools.wraps(user_function)
def wrapper(*args, **kwargs):
# return user_function(*args, **kwargs)
try:
result = cache[args[0]][args[1]]
except KeyError:
# print wrapper.cache_size
if wrapper.cache_size == maxsize:
to_delete = maxsize // 10 or 1
for i in xrange(to_delete):
key1 = choice(cache.keys())
key2 = choice(cache[key1].keys())
del cache[key1][key2]
if not cache[key1]:
del cache[key1]
wrapper.cache_size -= to_delete
# print wrapper.cache_size
result = user_function(*args, **kwargs)
try:
cache[args[0]][args[1]] = result
except KeyError:
cache[args[0]] = {args[1]: result}
wrapper.cache_size += 1
return result
def clear():
cache.clear()
wrapper.cache_size = 0
def delete(key, inner_key=None):
if inner_key:
try:
del cache[key][inner_key]
if not cache[key]:
del cache[key]
wrapper.cache_size -= 1
return True
except KeyError:
return False
else:
try:
wrapper.cache_size -= len(cache[key])
del cache[key]
return True
except KeyError:
return False
wrapper.clear = clear
wrapper.cache = cache
wrapper.delete = delete
wrapper.cache_size = 0
return wrapper
return decorating_function
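# Minimal usage sketch (not part of the original module): cache1lvl memoizes
# on the first positional argument and evicts random entries once maxsize is
# reached; delete() invalidates a single key.
if __name__ == '__main__':
    @cache1lvl(maxsize=100)
    def square(key):
        return key * key

    assert square(4) == 16   # computed and cached
    assert square(4) == 16   # served from the cache
    square.delete(4)         # explicit invalidation
    assert square(4) == 16   # recomputed after deletion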
| gpl-3.0 |
ClifHouck/desperado | desperado/currency.py | 1 | 1299 | import re
class InvalidCurrencyFormat(Exception):
    def __init__(self, reason, data):
        # Pass the reason through so str(exc) carries a useful message.
        super(InvalidCurrencyFormat, self).__init__(reason)
        self.reason = reason
        self.data = data
# XXX
# FIXME: We're entering dangerous territory here... test thoroughly before doing anything significant.
# Or maybe replace entirely.
# XXX
class Dollars(object):
def __init__(self, dollars, cents):
self.dollars = dollars
self.cents = cents
self.only_cents = dollars * 100 + cents
def to_cents(self):
return self.only_cents
@staticmethod
def from_cents(cents):
dollars = cents // 100
cents = cents % 100
return Dollars(dollars, cents)
@staticmethod
def from_string(text):
dollars, cents = Dollars.__parse_string(text)
return Dollars(dollars, cents)
@staticmethod
def __parse_string(text):
        match = re.search(r"(\d+)\.(\d+)", text)
if match:
return (int(match.group(1)), int(match.group(2)))
raise InvalidCurrencyFormat("Could not convert string into a dollar amount.", text)
def __add__(self, other):
total_cents = self.only_cents + other.only_cents
return Dollars.from_cents(total_cents)
def __str__(self):
return "$%d.%02d" % (self.dollars, self.cents)
| bsd-3-clause |
flutter/engine | build/copy_info_plist.py | 14 | 1271 | #!/usr/bin/env python
#
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Copies the Info.plist and adds extra fields to it like the git hash of the
engine.
Precondition: $CWD/../../flutter is the path to the flutter engine repo.
usage: copy_info_plist.py <src_path> <dest_path> --bitcode=<enable_bitcode>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import subprocess
import sys
import git_revision
import os
def GetClangVersion(bitcode):
clang_executable = str(os.path.join("..", "..", "buildtools", "mac-x64", "clang", "bin", "clang++"))
if bitcode:
clang_executable = "clang++"
version = subprocess.check_output([clang_executable, "--version"])
return version.splitlines()[0]
def main():
  with open(sys.argv[1]) as infile:
    text = infile.read()
engine_path = os.path.join(os.getcwd(), "..", "..", "flutter")
revision = git_revision.GetRepositoryVersion(engine_path)
clang_version = GetClangVersion(sys.argv[3] == "--bitcode=true")
text = text.format(revision, clang_version)
with open(sys.argv[2], "w") as outfile:
outfile.write(text)
if __name__ == "__main__":
main()
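  # Example invocation (paths are illustrative). The third argument must be
  # exactly "--bitcode=true" to use the system clang++:
  #   python copy_info_plist.py Info.plist out/Info.plist --bitcode=false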
| bsd-3-clause |
MichaelCoughlinAN/Odds-N-Ends | Python/Python Modules/lxml-4.2.0/doc/docstructure.py | 19 | 1107 |
import os
if os.path.exists(os.path.join(os.path.dirname(__file__), '..', 'funding.txt')):
funding = ('../funding.txt',)
else:
funding = ()
SITE_STRUCTURE = [
('lxml', ('main.txt', 'intro.txt', '../INSTALL.txt', # 'lxml2.txt',
'performance.txt', 'compatibility.txt', 'FAQ.txt') + funding),
('Developing with lxml', ('tutorial.txt', '@API reference',
'api.txt', 'parsing.txt',
'validation.txt', 'xpathxslt.txt',
'objectify.txt', 'lxmlhtml.txt',
'cssselect.txt', 'elementsoup.txt',
'html5parser.txt')),
('Extending lxml', ('resolvers.txt', 'extensions.txt',
'element_classes.txt', 'sax.txt', 'capi.txt')),
('Developing lxml', ('build.txt', 'lxml-source-howto.txt',
'@Release Changelog', '../CREDITS.txt')),
]
HREF_MAP = {
"API reference" : "api/index.html"
}
BASENAME_MAP = {
'main' : 'index',
'INSTALL' : 'installation',
'CREDITS' : 'credits',
}
| gpl-3.0 |
beeverycreative/BeePanel | Settings.py | 1 | 5473 | #!/usr/bin/env python3
"""
* Copyright (c) 2015 BEEVC - Electronic Systems This file is part of BEESOFT
* software: you can redistribute it and/or modify it under the terms of the GNU
* General Public License as published by the Free Software Foundation, either
* version 3 of the License, or (at your option) any later version. BEESOFT is
* distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
* PARTICULAR PURPOSE. See the GNU General Public License for more details. You
* should have received a copy of the GNU General Public License along with
* BEESOFT. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = "Marcos Gomes"
__license__ = "MIT"
import os
import pygame
class SettingsScreen():
screen = None
interfaceLoader = None
lblFontColor = None
lblXPos = None
lblYPos = None
lblText = None
lblFont = None
    lbls = None
buttons = None
updateReady = None
exitNeedsHoming = False
exitCallBackResp = None
"""*************************************************************************
Init Method
Inits current screen components
*************************************************************************"""
def __init__(self, screen, interfaceLoader, comm):
"""
        Loads the screen components (labels and buttons) from the interface loader.
"""
print("Loading Settings Screen Components")
self.screen = screen
self.interfaceLoader = interfaceLoader
self.updateReady = False
self.lblFontColor = self.interfaceLoader.GetLblsFontColor()
self.lblXPos = self.interfaceLoader.GetLblsXPos()
self.lblYPos = self.interfaceLoader.GetLblsYPos()
self.lblText = self.interfaceLoader.GetLblsText()
self.lblFont = self.interfaceLoader.GetLblsFont()
self.buttons = self.interfaceLoader.GetButtonsList()
return
"""*************************************************************************
handle_events Method
Received the event vector and checks if it has any event from interface items
*************************************************************************"""
def handle_events(self,retVal):
"""handle all events."""
for event in retVal:
for btn in self.buttons:
if 'click' in btn.handleEvent(event):
btnName = btn._propGetName()
if btnName == "Update Cura":
print("Updating Cura...")
elif btnName == "Update WiFi":
print("Updating WiFi...")
elif btnName == "Screen Calibration":
os.system("sudo TSLIB_FBDEVICE=/dev/fb1 TSLIB_TSDEVICE=/dev/input/touchscreen ts_calibrate")
elif btnName == "Quit BEETFT":
#pygame.quit()
self.exitCallBackResp = "Quit"
return
"""*************************************************************************
update Method
Updates screen components
*************************************************************************"""
def update(self):
self.lbls = []
for i in range(0,len(self.lblText)):
self.lbls.append(self.lblFont[i].render(self.lblText[i], 1, self.lblFontColor[i]))
for btn in self.buttons:
btn.visible = True
return
"""*************************************************************************
draw Method
Draws current screen
*************************************************************************"""
def draw(self):
for i in range(0,len(self.lblText)):
self.screen.blit(self.lbls[i], (self.lblXPos[i],self.lblYPos[i]))
for btn in self.buttons:
btn.draw(self.screen)
return
"""*************************************************************************
GetCurrentScreenName Method
    Returns the name of the current screen
*************************************************************************"""
def GetCurrentScreenName(self):
return "Settings"
"""*************************************************************************
KillAll Method
    Frees every element from memory
*************************************************************************"""
def KillAll(self):
return
"""*************************************************************************
ExitCallBack Method
Tells the main class to load the default interface
*************************************************************************"""
def ExitCallBack(self):
return self.exitCallBackResp
"""*************************************************************************
Pull Method
Pull variables
*************************************************************************"""
def Pull(self):
return | gpl-2.0 |
popazerty/beyonwiz-sh4 | lib/python/Components/Renderer/PositionGauge.py | 132 | 1892 | from Renderer import Renderer
from enigma import ePositionGauge
class PositionGauge(Renderer):
def __init__(self):
Renderer.__init__(self)
self.__position = 0
self.__seek_position = 0
self.__length = 0
self.__seek_enable = 0
self.__cutlist = [ ]
GUI_WIDGET = ePositionGauge
def postWidgetCreate(self, instance):
self.changed((self.CHANGED_DEFAULT,))
self.cutlist_changed()
instance.setInOutList(self.__cutlist)
def changed(self, what):
if what[0] == self.CHANGED_CLEAR:
			(self.length, self.position) = (0, 0)
else:
(self.length, self.position) = (self.source.length or 0, self.source.position or 0)
def cutlist_changed(self):
self.cutlist = self.source.cutlist or [ ]
def getPosition(self):
return self.__position
def setPosition(self, pos):
self.__position = pos
if self.instance is not None:
self.instance.setPosition(pos)
position = property(getPosition, setPosition)
def getLength(self):
return self.__length
def setLength(self, len):
self.__length = len
if self.instance is not None:
self.instance.setLength(len)
length = property(getLength, setLength)
def getCutlist(self):
return self.__cutlist
def setCutlist(self, cutlist):
if self.__cutlist != cutlist:
self.__cutlist = cutlist
if self.instance is not None:
self.instance.setInOutList(cutlist)
cutlist = property(getCutlist, setCutlist)
def getSeekEnable(self):
return self.__seek_enable
def setSeekEnable(self, val):
self.__seek_enable = val
if self.instance is not None:
self.instance.enableSeekPointer(val)
seek_pointer_enabled = property(getSeekEnable, setSeekEnable)
def getSeekPosition(self):
return self.__seek_position
def setSeekPosition(self, pos):
self.__seek_position = pos
if self.instance is not None:
self.instance.setSeekPosition(pos)
seek_pointer_position = property(getSeekPosition, setSeekPosition)
| gpl-2.0 |
eemirtekin/edx-platform | common/lib/xmodule/xmodule/tests/test_progress.py | 56 | 5073 | """Module progress tests"""
import unittest
from mock import Mock
from xblock.field_data import DictFieldData
from xmodule.progress import Progress
from xmodule import x_module
from . import get_test_system
class ProgressTest(unittest.TestCase):
''' Test that basic Progress objects work. A Progress represents a
fraction between 0 and 1.
'''
not_started = Progress(0, 17)
part_done = Progress(2, 6)
half_done = Progress(3, 6)
also_half_done = Progress(1, 2)
done = Progress(7, 7)
def test_create_object(self):
# These should work:
prg1 = Progress(0, 2) # pylint: disable=unused-variable
prg2 = Progress(1, 2) # pylint: disable=unused-variable
prg3 = Progress(2, 2) # pylint: disable=unused-variable
prg4 = Progress(2.5, 5.0) # pylint: disable=unused-variable
prg5 = Progress(3.7, 12.3333) # pylint: disable=unused-variable
# These shouldn't
self.assertRaises(ValueError, Progress, 0, 0)
self.assertRaises(ValueError, Progress, 2, 0)
self.assertRaises(ValueError, Progress, 1, -2)
self.assertRaises(TypeError, Progress, 0, "all")
# check complex numbers just for the heck of it :)
self.assertRaises(TypeError, Progress, 2j, 3)
def test_clamp(self):
self.assertEqual((2, 2), Progress(3, 2).frac())
self.assertEqual((0, 2), Progress(-2, 2).frac())
def test_frac(self):
prg = Progress(1, 2)
(a_mem, b_mem) = prg.frac()
self.assertEqual(a_mem, 1)
self.assertEqual(b_mem, 2)
def test_percent(self):
self.assertEqual(self.not_started.percent(), 0)
self.assertAlmostEqual(self.part_done.percent(), 33.33333333333333)
self.assertEqual(self.half_done.percent(), 50)
self.assertEqual(self.done.percent(), 100)
self.assertEqual(self.half_done.percent(), self.also_half_done.percent())
def test_started(self):
self.assertFalse(self.not_started.started())
self.assertTrue(self.part_done.started())
self.assertTrue(self.half_done.started())
self.assertTrue(self.done.started())
def test_inprogress(self):
# only true if working on it
self.assertFalse(self.done.inprogress())
self.assertFalse(self.not_started.inprogress())
self.assertTrue(self.part_done.inprogress())
self.assertTrue(self.half_done.inprogress())
def test_done(self):
self.assertTrue(self.done.done())
self.assertFalse(self.half_done.done())
self.assertFalse(self.not_started.done())
def test_str(self):
self.assertEqual(str(self.not_started), "0/17")
self.assertEqual(str(self.part_done), "2/6")
self.assertEqual(str(self.done), "7/7")
def test_ternary_str(self):
self.assertEqual(self.not_started.ternary_str(), "none")
self.assertEqual(self.half_done.ternary_str(), "in_progress")
self.assertEqual(self.done.ternary_str(), "done")
def test_to_js_status(self):
'''Test the Progress.to_js_status_str() method'''
self.assertEqual(Progress.to_js_status_str(self.not_started), "none")
self.assertEqual(Progress.to_js_status_str(self.half_done), "in_progress")
self.assertEqual(Progress.to_js_status_str(self.done), "done")
self.assertEqual(Progress.to_js_status_str(None), "0")
def test_to_js_detail_str(self):
'''Test the Progress.to_js_detail_str() method'''
f = Progress.to_js_detail_str
for prg in (self.not_started, self.half_done, self.done):
self.assertEqual(f(prg), str(prg))
# But None should be encoded as 0
self.assertEqual(f(None), "0")
def test_add(self):
'''Test the Progress.add_counts() method'''
prg1 = Progress(0, 2)
prg2 = Progress(1, 3)
prg3 = Progress(2, 5)
prg_none = None
add = lambda a, b: Progress.add_counts(a, b).frac()
self.assertEqual(add(prg1, prg1), (0, 4))
self.assertEqual(add(prg1, prg2), (1, 5))
self.assertEqual(add(prg2, prg3), (3, 8))
self.assertEqual(add(prg2, prg_none), prg2.frac())
self.assertEqual(add(prg_none, prg2), prg2.frac())
def test_equality(self):
'''Test that comparing Progress objects for equality
works correctly.'''
prg1 = Progress(1, 2)
prg2 = Progress(2, 4)
prg3 = Progress(1, 2)
self.assertTrue(prg1 == prg3)
self.assertFalse(prg1 == prg2)
# Check != while we're at it
self.assertTrue(prg1 != prg2)
self.assertFalse(prg1 != prg3)
class ModuleProgressTest(unittest.TestCase):
''' Test that get_progress() does the right thing for the different modules
'''
def test_xmodule_default(self):
'''Make sure default get_progress exists, returns None'''
xmod = x_module.XModule(Mock(), get_test_system(), DictFieldData({'location': 'a://b/c/d/e'}), Mock())
prg = xmod.get_progress()
self.assertEqual(prg, None)
| agpl-3.0 |
marratj/ansible | lib/ansible/modules/network/nxos/nxos_pim_rp_address.py | 17 | 6358 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_pim_rp_address
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages configuration of an PIM static RP address instance.
description:
- Manages configuration of an Protocol Independent Multicast (PIM) static
rendezvous point (RP) address instance.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- C(state=absent) is currently not supported on all platforms.
options:
rp_address:
description:
- Configures a Protocol Independent Multicast (PIM) static
rendezvous point (RP) address. Valid values are
unicast addresses.
required: true
group_list:
description:
- Group range for static RP. Valid values are multicast addresses.
required: false
default: null
prefix_list:
description:
- Prefix list policy for static RP. Valid values are prefix-list
policy names.
required: false
default: null
route_map:
description:
- Route map policy for static RP. Valid values are route-map
policy names.
required: false
default: null
bidir:
description:
- Group range is treated in PIM bidirectional mode.
required: false
choices: ['true','false']
default: null
'''
EXAMPLES = '''
- nxos_pim_rp_address:
rp_address: "10.1.1.20"
state: present
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
  sample: ["ip pim rp-address 10.1.1.20"]
'''
import re
from ansible.module_utils.nxos import get_config, load_config
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
def get_existing(module, args):
existing = {}
config = str(get_config(module))
address = module.params['rp_address']
pim_address_re = r'ip pim rp-address (?P<value>.*)$'
for line in re.findall(pim_address_re, config, re.M):
values = line.split()
if values[0] != address:
continue
existing['bidir'] = existing.get('bidir') or 'bidir' in line
if len(values) > 2:
value = values[2]
if values[1] == 'route-map':
existing['route_map'] = value
elif values[1] == 'prefix-list':
existing['prefix_list'] = value
elif values[1] == 'group-list':
existing['group_list'] = value
return existing
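# For illustration: a config line "ip pim rp-address 10.1.1.20 group-list
# 224.0.0.0/4" yields existing = {'bidir': False, 'group_list': '224.0.0.0/4'}.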
def state_present(module, existing, proposed, candidate):
address = module.params['rp_address']
command = 'ip pim rp-address {0}'.format(address)
commands = build_command(proposed, command)
if commands:
candidate.add(commands, parents=[])
def build_command(param_dict, command):
for param in ['group_list', 'prefix_list', 'route_map']:
if param_dict.get(param):
command += ' {0} {1}'.format(
param.replace('_', '-'), param_dict.get(param))
if param_dict.get('bidir'):
command += ' bidir'
return [command]
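# For illustration: build_command({'group_list': '224.0.0.0/4', 'bidir': True},
# 'ip pim rp-address 10.1.1.20') returns
# ['ip pim rp-address 10.1.1.20 group-list 224.0.0.0/4 bidir'].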
def state_absent(module, existing, candidate):
address = module.params['rp_address']
command = 'no ip pim rp-address {0}'.format(address)
if existing.get('group_list'):
commands = build_command(existing, command)
else:
commands = [command]
candidate.add(commands, parents=[])
def main():
argument_spec = dict(
rp_address=dict(required=True, type='str'),
group_list=dict(required=False, type='str'),
prefix_list=dict(required=False, type='str'),
route_map=dict(required=False, type='str'),
bidir=dict(required=False, type='bool'),
state=dict(choices=['present', 'absent'], default='present', required=False),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[['group_list', 'route_map'],
['group_list', 'prefix_list'],
['route_map', 'prefix_list']],
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False, 'commands': [], 'warnings': warnings}
state = module.params['state']
args = [
'rp_address',
'group_list',
'prefix_list',
'route_map',
'bidir'
]
existing = get_existing(module, args)
proposed_args = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.items():
if key != 'rp_address':
if str(value).lower() == 'true':
value = True
elif str(value).lower() == 'false':
value = False
if existing.get(key) != value:
proposed[key] = value
candidate = CustomNetworkConfig(indent=3)
if state == 'present' and (proposed or not existing):
state_present(module, existing, proposed, candidate)
elif state == 'absent' and existing:
state_absent(module, existing, candidate)
if candidate:
candidate = candidate.items_text()
result['commands'] = candidate
result['changed'] = True
load_config(module, candidate)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
apache/incubator-airflow | airflow/api_connexion/schemas/task_schema.py | 10 | 2992 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List, NamedTuple
from marshmallow import Schema, fields
from airflow.api_connexion.schemas.common_schema import (
ClassReferenceSchema,
ColorField,
TimeDeltaSchema,
WeightRuleField,
)
from airflow.api_connexion.schemas.dag_schema import DAGSchema
from airflow.models.baseoperator import BaseOperator
class TaskSchema(Schema):
"""Task schema"""
class_ref = fields.Method("_get_class_reference", dump_only=True)
task_id = fields.String(dump_only=True)
owner = fields.String(dump_only=True)
start_date = fields.DateTime(dump_only=True)
end_date = fields.DateTime(dump_only=True)
trigger_rule = fields.String(dump_only=True)
extra_links = fields.List(
fields.Nested(ClassReferenceSchema), dump_only=True, attribute="operator_extra_links"
)
depends_on_past = fields.Boolean(dump_only=True)
wait_for_downstream = fields.Boolean(dump_only=True)
retries = fields.Number(dump_only=True)
queue = fields.String(dump_only=True)
pool = fields.String(dump_only=True)
pool_slots = fields.Number(dump_only=True)
execution_timeout = fields.Nested(TimeDeltaSchema, dump_only=True)
retry_delay = fields.Nested(TimeDeltaSchema, dump_only=True)
retry_exponential_backoff = fields.Boolean(dump_only=True)
priority_weight = fields.Number(dump_only=True)
weight_rule = WeightRuleField(dump_only=True)
ui_color = ColorField(dump_only=True)
ui_fgcolor = ColorField(dump_only=True)
template_fields = fields.List(fields.String(), dump_only=True)
sub_dag = fields.Nested(DAGSchema, dump_only=True)
downstream_task_ids = fields.List(fields.String(), dump_only=True)
def _get_class_reference(self, obj):
result = ClassReferenceSchema().dump(obj)
return result.data if hasattr(result, "data") else result
class TaskCollection(NamedTuple):
"""List of Tasks with metadata"""
tasks: List[BaseOperator]
total_entries: int
class TaskCollectionSchema(Schema):
"""Schema for TaskCollection"""
tasks = fields.List(fields.Nested(TaskSchema))
total_entries = fields.Int()
task_schema = TaskSchema()
task_collection_schema = TaskCollectionSchema()
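# Minimal usage sketch (the DummyOperator import path is an assumption and
# varies across Airflow versions):
#   from airflow.operators.dummy import DummyOperator
#   task = DummyOperator(task_id='noop')
#   serialized = task_schema.dump(task)  # -> dict of the dump_only fields above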
| apache-2.0 |
ThomasMiconi/htmresearch | projects/capybara/sandbox/classification/run_htm_network.py | 9 | 13195 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import argparse
import csv
import logging
import json
import numpy as np
import simplejson
import os
from nupic.data.file_record_stream import FileRecordStream
from htmresearch.frameworks.classification.network_factory import (
createAndConfigureNetwork, setRegionLearning, loadNetwork, saveNetwork)
_LOGGER = logging.getLogger()
_LOGGER.setLevel(logging.DEBUG)
def initTrace():
trace = {
'recordNumber': [],
'sensorValue': [],
'actualCategory': [],
'spActiveColumns': [],
'tmActiveCells': [],
'tmPredictedActiveCells': [],
'anomalyScore': [],
'classificationInference': [],
'classificationAccuracy': [],
}
return trace
def computeAccuracy(value, expectedValue):
if value != expectedValue:
accuracy = 0
else:
accuracy = 1
return accuracy
def computeNetworkStats(sensorRegion,
spRegion,
tmRegion,
classifierRegion):
"""
Compute HTM network statistics
"""
sensorValue = sensorRegion.getOutputData('sourceOut')[0]
actualCategory = sensorRegion.getOutputData('categoryOut')[0]
if spRegion:
encoderNZ = spRegion.getInputData('bottomUpIn').astype(int).nonzero()[0]
_LOGGER.debug('Encoder non-zero indices: %s' % encoderNZ)
spActiveColumns = spRegion.getOutputData(
'bottomUpOut').astype(int).nonzero()[0]
else:
spActiveColumns = None
if tmRegion:
tmPredictedActiveCells = tmRegion.getOutputData(
'predictedActiveCells').astype(int).nonzero()[0]
tmActiveCells = tmRegion.getOutputData(
'activeCells').astype(int).nonzero()[0]
anomalyScore = tmRegion.getOutputData(
'anomalyScore')[0]
else:
tmActiveCells = None
tmPredictedActiveCells = None
anomalyScore = None
classificationInference = getClassifierInference(classifierRegion)
classificationAccuracy = computeAccuracy(classificationInference,
actualCategory)
return (sensorValue,
actualCategory,
spActiveColumns,
tmActiveCells,
tmPredictedActiveCells,
anomalyScore,
classificationInference,
classificationAccuracy)
def updateTrace(trace,
recordNumber,
sensorValue,
actualCategory,
spActiveColumns,
tmActiveCells,
tmPredictedActiveCells,
anomalyScore,
classificationInference,
classificationAccuracy):
trace['recordNumber'].append(recordNumber)
trace['sensorValue'].append(sensorValue)
trace['actualCategory'].append(actualCategory)
trace['spActiveColumns'].append(spActiveColumns)
trace['tmActiveCells'].append(tmActiveCells)
trace['tmPredictedActiveCells'].append(tmPredictedActiveCells)
trace['anomalyScore'].append(anomalyScore)
trace['classificationInference'].append(classificationInference)
trace['classificationAccuracy'].append(classificationAccuracy)
def outputClassificationInfo(recordNumber,
sensorValue,
actualCategory,
anomalyScore,
classificationInference,
classificationAccuracy):
# Network
_LOGGER.debug('-> recordNumber: %s' % recordNumber)
_LOGGER.debug('-> sensorValue: %s' % sensorValue)
_LOGGER.debug('-> actualCategory: %s' % actualCategory)
_LOGGER.debug('-> anomalyScore: %s' % anomalyScore)
# Classification
_LOGGER.debug('-> classificationInference: %s' % classificationInference)
_LOGGER.debug('-> classificationAccuracy: %s / 1' % classificationAccuracy)
def getTraceFileName(filePath):
return filePath.split('/')[-1].split('.csv')[0]
def getClassifierInference(classifierRegion):
"""Return output categories from the classifier region."""
if classifierRegion.type == 'py.KNNClassifierRegion':
# The use of numpy.lexsort() here is to first sort by labelFreq, then
# sort by random values; this breaks ties in a random manner.
inferenceValues = classifierRegion.getOutputData('categoriesOut')
randomValues = np.random.random(inferenceValues.size)
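    # np.lexsort sorts by its *last* key first, so candidates are ordered
    # primarily by inferenceValues, with randomValues breaking ties; [-1]
    # then selects the winning category index.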
return np.lexsort((randomValues, inferenceValues))[-1]
else:
return classifierRegion.getOutputData('categoriesOut')[0]
def convertNonZeroToSDR(patternNZs, sdrSize):
sdrs = []
for patternNZ in patternNZs:
sdr = np.zeros(sdrSize)
sdr[patternNZ] = 1
sdrs.append(sdr)
return sdrs
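# For example, convertNonZeroToSDR([[0, 3]], 5) densifies the non-zero indices
# into [array([1., 0., 0., 1., 0.])].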
def appendTraceToTraceFile(trace, traceWriter):
numPoints = len(trace['sensorValue'])
for i in range(numPoints):
row = []
for tk in trace.keys():
if trace[tk]:
if type(trace[tk][i]) == np.ndarray:
row.append(json.dumps(trace[tk][i].tolist()))
else:
row.append(trace[tk][i])
else:
row.append(None)
traceWriter.writerow(row)
_LOGGER.info('Wrote trace batch to file.')
def createNetwork(dataSource, networkConfig, serializedModelPath):
if serializedModelPath:
return loadNetwork(serializedModelPath, dataSource)
else:
return createAndConfigureNetwork(dataSource, networkConfig)
def runNetwork(network, networkConfig, traceFilePath, numRecords, batchSize,
learningMode):
(sensorRegion,
spRegion,
tmRegion,
_,
classifierRegion) = setRegionLearning(network, networkConfig,
learningMode=learningMode)
trace = initTrace()
if os.path.exists(traceFilePath):
os.remove(traceFilePath)
with open(traceFilePath, 'a') as traceFile:
traceWriter = csv.writer(traceFile)
headers = trace.keys()
traceWriter.writerow(headers)
for recordNumber in range(numRecords):
network.run(1)
(sensorValue,
actualCategory,
spActiveColumns,
tmActiveCells,
tmPredictedActiveCells,
anomalyScore,
classificationInference,
classificationAccuracy) = computeNetworkStats(sensorRegion,
spRegion,
tmRegion,
classifierRegion)
updateTrace(trace,
recordNumber,
sensorValue,
actualCategory,
spActiveColumns,
tmActiveCells,
tmPredictedActiveCells,
anomalyScore,
classificationInference,
classificationAccuracy)
if recordNumber % batchSize == 0:
outputClassificationInfo(recordNumber,
sensorValue,
actualCategory,
anomalyScore,
classificationInference,
classificationAccuracy)
# To optimize memory usage, write to trace file in batches.
appendTraceToTraceFile(trace, traceWriter)
trace = initTrace()
appendTraceToTraceFile(trace, traceWriter)
_LOGGER.info('%s records processed. Trace saved: %s' % (numRecords,
traceFilePath))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--inputFile', '-d',
dest='inputFile',
type=str,
default=None,
help='Relative path to the input file.')
parser.add_argument('--outputDir', '-o',
dest='outputDir',
type=str,
default='results/traces',
help='Relative path to the directory where the HTM '
'network traces will be saved.')
parser.add_argument('--htmConfig', '-c',
dest='htmConfig',
type=str,
default='htm_network_config/6categories.json',
help='Relative path to the HTM network config JSON. '
'This option is ignored when the --model flag '
'is used.')
parser.add_argument('--inputModel', '-im',
dest='inputModel',
type=str,
default=None,
help='Relative path of the serialized HTM model to be '
'loaded.')
parser.add_argument('--outputModel', '-om',
dest='outputModel',
type=str,
default=None,
help='Relative path to serialize the HTM model.')
parser.add_argument('--disableLearning', '-dl',
dest='disableLearning',
action='store_true',
default=False,
help='Use this flag to disable learning. If not '
'provided, then learning is enabled by default.')
parser.add_argument('--batch', '-b',
dest='batchSize',
type=int,
default=1000,
help='Size of each batch being processed.')
# Parse input options
options = parser.parse_args()
outputDir = options.outputDir
networkConfigPath = options.htmConfig
batchSize = options.batchSize
# FIXME RES-464: until the serialization process is fixed, don't save the
# model. Run serially each phase (train -> validation -> test)
# TODO: Re-introduce these command line args when serialization is fixed.
# inputFile = options.inputFile
# inputModelPath = options.inputModel
# outputModelPath = options.outputModel
# learningMode = not options.disableLearning
inputModelPath = None
outputModelPath = None
phases = ['train', 'val', 'test']
inputDir = os.path.join('data', 'artificial')
expName = 'binary_ampl=10.0_mean=0.0_noise=0.0' # 'body_acc_x_inertial_signals'
network = None
with open(networkConfigPath, 'r') as f:
networkConfig = simplejson.load(f)
for phase in phases:
# Data source
inputFile = os.path.join(inputDir, '%s_%s.csv' % (expName, phase))
dataSource = FileRecordStream(streamID=inputFile)
numRecords = dataSource.getDataRowCount()
_LOGGER.debug('Number of records to be processed: %s' % numRecords)
# Trace output info
traceFileName = getTraceFileName(inputFile)
traceFilePath = os.path.join(outputDir, '%s.csv' % traceFileName)
if not os.path.exists(outputDir):
os.makedirs(outputDir)
# If there is not network, create one and train it.
if not network:
      assert phase == 'train'  # Make sure that we create a network for the train phase first.
learningMode = True
network = createNetwork(dataSource, networkConfig, inputModelPath)
else:
learningMode = False
regionName = networkConfig["sensorRegionConfig"]["regionName"]
sensorRegion = network.regions[regionName].getSelf()
sensorRegion.dataSource = dataSource
if 'train' in sensorRegion.dataSource._filename:
raise ValueError('Learning mode should not be disabled for the '
'train set.')
_LOGGER.debug('Running network with inputFile=%s '
'and learningMode=%s' % (inputFile, learningMode))
# FIXME RES-464 (end)
run(network,
numRecords,
traceFilePath,
networkConfig,
outputModelPath,
batchSize,
learningMode)
def run(network,
numRecords,
traceFilePath,
networkConfig,
outputModelPath,
batchSize,
learningMode):
# Check input options
if outputModelPath and os.path.exists(outputModelPath):
_LOGGER.warning('There is already a model named %s. This model will be '
'erased.' % outputModelPath)
# HTM network
runNetwork(network, networkConfig, traceFilePath, numRecords, batchSize,
learningMode)
# Save network
if outputModelPath:
saveNetwork(network, outputModelPath)
if __name__ == '__main__':
main()
| agpl-3.0 |