column      type           range / values
repo_name   stringlengths  6 - 100
path        stringlengths  4 - 294
copies      stringlengths  1 - 5
size        stringlengths  4 - 6
content     stringlengths  606 - 896k
license     stringclasses  15 values
repo_name: konstin/mucmiete
path: settings/base.py
copies: 2
size: 3552
content:
""" Django settings for miete project. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os import logging BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) from .preconfig import * try: from .plz_mapping import * except: logging.getLogger(__name__).error('You need to create "settings/plz_mapping.py" by running `./manage.py create_plz_mapping`.') # Application definition if HAVE_ADMIN: ADMIN_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django_admin_bootstrapped', 'django.contrib.admin', 'django.contrib.admindocs', ) ADMIN_MIDDLEWARE = ( 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', ) ADMIN_CONTEXTPROC = ['django.contrib.auth.context_processors.auth'] else: ADMIN_APPS = () ADMIN_MIDDLEWARE = () ADMIN_CONTEXTPROC = [] INSTALLED_APPS = ( 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django_extensions', 'captcha', ) + ADMIN_APPS + ( 'miete', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', ) + ADMIN_MIDDLEWARE + ( 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'settings.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', ] + ADMIN_CONTEXTPROC + [ 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'settings.wsgi.application' # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'de-de' LANGUAGES = ( ('en', 'English'), ('de', 'Deutsch'), ) TIME_ZONE = 'Europe/Berlin' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = ( os.path.join(BASE_DIR, "static"), # '/var/www/static/', ) LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } # addon packages # No Captcha reCaptcha NOCAPTCHA = True BOOTSTRAP3 = { # The URL to the jQuery JavaScript file 'jquery_url': '/static_jquery/js/jquery.min.js', # The Bootstrap base URL 'base_url': '/static/bootstrap/', }
license: agpl-3.0
repo_name: anastasiaguenova/radioactive-decay-simulator
path: electron/node_modules/accessibility-developer-tools/lib/closure-library/closure/bin/build/jscompiler_test.py
copies: 27
size: 3823
content:
#!/usr/bin/env python
#
# Copyright 2013 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit test for depstree."""

__author__ = '[email protected] (Nathan Naze)'

import os
import unittest

import jscompiler


class JsCompilerTestCase(unittest.TestCase):
  """Unit tests for jscompiler module."""

  def testGetFlagFile(self):
    flags_file = jscompiler._GetFlagFile(
        ['path/to/src1.js', 'path/to/src2.js'], ['--test_compiler_flag'])

    def file_get_contents(filename):
      with open(filename) as f:
        content = f.read()
        f.close()
        return content

    flags_file_content = file_get_contents(flags_file.name)
    os.remove(flags_file.name)
    self.assertEqual(
        '--js path/to/src1.js --js path/to/src2.js --test_compiler_flag',
        flags_file_content)

  def testGetJsCompilerArgs(self):
    original_check = jscompiler._JavaSupports32BitMode
    jscompiler._JavaSupports32BitMode = lambda: False

    args = jscompiler._GetJsCompilerArgs('path/to/jscompiler.jar', (1, 7),
                                         ['--test_jvm_flag'])

    self.assertEqual(
        ['java', '-client', '--test_jvm_flag', '-jar',
         'path/to/jscompiler.jar'], args)

    def CheckJava15RaisesError():
      jscompiler._GetJsCompilerArgs('path/to/jscompiler.jar', (1, 5),
                                    ['--test_jvm_flag'])

    self.assertRaises(jscompiler.JsCompilerError, CheckJava15RaisesError)
    jscompiler._JavaSupports32BitMode = original_check

  def testGetJsCompilerArgs32BitJava(self):
    original_check = jscompiler._JavaSupports32BitMode

    # Should include the -d32 flag only if 32-bit Java is supported by the
    # system.
    jscompiler._JavaSupports32BitMode = lambda: True

    args = jscompiler._GetJsCompilerArgs('path/to/jscompiler.jar', (1, 7),
                                         ['--test_jvm_flag'])

    self.assertEqual(
        ['java', '-d32', '-client', '--test_jvm_flag', '-jar',
         'path/to/jscompiler.jar'], args)

    # Should exclude the -d32 flag if 32-bit Java is not supported by the
    # system.
    jscompiler._JavaSupports32BitMode = lambda: False

    args = jscompiler._GetJsCompilerArgs('path/to/jscompiler.jar', (1, 7),
                                         ['--test_jvm_flag'])

    self.assertEqual(
        ['java', '-client', '--test_jvm_flag', '-jar',
         'path/to/jscompiler.jar'], args)

    jscompiler._JavaSupports32BitMode = original_check

  def testGetJavaVersion(self):

    def assertVersion(expected, version_string):
      self.assertEquals(expected, jscompiler._ParseJavaVersion(version_string))

    assertVersion((1, 7), _TEST_JAVA_VERSION_STRING)
    assertVersion((1, 6), _TEST_JAVA_NESTED_VERSION_STRING)
    assertVersion((1, 4), 'java version "1.4.0_03-ea"')


_TEST_JAVA_VERSION_STRING = """\
openjdk version "1.7.0-google-v5"
OpenJDK Runtime Environment (build 1.7.0-google-v5-64327-39803485)
OpenJDK Server VM (build 22.0-b10, mixed mode)
"""


_TEST_JAVA_NESTED_VERSION_STRING = """\
Picked up JAVA_TOOL_OPTIONS: -Dfile.encoding=UTF-8
java version "1.6.0_35"
Java(TM) SE Runtime Environment (build 1.6.0_35-b10-428-11M3811)
Java HotSpot(TM) Client VM (build 20.10-b01-428, mixed mode)
"""


if __name__ == '__main__':
  unittest.main()
license: mit
repo_name: UstadMobile/eXePUB
path: exe/engine/wikipediaidevice.py
copies: 3
size: 15520
content:
# -- coding: utf-8 -- # =========================================================================== # eXe # Copyright 2004-2006, University of Auckland # Copyright 2006-2008 eXe Project, http://eXeLearning.org/ # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # =========================================================================== """ A Wikipedia Idevice is one built from a Wikipedia article. """ import re from BeautifulSoup import BeautifulSoup, Comment from exe.engine.idevice import Idevice from exe.engine.field import TextAreaField from exe.engine.translate import lateTranslate from exe.engine.path import Path, TempDirPath from exe.engine.resource import Resource import urllib class UrlOpener(urllib.FancyURLopener): """ Set a distinctive User-Agent, so Wikipedia.org knows we're not spammers """ version = "eXe/[email protected]" urllib._urlopener = UrlOpener() import logging log = logging.getLogger(__name__) # =========================================================================== class WikipediaIdevice(Idevice): """ A Wikipedia Idevice is one built from a Wikipedia article. """ persistenceVersion = 9 def __init__(self, defaultSite): Idevice.__init__(self, x_(u"Wiki Article"), x_(u"University of Auckland"), x_(u"""<p>The Wikipedia iDevice allows you to locate existing content from within Wikipedia and download this content into your eXe resource. The Wikipedia Article iDevice takes a snapshot copy of the article content. Changes in Wikipedia will not automatically update individual snapshot copies in eXe, a fresh copy of the article will need to be taken. Likewise, changes made in eXe will not be updated in Wikipedia. 
</p> <p>Wikipedia content is covered by the GNU free documentation license.</p>"""), u"", u"") self.emphasis = Idevice.NoEmphasis self.articleName = u"" self.article = TextAreaField(x_(u"Article")) self.article.idevice = self self.images = {} self.site = defaultSite self.icon = u"inter" self._langInstruc = x_(u"""Select the appropriate language version of Wikipedia to search and enter search term.""") self._searchInstruc = x_("""Enter a phrase or term you wish to search within Wikipedia.""") self.ownUrl = "" self.systemResources += ['exe_wikipedia.css'] # Properties langInstruc = lateTranslate('langInstruc') searchInstruc = lateTranslate('searchInstruc') def loadArticle(self, name): """ Load the article from Wikipedia """ self.articleName = name url = "" name = urllib.quote(name.replace(" ", "_").encode('utf-8')) try: url = (self.site or self.ownUrl) if not url.endswith('/') and name <> '': url += '/' if '://' not in url: url = 'http://' + url url += name net = urllib.urlopen(url) page = net.read() net.close() except IOError, error: log.warning(unicode(error)) self.article.content = _(u"Unable to download from %s <br/>Please check the spelling and connection and try again.") % url self.article.content_w_resourcePaths = self.article.content self.article.content_wo_resourcePaths = self.article.content return page = unicode(page, "utf8") # FIXME avoid problems with numeric entities in attributes page = page.replace(u'&#160;', u'&nbsp;') # avoidParserProblems is set to False because BeautifulSoup's # cleanup was causing a "concatenating Null+Str" error, # and Wikipedia's HTML doesn't need cleaning up. # BeautifulSoup is faster this way too. soup = BeautifulSoup(page, False) content = soup.first('div', {'id': "content"}) #Fix bug #1359: El estilo ITE no respeta ancho de página al exportar #a páginas web si se usa iDevice wikipedia content['id'] = "wikipedia-content" # remove the wiktionary, wikimedia commons, and categories boxes # and the protected icon and the needs citations box if content: infoboxes = content.findAll('div', {'class' : 'infobox sisterproject'}) [infobox.extract() for infobox in infoboxes] catboxes = content.findAll('div', {'id' : 'catlinks'}) [catbox.extract() for catbox in catboxes] amboxes = content.findAll('table', {'class' : re.compile(r'.*\bambox\b.*')}) [ambox.extract() for ambox in amboxes] protecteds = content.findAll('div', {'id' : 'protected-icon'}) [protected.extract() for protected in protecteds] # Extract HTML comments comments = content.findAll(text=lambda text:isinstance(text, Comment)) [comment.extract() for comment in comments] else: content = soup.first('body') if not content: log.error("no content") self.article.content = _(u"Unable to download from %s <br/>Please check the spelling and connection and try again.") % url # set the other elements as well self.article.content_w_resourcePaths = self.article.content self.article.content_wo_resourcePaths = self.article.content return # clear out any old images while self.userResources: self.userResources[0].delete() self.images = {} # Download the images bits = url.split('/') netloc = '%s//%s' % (bits[0], bits[2]) path = '/'.join(bits[3:-1]) tmpDir = TempDirPath() for imageTag in content.fetch('img'): imageSrc = unicode(imageTag['src']) imageName = imageSrc.split('/')[-1] imageName = imageName.replace('&gt;', '>') imageName = imageName.replace('&lt;', '<') imageName = imageName.replace('&quot;', '"') imageName = imageName.replace('&nbsp;', '') imageName = imageName.replace('%2C', ',') imageName = 
imageName.replace('%22', '"') imageName = imageName.replace('%28', '(') imageName = imageName.replace('%29', ')') imageName = imageName.replace('%C3%A5', 'å') #JR: decodificamos el nombre de la imagen imageName = urllib.unquote(imageName) # Search if we've already got this image if imageName not in self.images: if not imageSrc.startswith("http://"): if imageSrc.startswith("/"): # imageSrc = netloc + imageSrc imageSrc = bits[0] + imageSrc else: imageSrc = '%s/%s/%s' % (netloc, path, imageSrc) try: # download whith its original name... in ASCII: ## er... just because some repositories do not undestand no ascii names of files: imageName = imageName.encode('ascii', 'ignore') urllib.urlretrieve(imageSrc, tmpDir/imageName) new_resource = Resource(self, tmpDir/imageName) except: print 'Unable to download file' if new_resource._storageName != imageName: # looks like it was changed due to a possible conflict, # so reset the imageName accordingly for the content: imageName = new_resource._storageName self.images[imageName] = True imageTag['src'] = (u"resources/" + imageName) self.article.content = self.reformatArticle(netloc, unicode(content)) # now that these are supporting images, any direct manipulation # of the content field must also store this updated information # into the other corresponding fields of TextAreaField: # (perhaps eventually a property should be made for TextAreaField # such that these extra set's are not necessary, but for now, here:) self.article.content_w_resourcePaths = self.article.content self.article.content_wo_resourcePaths = self.article.content def reformatArticle(self, netloc, content): """ Changes links, etc """ content = re.sub(r'href="/', r'href="%s/' % netloc, content) content = re.sub(r'<(span|div)\s+(id|class)="(editsection|jump-to-nav)".*?</\1>', '', content) #TODO Find a way to remove scripts without removing newlines content = content.replace("\n", " ") content = re.sub(r'<script.*?</script>', '', content) return content def getResourcesField(self, this_resource): """ implement the specific resource finding mechanism for this iDevice: """ # be warned that before upgrading, this iDevice field could not exist: if hasattr(self, 'article') and hasattr(self.article, 'images'): for this_image in self.article.images: if hasattr(this_image, '_imageResource') \ and this_resource == this_image._imageResource: return self.article # NOTE that WikipediaIdevices list their images # in the idevice's .userResources, not in its .article.images... # a slightly different (and earlier) approach to embedding images: for this_image in self.userResources: if this_resource == this_image: return self.article return None def getRichTextFields(self): """ Like getResourcesField(), a general helper to allow nodes to search through all of their fields without having to know the specifics of each iDevice type. 
""" fields_list = [] if hasattr(self, 'article'): fields_list.append(self.article) return fields_list def burstHTML(self, i): """ takes a BeautifulSoup fragment (i) and bursts its contents to import this idevice from a CommonCartridge export """ # Wiki Article Idevice: # option title for Wikipedia, with mode emphasis: title = i.find(name='h2', attrs={'class' : 'iDeviceTitle' }) if title is not None: self.title = title.renderContents().decode('utf-8') self.emphasis=Idevice.SomeEmphasis wiki = i.find(name='div', attrs={'id' : re.compile('^ta') }) self.article.content_wo_resourcePaths = \ wiki.renderContents().decode('utf-8') # and add the LOCAL resource paths back in: self.article.content_w_resourcePaths = \ self.article.MassageResourceDirsIntoContent( \ self.article.content_wo_resourcePaths) self.article.content = self.article.content_w_resourcePaths site = i.find(name='div', attrs={'class' : 'wiki_site' }) if site is not None: self.site = site.attrMap['value'].decode('utf-8') name = i.find(name='div', attrs={'class' : 'article_name' }) if name is not None: # WARNING: the following crashes on accented characters, eg: # 'ascii' codec can't encode character u'\xe8' in # position 11: ordinal not in range(128) self.articleName = name.attrMap['value'].decode('utf-8') own_url = i.find(name='div', attrs={'class' : 'own_url' }) if own_url is not None: self.own_url = own_url.attrMap['value'].decode('utf-8') def __getstate__(self): """ Re-write the img URLs just in case the class name has changed """ log.debug("in __getstate__ " + repr(self.parentNode)) # need to check parentNode because __getstate__ is also called by # deepcopy as well as Jelly. if self.parentNode: self.article.content = re.sub(r'/[^/]*?/', u"/" + self.parentNode.package.name + u"/", self.article.content) #self.article.content = re.sub(r'/[^/]*?/resources/', # u"/" + self.parentNode.package.name + # u"/resources/", # self.article.content) return Idevice.__getstate__(self) def delete(self): """ Clear out any old images when this iDevice is deleted """ self.images = {} Idevice.delete(self) def upgradeToVersion1(self): """ Called to upgrade from 0.6 release """ self.site = _('http://en.wikipedia.org/') def upgradeToVersion2(self): """ Upgrades v0.6 to v0.7. """ self.lastIdevice = False def upgradeToVersion3(self): """ Upgrades exe to v0.10 """ self._upgradeIdeviceToVersion1() self._site = self.__dict__['site'] def upgradeToVersion4(self): """ Upgrades exe to v0.11... what was I thinking? """ self.site = self.__dict__['_site'] def upgradeToVersion5(self): """ Upgrades exe to v0.11... forgot to change the icon """ self.icon = u"inter" def upgradeToVersion6(self): """ Upgrades to v0.12 """ self._upgradeIdeviceToVersion2() self.systemResources += ["fdl.html"] if self.images and self.parentNode: for image in self.images: imageResource = Resource(self, Path(image)) def upgradeToVersion7(self): """ Upgrades to v0.12 """ self._langInstruc = x_(u"""Select the appropriate language version of Wikipedia to search and enter search term.""") self._searchInstruc = x_("""Enter a phrase or term you wish to search within Wikipedia.""") def upgradeToVersion8(self): """ Upgrades to v0.19 """ self.ownUrl = "" def upgradeToVersion9(self): if 'fdl.html' in self.systemResources: self.systemResources.remove('fdl.html') if 'exe_wikipedia.css' not in self.systemResources: self.systemResources.append('exe_wikipedia.css') # ===========================================================================
license: gpl-2.0
repo_name: edxzw/edx-platform
path: cms/djangoapps/contentstore/views/tests/utils.py
copies: 198
size: 2922
content:
""" Utilities for view tests. """ import json from contentstore.tests.utils import CourseTestCase from contentstore.views.helpers import xblock_studio_url from xmodule.modulestore.tests.factories import ItemFactory class StudioPageTestCase(CourseTestCase): """ Base class for all tests of Studio pages. """ def setUp(self): super(StudioPageTestCase, self).setUp() self.chapter = ItemFactory.create(parent_location=self.course.location, category='chapter', display_name="Week 1") self.sequential = ItemFactory.create(parent_location=self.chapter.location, category='sequential', display_name="Lesson 1") def get_page_html(self, xblock): """ Returns the HTML for the page representing the xblock. """ url = xblock_studio_url(xblock) self.assertIsNotNone(url) resp = self.client.get_html(url) self.assertEqual(resp.status_code, 200) return resp.content def get_preview_html(self, xblock, view_name): """ Returns the HTML for the xblock when shown within a unit or container page. """ preview_url = '/xblock/{usage_key}/{view_name}'.format(usage_key=xblock.location, view_name=view_name) resp = self.client.get_json(preview_url) self.assertEqual(resp.status_code, 200) resp_content = json.loads(resp.content) return resp_content['html'] def validate_preview_html(self, xblock, view_name, can_add=True): """ Verify that the specified xblock's preview has the expected HTML elements. """ html = self.get_preview_html(xblock, view_name) self.validate_html_for_add_buttons(html, can_add) # Verify drag handles always appear. drag_handle_html = '<span data-tooltip="Drag to reorder" class="drag-handle action"></span>' self.assertIn(drag_handle_html, html) # Verify that there are no action buttons for public blocks expected_button_html = [ '<a href="#" class="edit-button action-button">', '<a href="#" data-tooltip="Delete" class="delete-button action-button">', '<a href="#" data-tooltip="Duplicate" class="duplicate-button action-button">' ] for button_html in expected_button_html: self.assertIn(button_html, html) def validate_html_for_add_buttons(self, html, can_add=True): """ Validate that the specified HTML has the appropriate add actions for the current publish state. """ # Verify that there are no add buttons for public blocks add_button_html = '<div class="add-xblock-component new-component-item adding"></div>' if can_add: self.assertIn(add_button_html, html) else: self.assertNotIn(add_button_html, html)
license: agpl-3.0
repo_name: broferek/ansible
path: lib/ansible/plugins/become/enable.py
copies: 43
size: 1422
content:
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = """
    become: enable
    short_description: Switch to elevated permissions on a network device
    description:
        - This become plugins allows elevated permissions on a remote network device.
    author: ansible (@core)
    version_added: "2.8"
    options:
        become_pass:
            description: password
            ini:
              - section: enable_become_plugin
                key: password
            vars:
              - name: ansible_become_password
              - name: ansible_become_pass
              - name: ansible_enable_pass
            env:
              - name: ANSIBLE_BECOME_PASS
              - name: ANSIBLE_ENABLE_PASS
    notes:
        - enable is really implemented in the network connection handler and as such can only be used with network connections.
        - This plugin ignores the 'become_exe' and 'become_user' settings as it uses an API and not an executable.
"""

from ansible.plugins.become import BecomeBase


class BecomeModule(BecomeBase):

    name = 'enable'

    def build_become_command(self, cmd, shell):
        # enable is implemented inside the network connection plugins
        return cmd
license: gpl-3.0
repo_name: robbinfan/thirdparty
path: protobuf-2.6.1/python/google/protobuf/pyext/descriptor_cpp2_test.py
copies: 73
size: 2506
content:
#! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Tests for google.protobuf.pyext behavior."""

__author__ = '[email protected] (Anuraag Agrawal)'

import os
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp'
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION'] = '2'

# We must set the implementation version above before the google3 imports.
# pylint: disable=g-import-not-at-top
from google.apputils import basetest
from google.protobuf.internal import api_implementation
# Run all tests from the original module by putting them in our namespace.
# pylint: disable=wildcard-import
from google.protobuf.internal.descriptor_test import *


class ConfirmCppApi2Test(basetest.TestCase):

  def testImplementationSetting(self):
    self.assertEqual('cpp', api_implementation.Type())
    self.assertEqual(2, api_implementation.Version())


if __name__ == '__main__':
  basetest.main()
license: bsd-2-clause
repo_name: epssy/hue
path: apps/useradmin/src/useradmin/test_ldap_deprecated.py
copies: 4
size: 32107
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ldap from nose.plugins.attrib import attr from nose.tools import assert_true, assert_equal, assert_false import desktop.conf from desktop.lib.test_utils import grant_access from desktop.lib.django_test_util import make_logged_in_client from django.conf import settings from django.contrib.auth.models import User, Group from django.core.urlresolvers import reverse from useradmin.models import LdapGroup, UserProfile, get_profile from hadoop import pseudo_hdfs4 from views import sync_ldap_users, sync_ldap_groups, import_ldap_users, import_ldap_groups, \ add_ldap_users, add_ldap_groups, sync_ldap_users_groups import ldap_access from tests import LdapTestConnection, reset_all_groups, reset_all_users def test_useradmin_ldap_user_group_membership_sync(): settings.MIDDLEWARE_CLASSES.append('useradmin.middleware.LdapSynchronizationMiddleware') reset_all_users() reset_all_groups() # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection ldap_access.CACHED_LDAP_CONN = LdapTestConnection() # Make sure LDAP groups exist or they won't sync import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False) import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False) try: # Import curly who is part of TestUsers and Test Administrators import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'curly', sync_groups=False, import_by_dn=False) # Set a password so that we can login user = User.objects.get(username='curly') user.set_password('test') user.save() # Should have 0 groups assert_equal(0, user.groups.all().count()) # Make an authenticated request as curly so that we can see call middleware. c = make_logged_in_client('curly', 'test', is_superuser=False) grant_access("curly", "test", "useradmin") response = c.get('/useradmin/users') # Refresh user groups user = User.objects.get(username='curly') # Should have 3 groups now. 2 from LDAP and 1 from 'grant_access' call. assert_equal(3, user.groups.all().count(), user.groups.all()) # Now remove a group and try again. old_group = ldap_access.CACHED_LDAP_CONN._instance.users['curly']['groups'].pop() # Make an authenticated request as curly so that we can see call middleware. response = c.get('/useradmin/users') # Refresh user groups user = User.objects.get(username='curly') # Should have 2 groups now. 1 from LDAP and 1 from 'grant_access' call. 
assert_equal(3, user.groups.all().count(), user.groups.all()) finally: settings.MIDDLEWARE_CLASSES.remove('useradmin.middleware.LdapSynchronizationMiddleware') def test_useradmin_ldap_suboordinate_group_integration(): reset_all_users() reset_all_groups() reset = [] # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection ldap_access.CACHED_LDAP_CONN = LdapTestConnection() # Test old subgroups reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("suboordinate")) try: # Import groups only import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False) test_users = Group.objects.get(name='TestUsers') assert_true(LdapGroup.objects.filter(group=test_users).exists()) assert_equal(test_users.user_set.all().count(), 0) # Import all members of TestUsers import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False) test_users = Group.objects.get(name='TestUsers') assert_true(LdapGroup.objects.filter(group=test_users).exists()) assert_equal(test_users.user_set.all().count(), 3) # Should import a group, but will only sync already-imported members import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False) assert_equal(User.objects.all().count(), 3) assert_equal(Group.objects.all().count(), 2) test_admins = Group.objects.get(name='Test Administrators') assert_equal(test_admins.user_set.all().count(), 2) larry = User.objects.get(username='lårry') assert_equal(test_admins.user_set.all()[0].username, larry.username) # Only sync already imported ldap_access.CACHED_LDAP_CONN.remove_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers') import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False) assert_equal(test_users.user_set.all().count(), 2) assert_equal(User.objects.get(username='moe').groups.all().count(), 0) # Import missing user ldap_access.CACHED_LDAP_CONN.add_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers') import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False) assert_equal(test_users.user_set.all().count(), 3) assert_equal(User.objects.get(username='moe').groups.all().count(), 1) # Import all members of TestUsers and members of subgroups import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False) test_users = Group.objects.get(name='TestUsers') assert_true(LdapGroup.objects.filter(group=test_users).exists()) assert_equal(test_users.user_set.all().count(), 4) # Make sure Hue groups with naming collisions don't get marked as LDAP groups hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy') hue_group = Group.objects.create(name='OtherGroup') hue_group.user_set.add(hue_user) hue_group.save() import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False) assert_false(LdapGroup.objects.filter(group=hue_group).exists()) assert_true(hue_group.user_set.filter(username=hue_user.username).exists()) finally: for finish in reset: finish() def 
test_useradmin_ldap_nested_group_integration(): reset_all_users() reset_all_groups() reset = [] # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection ldap_access.CACHED_LDAP_CONN = LdapTestConnection() # Test old subgroups reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("nested")) try: # Import groups only import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False) test_users = Group.objects.get(name='TestUsers') assert_true(LdapGroup.objects.filter(group=test_users).exists()) assert_equal(test_users.user_set.all().count(), 0) # Import all members of TestUsers import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False) test_users = Group.objects.get(name='TestUsers') assert_true(LdapGroup.objects.filter(group=test_users).exists()) assert_equal(test_users.user_set.all().count(), 3) # Should import a group, but will only sync already-imported members import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False) assert_equal(User.objects.all().count(), 3) assert_equal(Group.objects.all().count(), 2) test_admins = Group.objects.get(name='Test Administrators') assert_equal(test_admins.user_set.all().count(), 2) larry = User.objects.get(username='lårry') assert_equal(test_admins.user_set.all()[0].username, larry.username) # Only sync already imported assert_equal(test_users.user_set.all().count(), 3) ldap_access.CACHED_LDAP_CONN.remove_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers') import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False) assert_equal(test_users.user_set.all().count(), 2) assert_equal(User.objects.get(username='moe').groups.all().count(), 0) # Import missing user ldap_access.CACHED_LDAP_CONN.add_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers') import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False) assert_equal(test_users.user_set.all().count(), 3) assert_equal(User.objects.get(username='moe').groups.all().count(), 1) # Import all members of TestUsers and not members of suboordinate groups (even though specified) import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False) test_users = Group.objects.get(name='TestUsers') assert_true(LdapGroup.objects.filter(group=test_users).exists()) assert_equal(test_users.user_set.all().count(), 3) # Nested group import # First without recursive import, then with. 
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedGroups', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False) nested_groups = Group.objects.get(name='NestedGroups') nested_group = Group.objects.get(name='NestedGroup') assert_true(LdapGroup.objects.filter(group=nested_groups).exists()) assert_true(LdapGroup.objects.filter(group=nested_group).exists()) assert_equal(nested_groups.user_set.all().count(), 0, nested_groups.user_set.all()) assert_equal(nested_group.user_set.all().count(), 0, nested_group.user_set.all()) import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedGroups', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False) nested_groups = Group.objects.get(name='NestedGroups') nested_group = Group.objects.get(name='NestedGroup') assert_true(LdapGroup.objects.filter(group=nested_groups).exists()) assert_true(LdapGroup.objects.filter(group=nested_group).exists()) assert_equal(nested_groups.user_set.all().count(), 0, nested_groups.user_set.all()) assert_equal(nested_group.user_set.all().count(), 1, nested_group.user_set.all()) # Make sure Hue groups with naming collisions don't get marked as LDAP groups hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy') hue_group = Group.objects.create(name='OtherGroup') hue_group.user_set.add(hue_user) hue_group.save() import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False) assert_false(LdapGroup.objects.filter(group=hue_group).exists()) assert_true(hue_group.user_set.filter(username=hue_user.username).exists()) finally: for finish in reset: finish() def test_useradmin_ldap_suboordinate_posix_group_integration(): reset_all_users() reset_all_groups() reset = [] # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection ldap_access.CACHED_LDAP_CONN = LdapTestConnection() # Test old subgroups reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("suboordinate")) try: # Import groups only import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False) test_users = Group.objects.get(name='PosixGroup') assert_true(LdapGroup.objects.filter(group=test_users).exists()) assert_equal(test_users.user_set.all().count(), 0) # Import all members of TestUsers import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False) test_users = Group.objects.get(name='PosixGroup') assert_true(LdapGroup.objects.filter(group=test_users).exists()) assert_equal(test_users.user_set.all().count(), 2) # Should import a group, but will only sync already-imported members import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False) assert_equal(User.objects.all().count(), 2, User.objects.all()) assert_equal(Group.objects.all().count(), 2, Group.objects.all()) test_admins = Group.objects.get(name='Test Administrators') assert_equal(test_admins.user_set.all().count(), 1) larry = User.objects.get(username='lårry') assert_equal(test_admins.user_set.all()[0].username, larry.username) # Only sync already imported ldap_access.CACHED_LDAP_CONN.remove_posix_user_group_for_test('posix_person', 'PosixGroup') import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', 
import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False) assert_equal(test_users.user_set.all().count(), 1) assert_equal(User.objects.get(username='posix_person').groups.all().count(), 0) # Import missing user ldap_access.CACHED_LDAP_CONN.add_posix_user_group_for_test('posix_person', 'PosixGroup') import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False) assert_equal(test_users.user_set.all().count(), 2) assert_equal(User.objects.get(username='posix_person').groups.all().count(), 1) # Import all members of PosixGroup and members of subgroups import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False) test_users = Group.objects.get(name='PosixGroup') assert_true(LdapGroup.objects.filter(group=test_users).exists()) assert_equal(test_users.user_set.all().count(), 3) # Make sure Hue groups with naming collisions don't get marked as LDAP groups hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy') hue_group = Group.objects.create(name='OtherGroup') hue_group.user_set.add(hue_user) hue_group.save() import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False) assert_false(LdapGroup.objects.filter(group=hue_group).exists()) assert_true(hue_group.user_set.filter(username=hue_user.username).exists()) finally: for finish in reset: finish() def test_useradmin_ldap_nested_posix_group_integration(): reset_all_users() reset_all_groups() reset = [] # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection ldap_access.CACHED_LDAP_CONN = LdapTestConnection() # Test nested groups reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("nested")) try: # Import groups only import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False) test_users = Group.objects.get(name='PosixGroup') assert_true(LdapGroup.objects.filter(group=test_users).exists()) assert_equal(test_users.user_set.all().count(), 0) # Import all members of TestUsers import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False) test_users = Group.objects.get(name='PosixGroup') assert_true(LdapGroup.objects.filter(group=test_users).exists()) assert_equal(test_users.user_set.all().count(), 2) # Should import a group, but will only sync already-imported members import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False) assert_equal(User.objects.all().count(), 2, User.objects.all()) assert_equal(Group.objects.all().count(), 2, Group.objects.all()) test_admins = Group.objects.get(name='Test Administrators') assert_equal(test_admins.user_set.all().count(), 1) larry = User.objects.get(username='lårry') assert_equal(test_admins.user_set.all()[0].username, larry.username) # Only sync already imported ldap_access.CACHED_LDAP_CONN.remove_posix_user_group_for_test('posix_person', 'PosixGroup') import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False) assert_equal(test_users.user_set.all().count(), 1) 
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 0) # Import missing user ldap_access.CACHED_LDAP_CONN.add_posix_user_group_for_test('posix_person', 'PosixGroup') import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False) assert_equal(test_users.user_set.all().count(), 2) assert_equal(User.objects.get(username='posix_person').groups.all().count(), 1) # Import all members of PosixGroup and members of subgroups (there should be no subgroups) import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False) test_users = Group.objects.get(name='PosixGroup') assert_true(LdapGroup.objects.filter(group=test_users).exists()) assert_equal(test_users.user_set.all().count(), 2) # Import all members of NestedPosixGroups and members of subgroups reset_all_users() reset_all_groups() import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedPosixGroups', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False) test_users = Group.objects.get(name='NestedPosixGroups') assert_true(LdapGroup.objects.filter(group=test_users).exists()) assert_equal(test_users.user_set.all().count(), 0) test_users = Group.objects.get(name='PosixGroup') assert_true(LdapGroup.objects.filter(group=test_users).exists()) assert_equal(test_users.user_set.all().count(), 2) # Make sure Hue groups with naming collisions don't get marked as LDAP groups hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy') hue_group = Group.objects.create(name='OtherGroup') hue_group.user_set.add(hue_user) hue_group.save() import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False) assert_false(LdapGroup.objects.filter(group=hue_group).exists()) assert_true(hue_group.user_set.filter(username=hue_user.username).exists()) finally: for finish in reset: finish() def test_useradmin_ldap_user_integration(): done = [] try: reset_all_users() reset_all_groups() # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection ldap_access.CACHED_LDAP_CONN = LdapTestConnection() # Try importing a user import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'lårry', sync_groups=False, import_by_dn=False) larry = User.objects.get(username='lårry') assert_true(larry.first_name == 'Larry') assert_true(larry.last_name == 'Stooge') assert_true(larry.email == '[email protected]') assert_true(get_profile(larry).creation_method == str(UserProfile.CreationMethod.EXTERNAL)) # Should be a noop sync_ldap_users(ldap_access.CACHED_LDAP_CONN) sync_ldap_groups(ldap_access.CACHED_LDAP_CONN) assert_equal(User.objects.all().count(), 1) assert_equal(Group.objects.all().count(), 0) # Make sure that if a Hue user already exists with a naming collision, we # won't overwrite any of that user's information. 
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy') import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'otherguy', sync_groups=False, import_by_dn=False) hue_user = User.objects.get(username='otherguy') assert_equal(get_profile(hue_user).creation_method, str(UserProfile.CreationMethod.HUE)) assert_equal(hue_user.first_name, 'Different') # Make sure LDAP groups exist or they won't sync import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False) import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False) # Try importing a user and sync groups import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'curly', sync_groups=True, import_by_dn=False) curly = User.objects.get(username='curly') assert_equal(curly.first_name, 'Curly') assert_equal(curly.last_name, 'Stooge') assert_equal(curly.email, '[email protected]') assert_equal(get_profile(curly).creation_method, str(UserProfile.CreationMethod.EXTERNAL)) assert_equal(2, curly.groups.all().count(), curly.groups.all()) reset_all_users() reset_all_groups() # Test import case sensitivity done.append(desktop.conf.LDAP.IGNORE_USERNAME_CASE.set_for_testing(True)) import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Lårry', sync_groups=False, import_by_dn=False) assert_false(User.objects.filter(username='Lårry').exists()) assert_true(User.objects.filter(username='lårry').exists()) # Test lower case User.objects.filter(username__iexact='Rock').delete() import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False) assert_false(User.objects.filter(username='Rock').exists()) assert_true(User.objects.filter(username='rock').exists()) done.append(desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.set_for_testing(True)) import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False) assert_false(User.objects.filter(username='Rock').exists()) assert_true(User.objects.filter(username='rock').exists()) User.objects.filter(username='Rock').delete() import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False) assert_false(User.objects.filter(username='Rock').exists()) assert_true(User.objects.filter(username='rock').exists()) finally: for finish in done: finish() def test_add_ldap_users(): done = [] try: URL = reverse(add_ldap_users) reset_all_users() reset_all_groups() # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection ldap_access.CACHED_LDAP_CONN = LdapTestConnection() c = make_logged_in_client('test', is_superuser=True) assert_true(c.get(URL)) response = c.post(URL, dict(username_pattern='moe', password1='test', password2='test')) assert_true('Location' in response, response) assert_true('/useradmin/users' in response['Location'], response) response = c.post(URL, dict(username_pattern='bad_name', password1='test', password2='test')) assert_true('Could not' in response.context['form'].errors['username_pattern'][0], response) # Test wild card response = c.post(URL, dict(username_pattern='*rr*', password1='test', password2='test')) assert_true('/useradmin/users' in response['Location'], response) # Test ignore case done.append(desktop.conf.LDAP.IGNORE_USERNAME_CASE.set_for_testing(True)) User.objects.filter(username='moe').delete() assert_false(User.objects.filter(username='Moe').exists()) 
assert_false(User.objects.filter(username='moe').exists()) response = c.post(URL, dict(username_pattern='Moe', password1='test', password2='test')) assert_true('Location' in response, response) assert_true('/useradmin/users' in response['Location'], response) assert_false(User.objects.filter(username='Moe').exists()) assert_true(User.objects.filter(username='moe').exists()) # Test lower case done.append(desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.set_for_testing(True)) User.objects.filter(username__iexact='Rock').delete() assert_false(User.objects.filter(username='Rock').exists()) assert_false(User.objects.filter(username='rock').exists()) response = c.post(URL, dict(username_pattern='rock', password1='test', password2='test')) assert_true('Location' in response, response) assert_true('/useradmin/users' in response['Location'], response) assert_false(User.objects.filter(username='Rock').exists()) assert_true(User.objects.filter(username='rock').exists()) # Test regular with spaces (should fail) response = c.post(URL, dict(username_pattern='user with space', password1='test', password2='test')) assert_true("Username must not contain whitespaces and ':'" in response.context['form'].errors['username_pattern'][0], response) # Test dn with spaces in username and dn (should fail) response = c.post(URL, dict(username_pattern='uid=user with space,ou=People,dc=example,dc=com', password1='test', password2='test', dn=True)) assert_true("There was a problem with some of the LDAP information" in response.content, response) assert_true("Username must not contain whitespaces" in response.content, response) # Test dn with spaces in dn, but not username (should succeed) response = c.post(URL, dict(username_pattern='uid=user without space,ou=People,dc=example,dc=com', password1='test', password2='test', dn=True)) assert_true(User.objects.filter(username='spaceless').exists()) finally: for finish in done: finish() def test_add_ldap_groups(): URL = reverse(add_ldap_groups) reset_all_users() reset_all_groups() # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection ldap_access.CACHED_LDAP_CONN = LdapTestConnection() c = make_logged_in_client(username='test', is_superuser=True) assert_true(c.get(URL)) response = c.post(URL, dict(groupname_pattern='TestUsers')) assert_true('Location' in response, response) assert_true('/useradmin/groups' in response['Location']) # Test with space response = c.post(URL, dict(groupname_pattern='Test Administrators')) assert_true('Location' in response, response) assert_true('/useradmin/groups' in response['Location'], response) response = c.post(URL, dict(groupname_pattern='toolongnametoolongnametoolongnametoolongnametoolongnametoolongname' 'toolongnametoolongnametoolongnametoolongnametoolongnametoolongname' 'toolongnametoolongnametoolongnametoolongnametoolongnametoolongname' 'toolongnametoolongnametoolongnametoolongnametoolongnametoolongname')) assert_true('Ensure this value has at most 256 characters' in response.context['form'].errors['groupname_pattern'][0], response) # Test wild card response = c.post(URL, dict(groupname_pattern='*r*')) assert_true('/useradmin/groups' in response['Location'], response) def test_sync_ldap_users_groups(): URL = reverse(sync_ldap_users_groups) reset_all_users() reset_all_groups() # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection ldap_access.CACHED_LDAP_CONN = LdapTestConnection() c = make_logged_in_client('test', is_superuser=True) assert_true(c.get(URL)) assert_true(c.post(URL)) def 
test_ldap_exception_handling(): reset_all_users() reset_all_groups() # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection class LdapTestConnectionError(LdapTestConnection): def find_users(self, user, find_by_dn=False): raise ldap.LDAPError('No such object') ldap_access.CACHED_LDAP_CONN = LdapTestConnectionError() c = make_logged_in_client('test', is_superuser=True) response = c.post(reverse(add_ldap_users), dict(username_pattern='moe', password1='test', password2='test'), follow=True) assert_true('There was an error when communicating with LDAP' in response.content, response) @attr('requires_hadoop') def test_ensure_home_directory_add_ldap_users(): try: URL = reverse(add_ldap_users) reset_all_users() reset_all_groups() # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection ldap_access.CACHED_LDAP_CONN = LdapTestConnection() cluster = pseudo_hdfs4.shared_cluster() c = make_logged_in_client(cluster.superuser, is_superuser=True) cluster.fs.setuser(cluster.superuser) assert_true(c.get(URL)) response = c.post(URL, dict(username_pattern='moe', password1='test', password2='test')) assert_true('/useradmin/users' in response['Location']) assert_false(cluster.fs.exists('/user/moe')) # Try same thing with home directory creation. response = c.post(URL, dict(username_pattern='curly', password1='test', password2='test', ensure_home_directory=True)) assert_true('/useradmin/users' in response['Location']) assert_true(cluster.fs.exists('/user/curly')) response = c.post(URL, dict(username_pattern='bad_name', password1='test', password2='test')) assert_true('Could not' in response.context['form'].errors['username_pattern'][0]) assert_false(cluster.fs.exists('/user/bad_name')) # See if moe, who did not ask for his home directory, has a home directory. assert_false(cluster.fs.exists('/user/moe')) # Try wild card now response = c.post(URL, dict(username_pattern='*rr*', password1='test', password2='test', ensure_home_directory=True)) assert_true('/useradmin/users' in response['Location']) assert_true(cluster.fs.exists('/user/curly')) assert_true(cluster.fs.exists(u'/user/lårry')) assert_false(cluster.fs.exists('/user/otherguy')) finally: # Clean up if cluster.fs.exists('/user/curly'): cluster.fs.rmtree('/user/curly') if cluster.fs.exists(u'/user/lårry'): cluster.fs.rmtree(u'/user/lårry') if cluster.fs.exists('/user/otherguy'): cluster.fs.rmtree('/user/otherguy') @attr('requires_hadoop') def test_ensure_home_directory_sync_ldap_users_groups(): URL = reverse(sync_ldap_users_groups) reset_all_users() reset_all_groups() # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection ldap_access.CACHED_LDAP_CONN = LdapTestConnection() cluster = pseudo_hdfs4.shared_cluster() c = make_logged_in_client(cluster.superuser, is_superuser=True) cluster.fs.setuser(cluster.superuser) c.post(reverse(add_ldap_users), dict(username_pattern='curly', password1='test', password2='test')) assert_false(cluster.fs.exists('/user/curly')) assert_true(c.post(URL, dict(ensure_home_directory=True))) assert_true(cluster.fs.exists('/user/curly'))
license: apache-2.0
repo_name: sanguinariojoe/FreeCAD
path: src/Mod/Part/BOPTools/JoinFeatures.py
copies: 13
size: 15669
content:
#/*************************************************************************** # * Copyright (c) 2016 Victor Titov (DeepSOIC) <[email protected]> * # * * # * This file is part of the FreeCAD CAx development system. * # * * # * This library is free software; you can redistribute it and/or * # * modify it under the terms of the GNU Library General Public * # * License as published by the Free Software Foundation; either * # * version 2 of the License, or (at your option) any later version. * # * * # * This library is distributed in the hope that it will be useful, * # * but WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # * GNU Library General Public License for more details. * # * * # * You should have received a copy of the GNU Library General Public * # * License along with this library; see the file COPYING.LIB. If not, * # * write to the Free Software Foundation, Inc., 59 Temple Place, * # * Suite 330, Boston, MA 02111-1307, USA * # * * # ***************************************************************************/ __title__ = "BOPTools.JoinFeatures module" __author__ = "DeepSOIC" __url__ = "http://www.freecadweb.org" __doc__ = "Implementation of document objects (features) for connect, ebmed and cutout operations." from . import JoinAPI import FreeCAD import Part if FreeCAD.GuiUp: import FreeCADGui from PySide import QtCore, QtGui # -------------------------- common stuff ------------------------------------- # -------------------------- translation-related code ------------------------- # Thanks, yorik! (see forum thread "A new Part tool is being born... JoinFeatures!" # http://forum.freecadweb.org/viewtopic.php?f=22&t=11112&start=30#p90239 ) try: _fromUtf8 = QtCore.QString.fromUtf8 except Exception: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) else: def _translate(context, text, disambig): return text # --------------------------/translation-related code ------------------------- def getParamRefine(): return FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Part/Boolean").GetBool("RefineModel") def cmdCreateJoinFeature(name, mode): """cmdCreateJoinFeature(name, mode): generalized implementation of GUI commands.""" sel = FreeCADGui.Selection.getSelectionEx() FreeCAD.ActiveDocument.openTransaction("Create "+mode) FreeCADGui.addModule("BOPTools.JoinFeatures") FreeCADGui.doCommand("j = BOPTools.JoinFeatures.make{mode}(name='{name}')".format(mode=mode, name=name)) if mode == "Embed" or mode == "Cutout": FreeCADGui.doCommand("j.Base = App.ActiveDocument."+sel[0].Object.Name) FreeCADGui.doCommand("j.Tool = App.ActiveDocument."+sel[1].Object.Name) elif mode == "Connect": FreeCADGui.doCommand("j.Objects = {sel}".format( sel= "[" + ", ".join(["App.ActiveDocument."+so.Object.Name for so in sel]) + "]" )) else: raise ValueError("cmdCreateJoinFeature: Unexpected mode {mode}".format(mode=repr(mode))) try: FreeCADGui.doCommand("j.Proxy.execute(j)") FreeCADGui.doCommand("j.purgeTouched()") except Exception as err: mb = QtGui.QMessageBox() mb.setIcon(mb.Icon.Warning) mb.setText(_translate("Part_JoinFeatures", "Computing the result failed with an error:\n\n" "{err}\n\n" "Click 'Continue' to create the feature anyway, or 'Abort' to cancel.", None) 
.format(err=str(err))) mb.setWindowTitle(_translate("Part_JoinFeatures","Bad selection", None)) btnAbort = mb.addButton(QtGui.QMessageBox.StandardButton.Abort) btnOK = mb.addButton(_translate("Part_JoinFeatures","Continue",None), QtGui.QMessageBox.ButtonRole.ActionRole) mb.setDefaultButton(btnOK) mb.exec_() if mb.clickedButton() is btnAbort: FreeCAD.ActiveDocument.abortTransaction() return FreeCADGui.doCommand("for obj in j.ViewObject.Proxy.claimChildren():\n" " obj.ViewObject.hide()") FreeCAD.ActiveDocument.commitTransaction() def getIconPath(icon_dot_svg): return icon_dot_svg # -------------------------- /common stuff ------------------------------------ # -------------------------- Connect ------------------------------------------ def makeConnect(name): '''makeConnect(name): makes an Connect object.''' obj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython",name) FeatureConnect(obj) if FreeCAD.GuiUp: ViewProviderConnect(obj.ViewObject) return obj class FeatureConnect: """The PartJoinFeature object.""" def __init__(self,obj): obj.addProperty("App::PropertyLinkList","Objects","Connect","Object to be connectded.") obj.addProperty("App::PropertyBool","Refine","Connect", "True = refine resulting shape. False = output as is.") obj.Refine = getParamRefine() obj.addProperty("App::PropertyLength","Tolerance","Connect", "Tolerance when intersecting (fuzzy value). " "In addition to tolerances of the shapes.") obj.Proxy = self self.Type = "FeatureConnect" def execute(self,selfobj): rst = JoinAPI.connect([obj.Shape for obj in selfobj.Objects], selfobj.Tolerance) if selfobj.Refine: rst = rst.removeSplitter() selfobj.Shape = rst class ViewProviderConnect: """A View Provider for the Part Connect feature.""" def __init__(self,vobj): vobj.Proxy = self def getIcon(self): return getIconPath("Part_JoinConnect.svg") def attach(self, vobj): self.ViewObject = vobj self.Object = vobj.Object def __getstate__(self): return None def __setstate__(self,state): return None def claimChildren(self): return self.Object.Objects def onDelete(self, feature, subelements): try: for obj in self.claimChildren(): obj.ViewObject.show() except Exception as err: FreeCAD.Console.PrintError("Error in onDelete: " + str(err)) return True def canDragObjects(self): return True def canDropObjects(self): return True def canDragObject(self, dragged_object): return True def canDropObject(self, incoming_object): return hasattr(incoming_object, 'Shape') def dragObject(self, selfvp, dragged_object): objs = self.Object.Objects objs.remove(dragged_object) self.Object.Objects = objs def dropObject(self, selfvp, incoming_object): self.Object.Objects = self.Object.Objects + [incoming_object] class CommandConnect: """Command to create Connect feature.""" def GetResources(self): return {'Pixmap': getIconPath("Part_JoinConnect.svg"), 'MenuText': QtCore.QT_TRANSLATE_NOOP("Part_JoinConnect","Connect objects"), 'Accel': "", 'ToolTip': QtCore.QT_TRANSLATE_NOOP("Part_JoinConnect", "Fuses objects, taking care to preserve voids.")} def Activated(self): if len(FreeCADGui.Selection.getSelectionEx()) >= 1: cmdCreateJoinFeature(name="Connect", mode="Connect") else: mb = QtGui.QMessageBox() mb.setIcon(mb.Icon.Warning) mb.setText(_translate("Part_JoinFeatures", "Select at least two objects, or one or more compounds", None)) mb.setWindowTitle(_translate("Part_JoinFeatures","Bad selection", None)) mb.exec_() def IsActive(self): if FreeCAD.ActiveDocument: return True else: return False # -------------------------- /Connect 
----------------------------------------- # -------------------------- Embed -------------------------------------------- def makeEmbed(name): '''makeEmbed(name): makes an Embed object.''' obj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython",name) FeatureEmbed(obj) if FreeCAD.GuiUp: ViewProviderEmbed(obj.ViewObject) return obj class FeatureEmbed: """The Part Embed object.""" def __init__(self,obj): obj.addProperty("App::PropertyLink","Base","Embed","Object to embed into.") obj.addProperty("App::PropertyLink","Tool","Embed","Object to be embedded.") obj.addProperty("App::PropertyBool","Refine","Embed", "True = refine resulting shape. False = output as is.") obj.Refine = getParamRefine() obj.addProperty("App::PropertyLength","Tolerance","Embed", "Tolerance when intersecting (fuzzy value). " "In addition to tolerances of the shapes.") obj.Proxy = self self.Type = "FeatureEmbed" def execute(self,selfobj): rst = JoinAPI.embed_legacy(selfobj.Base.Shape, selfobj.Tool.Shape, selfobj.Tolerance) if selfobj.Refine: rst = rst.removeSplitter() selfobj.Shape = rst class ViewProviderEmbed: """A View Provider for the Part Embed feature.""" def __init__(self,vobj): vobj.Proxy = self def getIcon(self): return getIconPath("Part_JoinEmbed.svg") def attach(self, vobj): self.ViewObject = vobj self.Object = vobj.Object def __getstate__(self): return None def __setstate__(self,state): return None def claimChildren(self): return [self.Object.Base, self.Object.Tool] def onDelete(self, feature, subelements): try: self.Object.Base.ViewObject.show() self.Object.Tool.ViewObject.show() except Exception as err: FreeCAD.Console.PrintError("Error in onDelete: " + str(err)) return True class CommandEmbed: """Command to create Part Embed feature.""" def GetResources(self): return {'Pixmap': getIconPath("Part_JoinEmbed.svg"), 'MenuText': QtCore.QT_TRANSLATE_NOOP("Part_JoinEmbed","Embed object"), 'Accel': "", 'ToolTip': QtCore.QT_TRANSLATE_NOOP("Part_JoinEmbed", "Fuses one object into another, taking care to preserve voids.")} def Activated(self): if len(FreeCADGui.Selection.getSelectionEx()) == 2: cmdCreateJoinFeature(name = "Embed", mode = "Embed") else: mb = QtGui.QMessageBox() mb.setIcon(mb.Icon.Warning) mb.setText(_translate("Part_JoinFeatures", "Select base object, then the object to embed, " "and then invoke this tool.", None)) mb.setWindowTitle(_translate("Part_JoinFeatures","Bad selection", None)) mb.exec_() def IsActive(self): if FreeCAD.ActiveDocument: return True else: return False # -------------------------- /Embed ------------------------------------------- # -------------------------- Cutout ------------------------------------------- def makeCutout(name): '''makeCutout(name): makes an Cutout object.''' obj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython",name) FeatureCutout(obj) if FreeCAD.GuiUp: ViewProviderCutout(obj.ViewObject) return obj class FeatureCutout: """The Part Cutout object.""" def __init__(self,obj): obj.addProperty("App::PropertyLink","Base","Cutout","Object to be cut.") obj.addProperty("App::PropertyLink","Tool","Cutout","Object to make cutout for.") obj.addProperty("App::PropertyBool","Refine","Cutout", "True = refine resulting shape. False = output as is.") obj.Refine = getParamRefine() obj.addProperty("App::PropertyLength","Tolerance","Cutout", "Tolerance when intersecting (fuzzy value). 
In addition to tolerances of the shapes.") obj.Proxy = self self.Type = "FeatureCutout" def execute(self,selfobj): rst = JoinAPI.cutout_legacy(selfobj.Base.Shape, selfobj.Tool.Shape, selfobj.Tolerance) if selfobj.Refine: rst = rst.removeSplitter() selfobj.Shape = rst class ViewProviderCutout: """A View Provider for the Part Cutout feature.""" def __init__(self,vobj): vobj.Proxy = self def getIcon(self): return getIconPath("Part_JoinCutout.svg") def attach(self, vobj): self.ViewObject = vobj self.Object = vobj.Object def __getstate__(self): return None def __setstate__(self,state): return None def claimChildren(self): return [self.Object.Base, self.Object.Tool] def onDelete(self, feature, subelements): try: self.Object.Base.ViewObject.show() self.Object.Tool.ViewObject.show() except Exception as err: FreeCAD.Console.PrintError("Error in onDelete: " + str(err)) return True class CommandCutout: """Command to create PartJoinFeature in Cutout mode.""" def GetResources(self): return {'Pixmap': getIconPath("Part_JoinCutout.svg"), 'MenuText': QtCore.QT_TRANSLATE_NOOP("Part_JoinCutout","Cutout for object"), 'Accel': "", 'ToolTip': QtCore.QT_TRANSLATE_NOOP("Part_JoinCutout", "Makes a cutout in one object to fit another object.")} def Activated(self): if len(FreeCADGui.Selection.getSelectionEx()) == 2: cmdCreateJoinFeature(name="Cutout", mode="Cutout") else: mb = QtGui.QMessageBox() mb.setIcon(mb.Icon.Warning) mb.setText(_translate("Part_JoinFeatures", "Select the object to make a cutout in, " "then the object that should fit into the cutout, " "and then invoke this tool.", None)) mb.setWindowTitle(_translate("Part_JoinFeatures","Bad selection", None)) mb.exec_() def IsActive(self): if FreeCAD.ActiveDocument: return True else: return False # -------------------------- /Cutout ------------------------------------------ def addCommands(): FreeCADGui.addCommand('Part_JoinCutout', CommandCutout()) FreeCADGui.addCommand('Part_JoinEmbed', CommandEmbed()) FreeCADGui.addCommand('Part_JoinConnect', CommandConnect())
lgpl-2.1
xuweiliang/Codelibrary
openstack_dashboard/test/integration_tests/pages/loginpage.py
18
3628
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from selenium.webdriver.common import by
from selenium.webdriver.common import keys

from openstack_dashboard.test.integration_tests.pages.admin.system import \
    overviewpage as system_overviewpage
from openstack_dashboard.test.integration_tests.pages import pageobject
from openstack_dashboard.test.integration_tests.pages.project.compute import \
    overviewpage as compute_overviewpage


class LoginPage(pageobject.PageObject):

    _login_username_field_locator = (by.By.ID, 'id_username')
    _login_password_field_locator = (by.By.ID, 'id_password')
    _login_submit_button_locator = (by.By.CSS_SELECTOR,
                                    'div.panel-footer button.btn')
    _login_logout_reason_locator = (by.By.ID, 'logout_reason')

    def __init__(self, driver, conf):
        super(LoginPage, self).__init__(driver, conf)
        self._page_title = "Login"

    def is_login_page(self):
        return (self.is_the_current_page() and
                self._is_element_visible(*self._login_submit_button_locator))

    @property
    def username(self):
        return self._get_element(*self._login_username_field_locator)

    @property
    def password(self):
        return self._get_element(*self._login_password_field_locator)

    @property
    def login_button(self):
        return self._get_element(*self._login_submit_button_locator)

    def _click_on_login_button(self):
        self.login_button.click()

    def _press_enter_on_login_button(self):
        self.login_button.send_keys(keys.Keys.RETURN)

    def is_logout_reason_displayed(self):
        return self._get_element(*self._login_logout_reason_locator)

    def login(self, user=None, password=None):
        return self.login_with_mouse_click(user, password)

    def login_with_mouse_click(self, user, password):
        return self._do_login(user, password, self._click_on_login_button)

    def login_with_enter_key(self, user, password):
        return self._do_login(user, password,
                              self._press_enter_on_login_button)

    def _do_login(self, user, password, login_method):
        if user == self.conf.identity.admin_username:
            if password is None:
                password = self.conf.identity.admin_password
            return self.login_as_admin(password, login_method)
        else:
            if password is None:
                password = self.conf.identity.password
            if user is None:
                user = self.conf.identity.username
            return self.login_as_user(user, password, login_method)

    def login_as_admin(self, password, login_method):
        self.username.send_keys(self.conf.identity.admin_username)
        self.password.send_keys(password)
        login_method()
        return system_overviewpage.OverviewPage(self.driver, self.conf)

    def login_as_user(self, user, password, login_method):
        self.username.send_keys(user)
        self.password.send_keys(password)
        login_method()
        return compute_overviewpage.OverviewPage(self.driver, self.conf)
apache-2.0
zlsun/XX-Net
code/default/gae_proxy/server/lib/google/net/proto/ProtocolBuffer.py
10
27352
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import array import httplib import re import struct try: import google.net.proto.proto1 as proto1 except ImportError: class ProtocolBufferDecodeError(Exception): pass class ProtocolBufferEncodeError(Exception): pass class ProtocolBufferReturnError(Exception): pass else: ProtocolBufferDecodeError = proto1.ProtocolBufferDecodeError ProtocolBufferEncodeError = proto1.ProtocolBufferEncodeError ProtocolBufferReturnError = proto1.ProtocolBufferReturnError __all__ = ['ProtocolMessage', 'Encoder', 'Decoder', 'ExtendableProtocolMessage', 'ProtocolBufferDecodeError', 'ProtocolBufferEncodeError', 'ProtocolBufferReturnError'] URL_RE = re.compile('^(https?)://([^/]+)(/.*)$') class ProtocolMessage: def __init__(self, contents=None): raise NotImplementedError def Clear(self): raise NotImplementedError def IsInitialized(self, debug_strs=None): raise NotImplementedError def Encode(self): try: return self._CEncode() except (NotImplementedError, AttributeError): e = Encoder() self.Output(e) return e.buffer().tostring() def SerializeToString(self): return self.Encode() def SerializePartialToString(self): try: return self._CEncodePartial() except (NotImplementedError, AttributeError): e = Encoder() self.OutputPartial(e) return e.buffer().tostring() def _CEncode(self): raise NotImplementedError def _CEncodePartial(self): raise NotImplementedError def ParseFromString(self, s): self.Clear() self.MergeFromString(s) def ParsePartialFromString(self, s): self.Clear() self.MergePartialFromString(s) def MergeFromString(self, s): self.MergePartialFromString(s) dbg = [] if not self.IsInitialized(dbg): raise ProtocolBufferDecodeError, '\n\t'.join(dbg) def MergePartialFromString(self, s): try: self._CMergeFromString(s) except (NotImplementedError, AttributeError): a = array.array('B') a.fromstring(s) d = Decoder(a, 0, len(a)) self.TryMerge(d) def _CMergeFromString(self, s): raise NotImplementedError def __getstate__(self): return self.Encode() def __setstate__(self, contents_): self.__init__(contents=contents_) def sendCommand(self, server, url, response, follow_redirects=1, secure=0, keyfile=None, certfile=None): data = self.Encode() if secure: if keyfile and certfile: conn = httplib.HTTPSConnection(server, key_file=keyfile, cert_file=certfile) else: conn = httplib.HTTPSConnection(server) else: conn = httplib.HTTPConnection(server) conn.putrequest("POST", url) conn.putheader("Content-Length", "%d" %len(data)) conn.endheaders() conn.send(data) resp = conn.getresponse() if follow_redirects > 0 and resp.status == 302: m = URL_RE.match(resp.getheader('Location')) if m: protocol, server, url = m.groups() return self.sendCommand(server, url, response, follow_redirects=follow_redirects - 1, secure=(protocol == 'https'), keyfile=keyfile, certfile=certfile) if resp.status != 200: raise ProtocolBufferReturnError(resp.status) if response is not None: response.ParseFromString(resp.read()) return response def sendSecureCommand(self, server, keyfile, 
certfile, url, response, follow_redirects=1): return self.sendCommand(server, url, response, follow_redirects=follow_redirects, secure=1, keyfile=keyfile, certfile=certfile) def __str__(self, prefix="", printElemNumber=0): raise NotImplementedError def ToASCII(self): return self._CToASCII(ProtocolMessage._SYMBOLIC_FULL_ASCII) def ToCompactASCII(self): return self._CToASCII(ProtocolMessage._NUMERIC_ASCII) def ToShortASCII(self): return self._CToASCII(ProtocolMessage._SYMBOLIC_SHORT_ASCII) _NUMERIC_ASCII = 0 _SYMBOLIC_SHORT_ASCII = 1 _SYMBOLIC_FULL_ASCII = 2 def _CToASCII(self, output_format): raise NotImplementedError def ParseASCII(self, ascii_string): raise NotImplementedError def ParseASCIIIgnoreUnknown(self, ascii_string): raise NotImplementedError def Equals(self, other): raise NotImplementedError def __eq__(self, other): if other.__class__ is self.__class__: return self.Equals(other) return NotImplemented def __ne__(self, other): if other.__class__ is self.__class__: return not self.Equals(other) return NotImplemented def Output(self, e): dbg = [] if not self.IsInitialized(dbg): raise ProtocolBufferEncodeError, '\n\t'.join(dbg) self.OutputUnchecked(e) return def OutputUnchecked(self, e): raise NotImplementedError def OutputPartial(self, e): raise NotImplementedError def Parse(self, d): self.Clear() self.Merge(d) return def Merge(self, d): self.TryMerge(d) dbg = [] if not self.IsInitialized(dbg): raise ProtocolBufferDecodeError, '\n\t'.join(dbg) return def TryMerge(self, d): raise NotImplementedError def CopyFrom(self, pb): if (pb == self): return self.Clear() self.MergeFrom(pb) def MergeFrom(self, pb): raise NotImplementedError def lengthVarInt32(self, n): return self.lengthVarInt64(n) def lengthVarInt64(self, n): if n < 0: return 10 result = 0 while 1: result += 1 n >>= 7 if n == 0: break return result def lengthString(self, n): return self.lengthVarInt32(n) + n def DebugFormat(self, value): return "%s" % value def DebugFormatInt32(self, value): if (value <= -2000000000 or value >= 2000000000): return self.DebugFormatFixed32(value) return "%d" % value def DebugFormatInt64(self, value): if (value <= -20000000000000 or value >= 20000000000000): return self.DebugFormatFixed64(value) return "%d" % value def DebugFormatString(self, value): def escape(c): o = ord(c) if o == 10: return r"\n" if o == 39: return r"\'" if o == 34: return r'\"' if o == 92: return r"\\" if o >= 127 or o < 32: return "\\%03o" % o return c return '"' + "".join([escape(c) for c in value]) + '"' def DebugFormatFloat(self, value): return "%ff" % value def DebugFormatFixed32(self, value): if (value < 0): value += (1L<<32) return "0x%x" % value def DebugFormatFixed64(self, value): if (value < 0): value += (1L<<64) return "0x%x" % value def DebugFormatBool(self, value): if value: return "true" else: return "false" TYPE_DOUBLE = 1 TYPE_FLOAT = 2 TYPE_INT64 = 3 TYPE_UINT64 = 4 TYPE_INT32 = 5 TYPE_FIXED64 = 6 TYPE_FIXED32 = 7 TYPE_BOOL = 8 TYPE_STRING = 9 TYPE_GROUP = 10 TYPE_FOREIGN = 11 _TYPE_TO_DEBUG_STRING = { TYPE_INT32: ProtocolMessage.DebugFormatInt32, TYPE_INT64: ProtocolMessage.DebugFormatInt64, TYPE_UINT64: ProtocolMessage.DebugFormatInt64, TYPE_FLOAT: ProtocolMessage.DebugFormatFloat, TYPE_STRING: ProtocolMessage.DebugFormatString, TYPE_FIXED32: ProtocolMessage.DebugFormatFixed32, TYPE_FIXED64: ProtocolMessage.DebugFormatFixed64, TYPE_BOOL: ProtocolMessage.DebugFormatBool } class Encoder: NUMERIC = 0 DOUBLE = 1 STRING = 2 STARTGROUP = 3 ENDGROUP = 4 FLOAT = 5 MAX_TYPE = 6 def __init__(self): self.buf = 
array.array('B') return def buffer(self): return self.buf def put8(self, v): if v < 0 or v >= (1<<8): raise ProtocolBufferEncodeError, "u8 too big" self.buf.append(v & 255) return def put16(self, v): if v < 0 or v >= (1<<16): raise ProtocolBufferEncodeError, "u16 too big" self.buf.append((v >> 0) & 255) self.buf.append((v >> 8) & 255) return def put32(self, v): if v < 0 or v >= (1L<<32): raise ProtocolBufferEncodeError, "u32 too big" self.buf.append((v >> 0) & 255) self.buf.append((v >> 8) & 255) self.buf.append((v >> 16) & 255) self.buf.append((v >> 24) & 255) return def put64(self, v): if v < 0 or v >= (1L<<64): raise ProtocolBufferEncodeError, "u64 too big" self.buf.append((v >> 0) & 255) self.buf.append((v >> 8) & 255) self.buf.append((v >> 16) & 255) self.buf.append((v >> 24) & 255) self.buf.append((v >> 32) & 255) self.buf.append((v >> 40) & 255) self.buf.append((v >> 48) & 255) self.buf.append((v >> 56) & 255) return def putVarInt32(self, v): buf_append = self.buf.append if v & 127 == v: buf_append(v) return if v >= 0x80000000 or v < -0x80000000: raise ProtocolBufferEncodeError, "int32 too big" if v < 0: v += 0x10000000000000000 while True: bits = v & 127 v >>= 7 if v: bits |= 128 buf_append(bits) if not v: break return def putVarInt64(self, v): buf_append = self.buf.append if v >= 0x8000000000000000 or v < -0x8000000000000000: raise ProtocolBufferEncodeError, "int64 too big" if v < 0: v += 0x10000000000000000 while True: bits = v & 127 v >>= 7 if v: bits |= 128 buf_append(bits) if not v: break return def putVarUint64(self, v): buf_append = self.buf.append if v < 0 or v >= 0x10000000000000000: raise ProtocolBufferEncodeError, "uint64 too big" while True: bits = v & 127 v >>= 7 if v: bits |= 128 buf_append(bits) if not v: break return def putFloat(self, v): a = array.array('B') a.fromstring(struct.pack("<f", v)) self.buf.extend(a) return def putDouble(self, v): a = array.array('B') a.fromstring(struct.pack("<d", v)) self.buf.extend(a) return def putBoolean(self, v): if v: self.buf.append(1) else: self.buf.append(0) return def putPrefixedString(self, v): v = str(v) self.putVarInt32(len(v)) self.buf.fromstring(v) return def putRawString(self, v): self.buf.fromstring(v) _TYPE_TO_METHOD = { TYPE_DOUBLE: putDouble, TYPE_FLOAT: putFloat, TYPE_FIXED64: put64, TYPE_FIXED32: put32, TYPE_INT32: putVarInt32, TYPE_INT64: putVarInt64, TYPE_UINT64: putVarUint64, TYPE_BOOL: putBoolean, TYPE_STRING: putPrefixedString } _TYPE_TO_BYTE_SIZE = { TYPE_DOUBLE: 8, TYPE_FLOAT: 4, TYPE_FIXED64: 8, TYPE_FIXED32: 4, TYPE_BOOL: 1 } class Decoder: def __init__(self, buf, idx, limit): self.buf = buf self.idx = idx self.limit = limit return def avail(self): return self.limit - self.idx def buffer(self): return self.buf def pos(self): return self.idx def skip(self, n): if self.idx + n > self.limit: raise ProtocolBufferDecodeError, "truncated" self.idx += n return def skipData(self, tag): t = tag & 7 if t == Encoder.NUMERIC: self.getVarInt64() elif t == Encoder.DOUBLE: self.skip(8) elif t == Encoder.STRING: n = self.getVarInt32() self.skip(n) elif t == Encoder.STARTGROUP: while 1: t = self.getVarInt32() if (t & 7) == Encoder.ENDGROUP: break else: self.skipData(t) if (t - Encoder.ENDGROUP) != (tag - Encoder.STARTGROUP): raise ProtocolBufferDecodeError, "corrupted" elif t == Encoder.ENDGROUP: raise ProtocolBufferDecodeError, "corrupted" elif t == Encoder.FLOAT: self.skip(4) else: raise ProtocolBufferDecodeError, "corrupted" def get8(self): if self.idx >= self.limit: raise ProtocolBufferDecodeError, "truncated" c = 
self.buf[self.idx] self.idx += 1 return c def get16(self): if self.idx + 2 > self.limit: raise ProtocolBufferDecodeError, "truncated" c = self.buf[self.idx] d = self.buf[self.idx + 1] self.idx += 2 return (d << 8) | c def get32(self): if self.idx + 4 > self.limit: raise ProtocolBufferDecodeError, "truncated" c = self.buf[self.idx] d = self.buf[self.idx + 1] e = self.buf[self.idx + 2] f = long(self.buf[self.idx + 3]) self.idx += 4 return (f << 24) | (e << 16) | (d << 8) | c def get64(self): if self.idx + 8 > self.limit: raise ProtocolBufferDecodeError, "truncated" c = self.buf[self.idx] d = self.buf[self.idx + 1] e = self.buf[self.idx + 2] f = long(self.buf[self.idx + 3]) g = long(self.buf[self.idx + 4]) h = long(self.buf[self.idx + 5]) i = long(self.buf[self.idx + 6]) j = long(self.buf[self.idx + 7]) self.idx += 8 return ((j << 56) | (i << 48) | (h << 40) | (g << 32) | (f << 24) | (e << 16) | (d << 8) | c) def getVarInt32(self): b = self.get8() if not (b & 128): return b result = long(0) shift = 0 while 1: result |= (long(b & 127) << shift) shift += 7 if not (b & 128): if result >= 0x10000000000000000L: raise ProtocolBufferDecodeError, "corrupted" break if shift >= 64: raise ProtocolBufferDecodeError, "corrupted" b = self.get8() if result >= 0x8000000000000000L: result -= 0x10000000000000000L if result >= 0x80000000L or result < -0x80000000L: raise ProtocolBufferDecodeError, "corrupted" return result def getVarInt64(self): result = self.getVarUint64() if result >= (1L << 63): result -= (1L << 64) return result def getVarUint64(self): result = long(0) shift = 0 while 1: if shift >= 64: raise ProtocolBufferDecodeError, "corrupted" b = self.get8() result |= (long(b & 127) << shift) shift += 7 if not (b & 128): if result >= (1L << 64): raise ProtocolBufferDecodeError, "corrupted" return result return result def getFloat(self): if self.idx + 4 > self.limit: raise ProtocolBufferDecodeError, "truncated" a = self.buf[self.idx:self.idx+4] self.idx += 4 return struct.unpack("<f", a)[0] def getDouble(self): if self.idx + 8 > self.limit: raise ProtocolBufferDecodeError, "truncated" a = self.buf[self.idx:self.idx+8] self.idx += 8 return struct.unpack("<d", a)[0] def getBoolean(self): b = self.get8() if b != 0 and b != 1: raise ProtocolBufferDecodeError, "corrupted" return b def getPrefixedString(self): length = self.getVarInt32() if self.idx + length > self.limit: raise ProtocolBufferDecodeError, "truncated" r = self.buf[self.idx : self.idx + length] self.idx += length return r.tostring() def getRawString(self): r = self.buf[self.idx:self.limit] self.idx = self.limit return r.tostring() _TYPE_TO_METHOD = { TYPE_DOUBLE: getDouble, TYPE_FLOAT: getFloat, TYPE_FIXED64: get64, TYPE_FIXED32: get32, TYPE_INT32: getVarInt32, TYPE_INT64: getVarInt64, TYPE_UINT64: getVarUint64, TYPE_BOOL: getBoolean, TYPE_STRING: getPrefixedString } class ExtensionIdentifier(object): __slots__ = ('full_name', 'number', 'field_type', 'wire_tag', 'is_repeated', 'default', 'containing_cls', 'composite_cls', 'message_name') def __init__(self, full_name, number, field_type, wire_tag, is_repeated, default): self.full_name = full_name self.number = number self.field_type = field_type self.wire_tag = wire_tag self.is_repeated = is_repeated self.default = default class ExtendableProtocolMessage(ProtocolMessage): def HasExtension(self, extension): self._VerifyExtensionIdentifier(extension) return extension in self._extension_fields def ClearExtension(self, extension): self._VerifyExtensionIdentifier(extension) if extension in 
self._extension_fields: del self._extension_fields[extension] def GetExtension(self, extension, index=None): self._VerifyExtensionIdentifier(extension) if extension in self._extension_fields: result = self._extension_fields[extension] else: if extension.is_repeated: result = [] elif extension.composite_cls: result = extension.composite_cls() else: result = extension.default if extension.is_repeated: result = result[index] return result def SetExtension(self, extension, *args): self._VerifyExtensionIdentifier(extension) if extension.composite_cls: raise TypeError( 'Cannot assign to extension "%s" because it is a composite type.' % extension.full_name) if extension.is_repeated: if (len(args) != 2): raise TypeError( 'SetExtension(extension, index, value) for repeated extension ' 'takes exactly 3 arguments: (%d given)' % len(args)) index = args[0] value = args[1] self._extension_fields[extension][index] = value else: if (len(args) != 1): raise TypeError( 'SetExtension(extension, value) for singular extension ' 'takes exactly 3 arguments: (%d given)' % len(args)) value = args[0] self._extension_fields[extension] = value def MutableExtension(self, extension, index=None): self._VerifyExtensionIdentifier(extension) if extension.composite_cls is None: raise TypeError( 'MutableExtension() cannot be applied to "%s", because it is not a ' 'composite type.' % extension.full_name) if extension.is_repeated: if index is None: raise TypeError( 'MutableExtension(extension, index) for repeated extension ' 'takes exactly 2 arguments: (1 given)') return self.GetExtension(extension, index) if extension in self._extension_fields: return self._extension_fields[extension] else: result = extension.composite_cls() self._extension_fields[extension] = result return result def ExtensionList(self, extension): self._VerifyExtensionIdentifier(extension) if not extension.is_repeated: raise TypeError( 'ExtensionList() cannot be applied to "%s", because it is not a ' 'repeated extension.' % extension.full_name) if extension in self._extension_fields: return self._extension_fields[extension] result = [] self._extension_fields[extension] = result return result def ExtensionSize(self, extension): self._VerifyExtensionIdentifier(extension) if not extension.is_repeated: raise TypeError( 'ExtensionSize() cannot be applied to "%s", because it is not a ' 'repeated extension.' % extension.full_name) if extension in self._extension_fields: return len(self._extension_fields[extension]) return 0 def AddExtension(self, extension, value=None): self._VerifyExtensionIdentifier(extension) if not extension.is_repeated: raise TypeError( 'AddExtension() cannot be applied to "%s", because it is not a ' 'repeated extension.' % extension.full_name) if extension in self._extension_fields: field = self._extension_fields[extension] else: field = [] self._extension_fields[extension] = field if extension.composite_cls: if value is not None: raise TypeError( 'value must not be set in AddExtension() for "%s", because it is ' 'a message type extension. Set values on the returned message ' 'instead.' % extension.full_name) msg = extension.composite_cls() field.append(msg) return msg field.append(value) def _VerifyExtensionIdentifier(self, extension): if extension.containing_cls != self.__class__: raise TypeError("Containing type of %s is %s, but not %s." 
% (extension.full_name, extension.containing_cls.__name__, self.__class__.__name__)) def _MergeExtensionFields(self, x): for ext, val in x._extension_fields.items(): if ext.is_repeated: for i in xrange(len(val)): if ext.composite_cls is None: self.AddExtension(ext, val[i]) else: self.AddExtension(ext).MergeFrom(val[i]) else: if ext.composite_cls is None: self.SetExtension(ext, val) else: self.MutableExtension(ext).MergeFrom(val) def _ListExtensions(self): result = [ext for ext in self._extension_fields.keys() if (not ext.is_repeated) or self.ExtensionSize(ext) > 0] result.sort(key = lambda item: item.number) return result def _ExtensionEquals(self, x): extensions = self._ListExtensions() if extensions != x._ListExtensions(): return False for ext in extensions: if ext.is_repeated: if self.ExtensionSize(ext) != x.ExtensionSize(ext): return False for e1, e2 in zip(self.ExtensionList(ext), x.ExtensionList(ext)): if e1 != e2: return False else: if self.GetExtension(ext) != x.GetExtension(ext): return False return True def _OutputExtensionFields(self, out, partial, extensions, start_index, end_field_number): def OutputSingleField(ext, value): out.putVarInt32(ext.wire_tag) if ext.field_type == TYPE_GROUP: if partial: value.OutputPartial(out) else: value.OutputUnchecked(out) out.putVarInt32(ext.wire_tag + 1) elif ext.field_type == TYPE_FOREIGN: if partial: out.putVarInt32(value.ByteSizePartial()) value.OutputPartial(out) else: out.putVarInt32(value.ByteSize()) value.OutputUnchecked(out) else: Encoder._TYPE_TO_METHOD[ext.field_type](out, value) size = len(extensions) for ext_index in xrange(start_index, size): ext = extensions[ext_index] if ext.number >= end_field_number: return ext_index if ext.is_repeated: for i in xrange(len(self._extension_fields[ext])): OutputSingleField(ext, self._extension_fields[ext][i]) else: OutputSingleField(ext, self._extension_fields[ext]) return size def _ParseOneExtensionField(self, wire_tag, d): number = wire_tag >> 3 if number in self._extensions_by_field_number: ext = self._extensions_by_field_number[number] if wire_tag != ext.wire_tag: return if ext.field_type == TYPE_FOREIGN: length = d.getVarInt32() tmp = Decoder(d.buffer(), d.pos(), d.pos() + length) if ext.is_repeated: self.AddExtension(ext).TryMerge(tmp) else: self.MutableExtension(ext).TryMerge(tmp) d.skip(length) elif ext.field_type == TYPE_GROUP: if ext.is_repeated: self.AddExtension(ext).TryMerge(d) else: self.MutableExtension(ext).TryMerge(d) else: value = Decoder._TYPE_TO_METHOD[ext.field_type](d) if ext.is_repeated: self.AddExtension(ext, value) else: self.SetExtension(ext, value) else: d.skipData(wire_tag) def _ExtensionByteSize(self, partial): size = 0 for extension, value in self._extension_fields.items(): ftype = extension.field_type tag_size = self.lengthVarInt64(extension.wire_tag) if ftype == TYPE_GROUP: tag_size *= 2 if extension.is_repeated: size += tag_size * len(value) for single_value in value: size += self._FieldByteSize(ftype, single_value, partial) else: size += tag_size + self._FieldByteSize(ftype, value, partial) return size def _FieldByteSize(self, ftype, value, partial): size = 0 if ftype == TYPE_STRING: size = self.lengthString(len(value)) elif ftype == TYPE_FOREIGN or ftype == TYPE_GROUP: if partial: size = self.lengthString(value.ByteSizePartial()) else: size = self.lengthString(value.ByteSize()) elif ftype == TYPE_INT64 or ftype == TYPE_UINT64 or ftype == TYPE_INT32: size = self.lengthVarInt64(value) else: if ftype in Encoder._TYPE_TO_BYTE_SIZE: size = 
Encoder._TYPE_TO_BYTE_SIZE[ftype] else: raise AssertionError( 'Extension type %d is not recognized.' % ftype) return size def _ExtensionDebugString(self, prefix, printElemNumber): res = '' extensions = self._ListExtensions() for extension in extensions: value = self._extension_fields[extension] if extension.is_repeated: cnt = 0 for e in value: elm="" if printElemNumber: elm = "(%d)" % cnt if extension.composite_cls is not None: res += prefix + "[%s%s] {\n" % (extension.full_name, elm) res += e.__str__(prefix + " ", printElemNumber) res += prefix + "}\n" else: if extension.composite_cls is not None: res += prefix + "[%s] {\n" % extension.full_name res += value.__str__( prefix + " ", printElemNumber) res += prefix + "}\n" else: if extension.field_type in _TYPE_TO_DEBUG_STRING: text_value = _TYPE_TO_DEBUG_STRING[ extension.field_type](self, value) else: text_value = self.DebugFormat(value) res += prefix + "[%s]: %s\n" % (extension.full_name, text_value) return res @staticmethod def _RegisterExtension(cls, extension, composite_cls=None): extension.containing_cls = cls extension.composite_cls = composite_cls if composite_cls is not None: extension.message_name = composite_cls._PROTO_DESCRIPTOR_NAME actual_handle = cls._extensions_by_field_number.setdefault( extension.number, extension) if actual_handle is not extension: raise AssertionError( 'Extensions "%s" and "%s" both try to extend message type "%s" with ' 'field number %d.' % (extension.full_name, actual_handle.full_name, cls.__name__, extension.number))
bsd-2-clause
nbborlongan/geonode
geonode/sitemap.py
35
1218
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################

from django.contrib.sitemaps import Sitemap

from geonode.maps.models import Layer, Map


class LayerSitemap(Sitemap):
    changefreq = "never"
    priority = 0.5

    def items(self):
        return Layer.objects.all()

    def lastmod(self, obj):
        return obj.date


class MapSitemap(Sitemap):
    changefreq = "never"
    priority = 0.5

    def items(self):
        return Map.objects.all()
gpl-3.0
tensorflow/lucid
tests/recipes/activation_atlas.py
1
1157
import pytest

from lucid.modelzoo.aligned_activations import NUMBER_OF_AVAILABLE_SAMPLES
from lucid.modelzoo.vision_models import AlexNet, InceptionV1
from lucid.recipes.activation_atlas import activation_atlas, aligned_activation_atlas
from lucid.misc.io import save

# Run test with just 1/10th of available samples
subset = NUMBER_OF_AVAILABLE_SAMPLES // 10


@pytest.mark.skip(reason="takes too long to complete on CI")
def test_activation_atlas():
    model = AlexNet()
    layer = model.layers[1]
    atlas = activation_atlas(model, layer, number_activations=subset)
    save(atlas, "tests/recipes/results/activation_atlas/atlas.jpg")


@pytest.mark.skip(reason="takes too long to complete on CI")
def test_aligned_activation_atlas():
    model1 = AlexNet()
    layer1 = model1.layers[1]
    model2 = InceptionV1()
    layer2 = model2.layers[8]  # mixed4d
    atlasses = aligned_activation_atlas(
        model1, layer1, model2, layer2, number_activations=subset
    )
    # Format the output path inside the loop, where `index` is defined.
    for index, atlas in enumerate(atlasses):
        path = "tests/recipes/results/activation_atlas/aligned_atlas-{}-of-{}.jpg".format(
            index, len(atlasses)
        )
        save(atlas, path)
apache-2.0
JeanMarieMineau/ISN-s-Cube
Bouton.py
1
5185
'''
Created on 5 May 2017

Copyright 2017 Jean-Marie Mineau, Maxime Keller

This file is part of "ISN's Cube".

"ISN's Cube" is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

"ISN's Cube" is distributed in the hope that it will be useful and
recreative, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with "ISN's Cube". If not, see <http://www.gnu.org/licenses/>.

@author: <[email protected]>

Classes for the buttons: one class that manages the whole set of buttons,
and another that is the button itself.
'''

import pygame


class Boutons:
    """Class managing the creation of buttons, clicks on them, and their display."""

    def __init__(self):
        """This class has a list of buttons as its only attribute."""
        self.boutons = []

    def nouveauBouton(self, pos, image=None, couleur=(255, 0, 255), size=(60, 60),
                      callback=lambda *args: None, argsCallback=[]):
        """Create a new button."""
        bouton = Bouton(pos, self, image=image, couleur=couleur, size=size,
                        callback=callback, argsCallback=argsCallback)
        self.boutons.append(bouton)
        return bouton

    def update(self, events):
        """Handle clicks on the buttons."""
        for event in events:
            if event.type == pygame.MOUSEBUTTONDOWN:
                pos = event.pos
                self.callbackClic(pos)

    def callbackClic(self, pos):
        """Method called on a click."""
        for bouton in self.boutons:
            if bouton.rect.collidepoint(*pos):
                bouton.callback(*bouton.argsCallback)
                return

    def display(self, screen):
        """Display the buttons."""
        for bouton in self.boutons:
            bouton.display(screen)


class Bouton:
    """A button."""

    def __init__(self, pos, parent, image=None, couleur=(255, 0, 255), size=(60, 60),
                 callback=lambda *args: None, argsCallback=[]):
        """Create a button. If an image is given it is loaded; otherwise a
        rectangle of size `size` and colour `couleur` is displayed."""
        self.parent = parent
        self.pos = pos
        if image is not None:
            self.surface = pygame.image.load(image).convert_alpha()
            self.rect = self.surface.get_rect()
        else:
            self.surface = pygame.Surface(size)
            self.surface.fill(couleur)
            self.rect = self.surface.get_rect()
        self.rect = self.rect.move(self.pos)
        self.callback = callback
        self.argsCallback = argsCallback

    def suppr(self):
        """Remove the button."""
        self.parent.boutons.remove(self)

    def display(self, screen):
        """Display the button."""
        screen.blit(self.surface, self.rect)


def callbackTest(a, b, c, d, e, f, g):
    """Callback test."""
    print(a)
    print(b)
    print(c)
    print(d)
    print(e)
    print(f)
    print(g)


def callback2(*args):
    print("toto")


if __name__ == "__main__":
    boutons = Boutons()
    screen = pygame.display.set_mode((1000, 400))
    clock = pygame.time.Clock()
    pos = (50, 50)
    bouton = boutons.nouveauBouton(pos, callback=callbackTest,
                                   argsCallback=["A", "B", "C", "D", "E", "F", "G"])
    pos2 = (50, 200)
    bouton2 = boutons.nouveauBouton(pos2, callback=callback2,
                                    argsCallback=["H", "I", "J", "K", "L", "M", "N"])
    while True:
        screen.fill((150, 150, 150))
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                exit()
                quit()
        boutons.update(events)
        boutons.display(screen)
        pygame.display.update()
        clock.tick(30)
gpl-3.0
omprakasha/odoo
addons/l10n_do/__openerp__.py
309
2992
# -*- coding: utf-8 -*-
# #############################################################################
#
# First author: Jose Ernesto Mendez <[email protected]> (Open Business Solutions SRL.)
# Copyright (c) 2012 -TODAY Open Business Solutions, SRL. (http://obsdr.com). All rights reserved.
#
# This is a fork to upgrade to odoo 8.0
# by Marcos Organizador de Negocios - Eneldo Serrata - www.marcos.org.do
#
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company like Marcos Organizador de Negocios.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
{
    'name': 'Dominican Republic - Accounting',
    'version': '1.0',
    'category': 'Localization/Account Charts',
    'description': """
This is the base module to manage the accounting chart for Dominican Republic.
==============================================================================

* Chart of Accounts.
* The Tax Code Chart for Dominican Republic
* The main taxes used in Dominican Republic
* Fiscal position for local """,
    'author': 'Eneldo Serrata - Marcos Organizador de Negocios, SRL.',
    'website': 'http://marcos.do',
    'depends': ['account', 'base_iban'],
    'data': [
        # basic accounting data
        'data/ir_sequence_type.xml',
        'data/ir_sequence.xml',
        'data/account_journal.xml',
        'data/account.account.type.csv',
        'data/account.account.template.csv',
        'data/account.tax.code.template.csv',
        'data/account_chart_template.xml',
        'data/account.tax.template.csv',
        'data/l10n_do_base_data.xml',
        # Adds fiscal position
        'data/account.fiscal.position.template.csv',
        'data/account.fiscal.position.tax.template.csv',
        # configuration wizard, views, reports...
        'data/l10n_do_wizard.xml'
    ],
    'test': [],
    'demo': [],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
clay23/lab4
lib/werkzeug/contrib/fixers.py
464
9949
# -*- coding: utf-8 -*- """ werkzeug.contrib.fixers ~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 0.5 This module includes various helpers that fix bugs in web servers. They may be necessary for some versions of a buggy web server but not others. We try to stay updated with the status of the bugs as good as possible but you have to make sure whether they fix the problem you encounter. If you notice bugs in webservers not fixed in this module consider contributing a patch. :copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ try: from urllib import unquote except ImportError: from urllib.parse import unquote from werkzeug.http import parse_options_header, parse_cache_control_header, \ parse_set_header from werkzeug.useragents import UserAgent from werkzeug.datastructures import Headers, ResponseCacheControl class CGIRootFix(object): """Wrap the application in this middleware if you are using FastCGI or CGI and you have problems with your app root being set to the cgi script's path instead of the path users are going to visit .. versionchanged:: 0.9 Added `app_root` parameter and renamed from `LighttpdCGIRootFix`. :param app: the WSGI application :param app_root: Defaulting to ``'/'``, you can set this to something else if your app is mounted somewhere else. """ def __init__(self, app, app_root='/'): self.app = app self.app_root = app_root def __call__(self, environ, start_response): # only set PATH_INFO for older versions of Lighty or if no # server software is provided. That's because the test was # added in newer Werkzeug versions and we don't want to break # people's code if they are using this fixer in a test that # does not set the SERVER_SOFTWARE key. if 'SERVER_SOFTWARE' not in environ or \ environ['SERVER_SOFTWARE'] < 'lighttpd/1.4.28': environ['PATH_INFO'] = environ.get('SCRIPT_NAME', '') + \ environ.get('PATH_INFO', '') environ['SCRIPT_NAME'] = self.app_root.strip('/') return self.app(environ, start_response) # backwards compatibility LighttpdCGIRootFix = CGIRootFix class PathInfoFromRequestUriFix(object): """On windows environment variables are limited to the system charset which makes it impossible to store the `PATH_INFO` variable in the environment without loss of information on some systems. This is for example a problem for CGI scripts on a Windows Apache. This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`, `REQUEST_URL`, or `UNENCODED_URL` (whatever is available). Thus the fix can only be applied if the webserver supports either of these variables. :param app: the WSGI application """ def __init__(self, app): self.app = app def __call__(self, environ, start_response): for key in 'REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL': if key not in environ: continue request_uri = unquote(environ[key]) script_name = unquote(environ.get('SCRIPT_NAME', '')) if request_uri.startswith(script_name): environ['PATH_INFO'] = request_uri[len(script_name):] \ .split('?', 1)[0] break return self.app(environ, start_response) class ProxyFix(object): """This middleware can be applied to add HTTP proxy support to an application that was not designed with HTTP proxies in mind. It sets `REMOTE_ADDR`, `HTTP_HOST` from `X-Forwarded` headers. If you have more than one proxy server in front of your app, set `num_proxies` accordingly. Do not use this middleware in non-proxy setups for security reasons. 
The original values of `REMOTE_ADDR` and `HTTP_HOST` are stored in the WSGI environment as `werkzeug.proxy_fix.orig_remote_addr` and `werkzeug.proxy_fix.orig_http_host`. :param app: the WSGI application :param num_proxies: the number of proxy servers in front of the app. """ def __init__(self, app, num_proxies=1): self.app = app self.num_proxies = num_proxies def get_remote_addr(self, forwarded_for): """Selects the new remote addr from the given list of ips in X-Forwarded-For. By default it picks the one that the `num_proxies` proxy server provides. Before 0.9 it would always pick the first. .. versionadded:: 0.8 """ if len(forwarded_for) >= self.num_proxies: return forwarded_for[-1 * self.num_proxies] def __call__(self, environ, start_response): getter = environ.get forwarded_proto = getter('HTTP_X_FORWARDED_PROTO', '') forwarded_for = getter('HTTP_X_FORWARDED_FOR', '').split(',') forwarded_host = getter('HTTP_X_FORWARDED_HOST', '') environ.update({ 'werkzeug.proxy_fix.orig_wsgi_url_scheme': getter('wsgi.url_scheme'), 'werkzeug.proxy_fix.orig_remote_addr': getter('REMOTE_ADDR'), 'werkzeug.proxy_fix.orig_http_host': getter('HTTP_HOST') }) forwarded_for = [x for x in [x.strip() for x in forwarded_for] if x] remote_addr = self.get_remote_addr(forwarded_for) if remote_addr is not None: environ['REMOTE_ADDR'] = remote_addr if forwarded_host: environ['HTTP_HOST'] = forwarded_host if forwarded_proto: environ['wsgi.url_scheme'] = forwarded_proto return self.app(environ, start_response) class HeaderRewriterFix(object): """This middleware can remove response headers and add others. This is for example useful to remove the `Date` header from responses if you are using a server that adds that header, no matter if it's present or not or to add `X-Powered-By` headers:: app = HeaderRewriterFix(app, remove_headers=['Date'], add_headers=[('X-Powered-By', 'WSGI')]) :param app: the WSGI application :param remove_headers: a sequence of header keys that should be removed. :param add_headers: a sequence of ``(key, value)`` tuples that should be added. """ def __init__(self, app, remove_headers=None, add_headers=None): self.app = app self.remove_headers = set(x.lower() for x in (remove_headers or ())) self.add_headers = list(add_headers or ()) def __call__(self, environ, start_response): def rewriting_start_response(status, headers, exc_info=None): new_headers = [] for key, value in headers: if key.lower() not in self.remove_headers: new_headers.append((key, value)) new_headers += self.add_headers return start_response(status, new_headers, exc_info) return self.app(environ, rewriting_start_response) class InternetExplorerFix(object): """This middleware fixes a couple of bugs with Microsoft Internet Explorer. Currently the following fixes are applied: - removing of `Vary` headers for unsupported mimetypes which causes troubles with caching. Can be disabled by passing ``fix_vary=False`` to the constructor. see: http://support.microsoft.com/kb/824847/en-us - removes offending headers to work around caching bugs in Internet Explorer if `Content-Disposition` is set. Can be disabled by passing ``fix_attach=False`` to the constructor. If it does not detect affected Internet Explorer versions it won't touch the request / response. """ # This code was inspired by Django fixers for the same bugs. 
The # fix_vary and fix_attach fixers were originally implemented in Django # by Michael Axiak and is available as part of the Django project: # http://code.djangoproject.com/ticket/4148 def __init__(self, app, fix_vary=True, fix_attach=True): self.app = app self.fix_vary = fix_vary self.fix_attach = fix_attach def fix_headers(self, environ, headers, status=None): if self.fix_vary: header = headers.get('content-type', '') mimetype, options = parse_options_header(header) if mimetype not in ('text/html', 'text/plain', 'text/sgml'): headers.pop('vary', None) if self.fix_attach and 'content-disposition' in headers: pragma = parse_set_header(headers.get('pragma', '')) pragma.discard('no-cache') header = pragma.to_header() if not header: headers.pop('pragma', '') else: headers['Pragma'] = header header = headers.get('cache-control', '') if header: cc = parse_cache_control_header(header, cls=ResponseCacheControl) cc.no_cache = None cc.no_store = False header = cc.to_header() if not header: headers.pop('cache-control', '') else: headers['Cache-Control'] = header def run_fixed(self, environ, start_response): def fixing_start_response(status, headers, exc_info=None): headers = Headers(headers) self.fix_headers(environ, headers, status) return start_response(status, headers.to_wsgi_list(), exc_info) return self.app(environ, fixing_start_response) def __call__(self, environ, start_response): ua = UserAgent(environ) if ua.browser != 'msie': return self.app(environ, start_response) return self.run_fixed(environ, start_response)
apache-2.0
akashsinghal/Speech-Memorization-App
speech/Swift/Speech-gRPC-Streaming/env/lib/python3.6/site-packages/setuptools/command/egg_info.py
50
25016
"""setuptools.command.egg_info Create a distribution's .egg-info directory and contents""" from distutils.filelist import FileList as _FileList from distutils.errors import DistutilsInternalError from distutils.util import convert_path from distutils import log import distutils.errors import distutils.filelist import os import re import sys import io import warnings import time import collections from setuptools.extern import six from setuptools.extern.six.moves import map from setuptools import Command from setuptools.command.sdist import sdist from setuptools.command.sdist import walk_revctrl from setuptools.command.setopt import edit_config from setuptools.command import bdist_egg from pkg_resources import ( parse_requirements, safe_name, parse_version, safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename) import setuptools.unicode_utils as unicode_utils from setuptools.glob import glob from pkg_resources.extern import packaging def translate_pattern(glob): """ Translate a file path glob like '*.txt' in to a regular expression. This differs from fnmatch.translate which allows wildcards to match directory separators. It also knows about '**/' which matches any number of directories. """ pat = '' # This will split on '/' within [character classes]. This is deliberate. chunks = glob.split(os.path.sep) sep = re.escape(os.sep) valid_char = '[^%s]' % (sep,) for c, chunk in enumerate(chunks): last_chunk = c == len(chunks) - 1 # Chunks that are a literal ** are globstars. They match anything. if chunk == '**': if last_chunk: # Match anything if this is the last component pat += '.*' else: # Match '(name/)*' pat += '(?:%s+%s)*' % (valid_char, sep) continue # Break here as the whole path component has been handled # Find any special characters in the remainder i = 0 chunk_len = len(chunk) while i < chunk_len: char = chunk[i] if char == '*': # Match any number of name characters pat += valid_char + '*' elif char == '?': # Match a name character pat += valid_char elif char == '[': # Character class inner_i = i + 1 # Skip initial !/] chars if inner_i < chunk_len and chunk[inner_i] == '!': inner_i = inner_i + 1 if inner_i < chunk_len and chunk[inner_i] == ']': inner_i = inner_i + 1 # Loop till the closing ] is found while inner_i < chunk_len and chunk[inner_i] != ']': inner_i = inner_i + 1 if inner_i >= chunk_len: # Got to the end of the string without finding a closing ] # Do not treat this as a matching group, but as a literal [ pat += re.escape(char) else: # Grab the insides of the [brackets] inner = chunk[i + 1:inner_i] char_class = '' # Class negation if inner[0] == '!': char_class = '^' inner = inner[1:] char_class += re.escape(inner) pat += '[%s]' % (char_class,) # Skip to the end ] i = inner_i else: pat += re.escape(char) i += 1 # Join each chunk with the dir separator if not last_chunk: pat += sep pat += r'\Z' return re.compile(pat, flags=re.MULTILINE|re.DOTALL) class egg_info(Command): description = "create a distribution's .egg-info directory" user_options = [ ('egg-base=', 'e', "directory containing .egg-info directories" " (default: top of the source tree)"), ('tag-date', 'd', "Add date stamp (e.g. 
20050528) to version number"), ('tag-build=', 'b', "Specify explicit tag to add to version number"), ('no-date', 'D', "Don't include date stamp [default]"), ] boolean_options = ['tag-date'] negative_opt = { 'no-date': 'tag-date', } def initialize_options(self): self.egg_name = None self.egg_version = None self.egg_base = None self.egg_info = None self.tag_build = None self.tag_date = 0 self.broken_egg_info = False self.vtags = None #################################### # allow the 'tag_svn_revision' to be detected and # set, supporting sdists built on older Setuptools. @property def tag_svn_revision(self): pass @tag_svn_revision.setter def tag_svn_revision(self, value): pass #################################### def save_version_info(self, filename): """ Materialize the value of date into the build tag. Install build keys in a deterministic order to avoid arbitrary reordering on subsequent builds. """ # python 2.6 compatibility odict = getattr(collections, 'OrderedDict', dict) egg_info = odict() # follow the order these keys would have been added # when PYTHONHASHSEED=0 egg_info['tag_build'] = self.tags() egg_info['tag_date'] = 0 edit_config(filename, dict(egg_info=egg_info)) def finalize_options(self): self.egg_name = safe_name(self.distribution.get_name()) self.vtags = self.tags() self.egg_version = self.tagged_version() parsed_version = parse_version(self.egg_version) try: is_version = isinstance(parsed_version, packaging.version.Version) spec = ( "%s==%s" if is_version else "%s===%s" ) list( parse_requirements(spec % (self.egg_name, self.egg_version)) ) except ValueError: raise distutils.errors.DistutilsOptionError( "Invalid distribution name or version syntax: %s-%s" % (self.egg_name, self.egg_version) ) if self.egg_base is None: dirs = self.distribution.package_dir self.egg_base = (dirs or {}).get('', os.curdir) self.ensure_dirname('egg_base') self.egg_info = to_filename(self.egg_name) + '.egg-info' if self.egg_base != os.curdir: self.egg_info = os.path.join(self.egg_base, self.egg_info) if '-' in self.egg_name: self.check_broken_egg_info() # Set package version for the benefit of dumber commands # (e.g. sdist, bdist_wininst, etc.) # self.distribution.metadata.version = self.egg_version # If we bootstrapped around the lack of a PKG-INFO, as might be the # case in a fresh checkout, make sure that any special tags get added # to the version info # pd = self.distribution._patched_dist if pd is not None and pd.key == self.egg_name.lower(): pd._version = self.egg_version pd._parsed_version = parse_version(self.egg_version) self.distribution._patched_dist = None def write_or_delete_file(self, what, filename, data, force=False): """Write `data` to `filename` or delete if empty If `data` is non-empty, this routine is the same as ``write_file()``. If `data` is empty but not ``None``, this is the same as calling ``delete_file(filename)`. If `data` is ``None``, then this is a no-op unless `filename` exists, in which case a warning is issued about the orphaned file (if `force` is false), or deleted (if `force` is true). """ if data: self.write_file(what, filename, data) elif os.path.exists(filename): if data is None and not force: log.warn( "%s not set in setup(), but %s exists", what, filename ) return else: self.delete_file(filename) def write_file(self, what, filename, data): """Write `data` to `filename` (if not a dry run) after announcing it `what` is used in a log message to identify what is being written to the file. 
""" log.info("writing %s to %s", what, filename) if six.PY3: data = data.encode("utf-8") if not self.dry_run: f = open(filename, 'wb') f.write(data) f.close() def delete_file(self, filename): """Delete `filename` (if not a dry run) after announcing it""" log.info("deleting %s", filename) if not self.dry_run: os.unlink(filename) def tagged_version(self): version = self.distribution.get_version() # egg_info may be called more than once for a distribution, # in which case the version string already contains all tags. if self.vtags and version.endswith(self.vtags): return safe_version(version) return safe_version(version + self.vtags) def run(self): self.mkpath(self.egg_info) installer = self.distribution.fetch_build_egg for ep in iter_entry_points('egg_info.writers'): ep.require(installer=installer) writer = ep.resolve() writer(self, ep.name, os.path.join(self.egg_info, ep.name)) # Get rid of native_libs.txt if it was put there by older bdist_egg nl = os.path.join(self.egg_info, "native_libs.txt") if os.path.exists(nl): self.delete_file(nl) self.find_sources() def tags(self): version = '' if self.tag_build: version += self.tag_build if self.tag_date: version += time.strftime("-%Y%m%d") return version def find_sources(self): """Generate SOURCES.txt manifest file""" manifest_filename = os.path.join(self.egg_info, "SOURCES.txt") mm = manifest_maker(self.distribution) mm.manifest = manifest_filename mm.run() self.filelist = mm.filelist def check_broken_egg_info(self): bei = self.egg_name + '.egg-info' if self.egg_base != os.curdir: bei = os.path.join(self.egg_base, bei) if os.path.exists(bei): log.warn( "-" * 78 + '\n' "Note: Your current .egg-info directory has a '-' in its name;" '\nthis will not work correctly with "setup.py develop".\n\n' 'Please rename %s to %s to correct this problem.\n' + '-' * 78, bei, self.egg_info ) self.broken_egg_info = self.egg_info self.egg_info = bei # make it work for now class FileList(_FileList): # Implementations of the various MANIFEST.in commands def process_template_line(self, line): # Parse the line: split it up, make sure the right number of words # is there, and return the relevant words. 'action' is always # defined: it's the first word of the line. Which of the other # three are defined depends on the action; it'll be either # patterns, (dir and patterns), or (dir_pattern). (action, patterns, dir, dir_pattern) = self._parse_template_line(line) # OK, now we know that the action is valid and we have the # right number of words on the line for that action -- so we # can proceed with minimal error-checking. 
if action == 'include': self.debug_print("include " + ' '.join(patterns)) for pattern in patterns: if not self.include(pattern): log.warn("warning: no files found matching '%s'", pattern) elif action == 'exclude': self.debug_print("exclude " + ' '.join(patterns)) for pattern in patterns: if not self.exclude(pattern): log.warn(("warning: no previously-included files " "found matching '%s'"), pattern) elif action == 'global-include': self.debug_print("global-include " + ' '.join(patterns)) for pattern in patterns: if not self.global_include(pattern): log.warn(("warning: no files found matching '%s' " "anywhere in distribution"), pattern) elif action == 'global-exclude': self.debug_print("global-exclude " + ' '.join(patterns)) for pattern in patterns: if not self.global_exclude(pattern): log.warn(("warning: no previously-included files matching " "'%s' found anywhere in distribution"), pattern) elif action == 'recursive-include': self.debug_print("recursive-include %s %s" % (dir, ' '.join(patterns))) for pattern in patterns: if not self.recursive_include(dir, pattern): log.warn(("warning: no files found matching '%s' " "under directory '%s'"), pattern, dir) elif action == 'recursive-exclude': self.debug_print("recursive-exclude %s %s" % (dir, ' '.join(patterns))) for pattern in patterns: if not self.recursive_exclude(dir, pattern): log.warn(("warning: no previously-included files matching " "'%s' found under directory '%s'"), pattern, dir) elif action == 'graft': self.debug_print("graft " + dir_pattern) if not self.graft(dir_pattern): log.warn("warning: no directories found matching '%s'", dir_pattern) elif action == 'prune': self.debug_print("prune " + dir_pattern) if not self.prune(dir_pattern): log.warn(("no previously-included directories found " "matching '%s'"), dir_pattern) else: raise DistutilsInternalError( "this cannot happen: invalid action '%s'" % action) def _remove_files(self, predicate): """ Remove all files from the file list that match the predicate. Return True if any matching files were removed """ found = False for i in range(len(self.files) - 1, -1, -1): if predicate(self.files[i]): self.debug_print(" removing " + self.files[i]) del self.files[i] found = True return found def include(self, pattern): """Include files that match 'pattern'.""" found = [f for f in glob(pattern) if not os.path.isdir(f)] self.extend(found) return bool(found) def exclude(self, pattern): """Exclude files that match 'pattern'.""" match = translate_pattern(pattern) return self._remove_files(match.match) def recursive_include(self, dir, pattern): """ Include all files anywhere in 'dir/' that match the pattern. """ full_pattern = os.path.join(dir, '**', pattern) found = [f for f in glob(full_pattern, recursive=True) if not os.path.isdir(f)] self.extend(found) return bool(found) def recursive_exclude(self, dir, pattern): """ Exclude any file anywhere in 'dir/' that match the pattern. """ match = translate_pattern(os.path.join(dir, '**', pattern)) return self._remove_files(match.match) def graft(self, dir): """Include all files from 'dir/'.""" found = [ item for match_dir in glob(dir) for item in distutils.filelist.findall(match_dir) ] self.extend(found) return bool(found) def prune(self, dir): """Filter out files from 'dir/'.""" match = translate_pattern(os.path.join(dir, '**')) return self._remove_files(match.match) def global_include(self, pattern): """ Include all files anywhere in the current directory that match the pattern. This is very inefficient on large file trees. 
""" if self.allfiles is None: self.findall() match = translate_pattern(os.path.join('**', pattern)) found = [f for f in self.allfiles if match.match(f)] self.extend(found) return bool(found) def global_exclude(self, pattern): """ Exclude all files anywhere that match the pattern. """ match = translate_pattern(os.path.join('**', pattern)) return self._remove_files(match.match) def append(self, item): if item.endswith('\r'): # Fix older sdists built on Windows item = item[:-1] path = convert_path(item) if self._safe_path(path): self.files.append(path) def extend(self, paths): self.files.extend(filter(self._safe_path, paths)) def _repair(self): """ Replace self.files with only safe paths Because some owners of FileList manipulate the underlying ``files`` attribute directly, this method must be called to repair those paths. """ self.files = list(filter(self._safe_path, self.files)) def _safe_path(self, path): enc_warn = "'%s' not %s encodable -- skipping" # To avoid accidental trans-codings errors, first to unicode u_path = unicode_utils.filesys_decode(path) if u_path is None: log.warn("'%s' in unexpected encoding -- skipping" % path) return False # Must ensure utf-8 encodability utf8_path = unicode_utils.try_encode(u_path, "utf-8") if utf8_path is None: log.warn(enc_warn, path, 'utf-8') return False try: # accept is either way checks out if os.path.exists(u_path) or os.path.exists(utf8_path): return True # this will catch any encode errors decoding u_path except UnicodeEncodeError: log.warn(enc_warn, path, sys.getfilesystemencoding()) class manifest_maker(sdist): template = "MANIFEST.in" def initialize_options(self): self.use_defaults = 1 self.prune = 1 self.manifest_only = 1 self.force_manifest = 1 def finalize_options(self): pass def run(self): self.filelist = FileList() if not os.path.exists(self.manifest): self.write_manifest() # it must exist so it'll get in the list self.add_defaults() if os.path.exists(self.template): self.read_template() self.prune_file_list() self.filelist.sort() self.filelist.remove_duplicates() self.write_manifest() def _manifest_normalize(self, path): path = unicode_utils.filesys_decode(path) return path.replace(os.sep, '/') def write_manifest(self): """ Write the file list in 'self.filelist' to the manifest file named by 'self.manifest'. 
""" self.filelist._repair() # Now _repairs should encodability, but not unicode files = [self._manifest_normalize(f) for f in self.filelist.files] msg = "writing manifest file '%s'" % self.manifest self.execute(write_file, (self.manifest, files), msg) def warn(self, msg): if not self._should_suppress_warning(msg): sdist.warn(self, msg) @staticmethod def _should_suppress_warning(msg): """ suppress missing-file warnings from sdist """ return re.match(r"standard file .*not found", msg) def add_defaults(self): sdist.add_defaults(self) self.filelist.append(self.template) self.filelist.append(self.manifest) rcfiles = list(walk_revctrl()) if rcfiles: self.filelist.extend(rcfiles) elif os.path.exists(self.manifest): self.read_manifest() ei_cmd = self.get_finalized_command('egg_info') self.filelist.graft(ei_cmd.egg_info) def prune_file_list(self): build = self.get_finalized_command('build') base_dir = self.distribution.get_fullname() self.filelist.prune(build.build_base) self.filelist.prune(base_dir) sep = re.escape(os.sep) self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep, is_regex=1) def write_file(filename, contents): """Create a file with the specified name and write 'contents' (a sequence of strings without line terminators) to it. """ contents = "\n".join(contents) # assuming the contents has been vetted for utf-8 encoding contents = contents.encode("utf-8") with open(filename, "wb") as f: # always write POSIX-style manifest f.write(contents) def write_pkg_info(cmd, basename, filename): log.info("writing %s", filename) if not cmd.dry_run: metadata = cmd.distribution.metadata metadata.version, oldver = cmd.egg_version, metadata.version metadata.name, oldname = cmd.egg_name, metadata.name metadata.long_description_content_type = getattr( cmd.distribution, 'long_description_content_type' ) try: # write unescaped data to PKG-INFO, so older pkg_resources # can still parse it metadata.write_pkg_info(cmd.egg_info) finally: metadata.name, metadata.version = oldname, oldver safe = getattr(cmd.distribution, 'zip_safe', None) bdist_egg.write_safety_flag(cmd.egg_info, safe) def warn_depends_obsolete(cmd, basename, filename): if os.path.exists(filename): log.warn( "WARNING: 'depends.txt' is not used by setuptools 0.6!\n" "Use the install_requires/extras_require setup() args instead." 
) def _write_requirements(stream, reqs): lines = yield_lines(reqs or ()) append_cr = lambda line: line + '\n' lines = map(append_cr, lines) stream.writelines(lines) def write_requirements(cmd, basename, filename): dist = cmd.distribution data = six.StringIO() _write_requirements(data, dist.install_requires) extras_require = dist.extras_require or {} for extra in sorted(extras_require): data.write('\n[{extra}]\n'.format(**vars())) _write_requirements(data, extras_require[extra]) cmd.write_or_delete_file("requirements", filename, data.getvalue()) def write_setup_requirements(cmd, basename, filename): data = StringIO() _write_requirements(data, cmd.distribution.setup_requires) cmd.write_or_delete_file("setup-requirements", filename, data.getvalue()) def write_toplevel_names(cmd, basename, filename): pkgs = dict.fromkeys( [ k.split('.', 1)[0] for k in cmd.distribution.iter_distribution_names() ] ) cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n') def overwrite_arg(cmd, basename, filename): write_arg(cmd, basename, filename, True) def write_arg(cmd, basename, filename, force=False): argname = os.path.splitext(basename)[0] value = getattr(cmd.distribution, argname, None) if value is not None: value = '\n'.join(value) + '\n' cmd.write_or_delete_file(argname, filename, value, force) def write_entries(cmd, basename, filename): ep = cmd.distribution.entry_points if isinstance(ep, six.string_types) or ep is None: data = ep elif ep is not None: data = [] for section, contents in sorted(ep.items()): if not isinstance(contents, six.string_types): contents = EntryPoint.parse_group(section, contents) contents = '\n'.join(sorted(map(str, contents.values()))) data.append('[%s]\n%s\n\n' % (section, contents)) data = ''.join(data) cmd.write_or_delete_file('entry points', filename, data, True) def get_pkg_info_revision(): """ Get a -r### off of PKG-INFO Version in case this is an sdist of a subversion revision. """ warnings.warn("get_pkg_info_revision is deprecated.", DeprecationWarning) if os.path.exists('PKG-INFO'): with io.open('PKG-INFO') as f: for line in f: match = re.match(r"Version:.*-r(\d+)\s*$", line) if match: return int(match.group(1)) return 0
apache-2.0
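The setuptools egg_info module above centers on translate_pattern, which turns MANIFEST.in-style globs into anchored regular expressions (with '**' spanning directory levels). A minimal sketch of that behaviour, assuming a setuptools vintage like the one shown that still exposes translate_pattern at module level, and POSIX path separators:

# Minimal sketch: the glob-to-regex translation implemented above.
from setuptools.command.egg_info import translate_pattern

pat = translate_pattern('docs/**/*.txt')
print(bool(pat.match('docs/a/b/notes.txt')))   # True: '**' crosses directory levels
print(bool(pat.match('docs/notes.txt')))       # True: '**' also matches zero directories
print(bool(pat.match('src/notes.txt')))        # False: the pattern is anchored under docs/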
kcyu1993/ML_course_kyu
projects/project1/scripts/model.py
1
19450
from __future__ import absolute_import from abc import ABCMeta, abstractmethod import copy from data_utils import build_k_indices from learning_model import * from regularizer import * from helpers import save_numpy_array import numpy as np class Model(object): """ Author: Kaicheng Yu Machine learning model engine Implement the optimizers sgd normal equations cross-validation of given parameters Abstract method: __call__ produce the raw prediction, use the latest weight obtained by training predict produce prediction values, could take weight as input get_gradient define gradient here, including the gradient for regularizer normalequ define normal equations Support: L1, L2 normalization Due to the distribution of work, only LogisticRegression is fully tested for fitting data, and cross-validation. LinearRegression model should also work but not fully tested. The goal of this class is not only specific to this learning project, but also for reusable and scalable to other problems, models. """ def __init__(self, train_data, validation=None, initial_weight=None, loss_function_name='mse', cal_weight='gradient', regularizer=None, regularizer_p=None): """ Initializer of all learning models. :param train_data: training data. :param validation_data: """ self.train_x = train_data[1] self.train_y = train_data[0] self.set_valid(validation) ''' Define the progress of history here ''' self.losses = [] self.iterations = 0 self.weights = [] self.misclass_rate = [] ''' Define loss, weight calculation, regularizer ''' self.loss_function = get_loss_function(loss_function_name) self.loss_function_name = loss_function_name self.calculate_weight = cal_weight self.regularizer = Regularizer.get_regularizer(regularizer, regularizer_p) self.regularizer_p = regularizer_p # Asserting degree if len(self.train_x.shape) > 1: degree = self.train_x.shape[1] else: degree = 1 # Initialize the weight for linear model. if initial_weight is not None: self.weights.append(initial_weight) else: self.weights.append(np.random.rand(degree)) def set_valid(self, validation): # Set validation here. 
self.validation = False self.valid_x = None self.valid_y = None self.valid_losses = None self.valid_misclass_rate = None if validation is not None: (valid_y, valid_x) = validation self.valid_x = valid_x self.valid_y = valid_y self.validation = True self.valid_losses = [] self.valid_misclass_rate = [] @abstractmethod def __call__(self, **kwargs): """Define the fit function and get prediction""" raise NotImplementedError @abstractmethod def get_gradient(self, y, x, weight): raise NotImplementedError @abstractmethod def predict(self, x, weight): raise NotImplementedError @abstractmethod def normalequ(self, **kwargs): ''' define normal equation method to calculate optimal weights''' raise NotImplementedError def compute_weight(self, y, x, test_x=None, test_y=None, **kwargs): """ Return weight under given parameter """ model = copy.copy(self) model.__setattr__('train_y', y) model.__setattr__('train_x', x) if test_x is not None and test_y is not None: model.set_valid((test_y, test_x)) _kwargs = [] for name, value in kwargs.items(): # Recognize parameter " if name is "regularizer_p": model.__setattr__(name, value) model.regularizer.set_parameter(value) else: _kwargs.append((name, value)) _kwargs = dict(_kwargs) if model.calculate_weight is 'gradient': return model.sgd(**_kwargs) # elif model.calculate_weight is 'newton': # return model.newton(**_kwargs) elif model.calculate_weight is 'normalequ': return model.normalequ(**_kwargs) def get_history(self): """ Get the training history of current model :return: list as [iterations, [losses], [weights], [mis_class]] """ if self.validation: return self.iterations, (self.losses, self.valid_losses), \ (self.weights), (self.misclass_rate, self.valid_misclass_rate) return self.iterations, self.losses, self.weights, self.misclass_rate def train(self, optimizer='sgd', loss_function='mse', **kwargs): """ Train function to perform one time training Will based optimizer to select. TODO: Would add 'newton' in the future This :param optimizer: only support 'sgd' :param loss_function: loss_function name {mse, mae, logistic} :param kwargs: passed into sgd :return: best weight """ self.loss_function = get_loss_function(loss_function) self.loss_function_name = loss_function if optimizer is 'sgd': self.sgd(**kwargs) return self.weights[-1] """====================================""" """ Beginning of the optimize Routines """ """====================================""" def sgd(self, lr=0.01, decay=0.5, max_iters=1000, batch_size=128, early_stop=150, decay_intval=50, decay_lim=9): """ Define the SGD algorithm here Implementing weight decay, early stop. 
:param lr: learning rate :param decay: weight decay after fix iterations :param max_iters: maximum iterations :param batch_size: batch_size :param early_stop: early_stop after no improvement :return: final weight vector """ np.set_printoptions(precision=4) w = self.weights[0] loss = self.compute_loss(self.train_y, self.train_x, w) best_loss = loss best_counter = 0 decay_counter = 0 # print("initial loss is {} ".format(loss)) for epoch in range(max_iters): for batch_y, batch_x in batch_iter(self.train_y, self.train_x, batch_size): grad = self.get_gradient(batch_y, batch_x, w) w = w - lr * grad loss = self.compute_loss(self.train_y, self.train_x, w) mis_class = self.compute_metrics(self.train_y, self.train_x, w) self.weights.append(w) self.losses.append(loss) self.misclass_rate.append(mis_class) if self.validation is True: valid_loss = self.compute_loss(self.valid_y, self.valid_x, w) valid_mis_class = self.compute_metrics(self.valid_y, self.valid_x, w) self.valid_losses.append(valid_loss) self.valid_misclass_rate.append(valid_mis_class) # Display every 25 epoch if (epoch + 1) % 25 == 0: print('Epoch {e} in {m}'.format(e=epoch + 1, m=max_iters), end="\t") if self.validation is True: # print('\tTrain Loss {0:0.4f}, \tTrain mis-class {0:0.4f}, ' # '\tvalid loss {0:0.4f}, \tvalid mis-class {0:0.4f}'. # format(loss, mis_class, valid_loss, valid_mis_class)) print('\tTrain Loss {}, \tTrain mis-class {}, ' '\tvalid loss {}, \tvalid mis-class {}'. format(loss, mis_class, valid_loss, valid_mis_class)) else: print('\tTrain Loss {}, \tTrain mis-class {}'. format(loss, mis_class)) # judge the performance if best_loss - loss > 0.000001: best_loss = loss best_counter = 0 else: best_counter += 1 if best_counter > early_stop: print("Learning early stop since loss not improving for {} epoch.".format(best_counter)) break if best_counter % decay_intval == 0: print("weight decay by {}".format(decay)) lr *= decay decay_counter += 1 if decay_counter > decay_lim: print("decay {} times, stop".format(decay_lim)) break return self.weights[-1] def newton(self, lr=0.01, max_iters=100): # TODO: implement newton method later raise NotImplementedError def cross_validation(self, cv, lambdas, lambda_name, seed=1, skip=False, plot=False, **kwargs): """ Cross validation method to acquire the best prediction parameters. It will use the train_x y as data and do K-fold cross validation. :param cv: cross validation times :param lambdas: array of lambdas to be validated :param lambda_name: the lambda name tag :param seed: random seed :param skip: skip the cross validation, only valid 1 time :param plot plot cross-validation plot, if machine does not support matplotlib.pyplot, set to false. :param kwargs: other parameters could pass into compute_weight :return: best weights, best_lambda, (training error, valid error) """ np.set_printoptions(precision=4) k_indices = build_k_indices(self.train_y, cv, seed) # define lists to store the loss of training data and test data err_tr = [] err_te = [] weights = [] print("K-fold ({}) cross validation to examine [{}]". 
format(cv, lambdas)) for lamb in lambdas: print("For lambda: {}".format(lamb)) _mse_tr = [] _mse_te = [] _weight = [] for k in range(cv): print('Cross valid iteration {}'.format(k)) weight, loss_tr, loss_te = self._loop_cross_validation(self.train_y, self.train_x, k_indices, k, lamb, lambda_name, **kwargs) _mse_tr += [loss_tr] _mse_te += [loss_te] _weight.append(weight) if skip: break avg_tr = np.average(_mse_tr) avg_te = np.average(_mse_te) err_tr += [avg_tr] err_te += [avg_te] weights.append(_weight) print("\t train error {}, \t valid error {}". format(avg_tr, avg_te)) # Select the best parameter during the cross validations. print('K-fold cross validation result: \n {} \n {}'. format(err_tr, err_te)) # Select the best based on least err_te min_err_te = np.argmin(err_te) print('Best err_te result {}, lambda {}'. format(err_te[min_err_te], lambdas[min_err_te])) if plot: from plots import cross_validation_visualization cross_validation_visualization(lambdas, err_tr, err_te, title=lambda_name, error_name=self.loss_function_name) else: save_numpy_array(lambdas, err_tr, err_te, names=['lambda', 'err_tr', 'err_te'], title=self.regularizer.name) return weights[min_err_te], lambdas[min_err_te], (err_tr, err_te) def _loop_cross_validation(self, y, x, k_indices, k, lamb, lambda_name, **kwargs): """ Single loop of cross validation :param y: train labels :param x: train data :param k_indices: indices array :param k: number of cross validations :param lamb: lambda to use :param lambda_name: lambda_name to pass into compute weight :return: weight, mis_tr, mis_te """ train_ind = np.concatenate((k_indices[:k], k_indices[k + 1:]), axis=0) train_ind = np.reshape(train_ind, (train_ind.size,)) test_ind = k_indices[k] # Note: different from np.ndarray, tuple is name[index,] # ndarray is name[index,:] train_x = x[train_ind,] train_y = y[train_ind,] test_x = x[test_ind,] test_y = y[test_ind,] # Insert one more kwargs item kwargs[lambda_name] = lamb weight = self.compute_weight(train_y, train_x, test_x, test_y, **kwargs) # Compute the metrics and return loss_tr = self.compute_metrics(train_y, train_x, weight) loss_te = self.compute_metrics(test_y, test_x, weight) return weight, loss_tr, loss_te def compute_metrics(self, target, data, weight): """ Compute the following metrics Misclassification rate """ pred = self.predict(data, weight) assert len(pred) == len(target) # Calculate the mis-classification rate: N = len(pred) pred = np.reshape(pred, (N,)) target = np.reshape(target, (N,)) nb_misclass = np.count_nonzero(target - pred) return nb_misclass / N def compute_loss(self, y, x, weight): return self.loss_function(y, x, weight) class LogisticRegression(Model): """ Logistic regression """ def __init__(self, train, validation=None, initial_weight=None, loss_function_name='logistic', calculate_weight='gradient', regularizer=None, regularizer_p=None): """ Constructor of Logistic Regression model :param train: tuple (y, x) :param validation: tuple (y, x) :param initial_weight: weight vector, dim align x :param loss_function: f(x, y, weight) :param regularizer: "Ridge" || "Lasso" :param regularizer_p: parameter """ # Initialize the super class with given data. 
# Transform the y into {0,1} y, tx = train y[np.where(y < 0)] = 0 train = (y, tx) if validation: val_y, val_tx = validation val_y[np.where(val_y < 0)] = 0 validation = (val_y, val_tx) super(LogisticRegression, self).__init__(train, validation, initial_weight=initial_weight, loss_function_name=loss_function_name, cal_weight=calculate_weight, regularizer=regularizer, regularizer_p=regularizer_p) # Set predicted label self.pred_label = [-1, 1] def __call__(self, x, weight=None): """ Define the fit function and get prediction, generate probability of occurrence """ if weight is None: weight = self.weights[-1] return sigmoid(np.dot(x, weight)) def get_gradient(self, y, x, weight): """ calculate gradient given data and weight """ y = np.reshape(y, (len(y),)) return np.dot(x.T, sigmoid(np.dot(x, weight)) - y) \ + self.regularizer.get_gradient(weight) def get_hessian(self, y, x, weight): # TODO: implement hessian for newton method raise NotImplementedError def predict(self, x, weight=None, cutting=0.5): """ Prediction of event {0,1} """ if weight is None: weight = self.weights[-1] pred = sigmoid(np.dot(x, weight)) pred[np.where(pred <= cutting)] = 0 pred[np.where(pred > cutting)] = 1 return pred def predict_label(self, x, weight=None, cutting=0.5, predict_label=None): """ Prediction result with labels """ if predict_label is None: predict_label = self.pred_label if weight is None: weight = self.weights[-1] pred = self.predict(x, weight, cutting) pred[np.where(pred == 0)] = predict_label[0] pred[np.where(pred == 1)] = predict_label[1] return pred def train(self, loss_function='logistic', lr=0.1, decay=0.5, max_iters=3000, batch_size=128, **kwargs): """ Make the default loss logistic, set default parameters """ return super(LogisticRegression, self).train('sgd', loss_function, lr=lr, decay=decay, max_iters=max_iters, batch_size=batch_size, **kwargs) def normalequ(self, **kwargs): """ Should never call """ raise NotImplementedError class LinearRegression(Model): """ Linear regression model This is not fully tested, especially the cross-validation, please refers to the implemenations.py for linear model. """ def __init__(self, train, validation=None, initial_weight=None, regularizer=None, regularizer_p=None, loss_function_name='mse', calculate_weight='normalequ'): # Initialize the super class with given data. 
super(LinearRegression, self).__init__(train, validation, initial_weight=initial_weight, loss_function_name=loss_function_name, cal_weight=calculate_weight, regularizer=regularizer, regularizer_p=regularizer_p) def __call__(self, x): """ calulate prediction based on latest result """ return np.dot(x, self.weights[-1]) def get_gradient(self, batch_y, batch_x, weight): """ return gradient of linear model, including the regularizer """ N = batch_y.shape[0] grad = np.empty(len(weight)) for index in range(N): _y = batch_y[index] _x = batch_x[index] grad = grad + gradient_least_square(_y, _x, weight, self.loss_function_name) grad /= N grad += self.regularizer.get_gradient(weight) return grad def predict(self, x, weight): """ Prediction function, predicting final result """ pred = np.dot(x, weight) pred[np.where(pred <= 0)] = -1 pred[np.where(pred > 0)] = 1 return pred def normalequ(self): """ Normal equation to get parameters """ tx = self.train_x y = self.train_y if self.regularizer is None: return np.linalg.solve(np.dot(tx.T, tx), np.dot(tx.T, y)) elif self.regularizer.name is 'Ridge': G = np.eye(tx.shape[1]) G[0, 0] = 0 hes = np.dot(tx.T, tx) + self.regularizer_p * G return np.linalg.solve(hes, np.dot(tx.T, y)) else: raise NotImplementedError
mit
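The model.py file above defines a small learning-model engine; LogisticRegression takes training data as a (y, x) tuple, remaps labels to {0, 1} internally, and fits by SGD through train(). A hedged sketch of the intended call pattern on synthetic data; the 'Ridge' regularizer name, the import path, and the hyper-parameter values are illustrative assumptions rather than values taken from the file:

# Sketch only: exercises the constructor/train/predict_label signatures defined above.
import numpy as np
from model import LogisticRegression   # assumes scripts/model.py is importable as `model`

y = np.where(np.random.rand(200) > 0.5, 1, -1)    # labels in {-1, 1}
x = np.random.rand(200, 5)                        # 200 samples, 5 features
clf = LogisticRegression(train=(y, x), regularizer='Ridge', regularizer_p=0.01)
w = clf.train(lr=0.1, max_iters=200, batch_size=32)
labels = clf.predict_label(x, w)                  # predictions mapped back to {-1, 1}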
msimet/Stile
devel/make_simple_catalog.py
1
1417
import galsim import numpy use_noise = True extent_degrees = 1. # Create galaxies within a box of this side length n_galaxies_per_sq_arcmin = 20 z_min = 0.1 z_max = 2.0 z_powerlaw_slope = 2.0 z_lens = 0.2 def make_safe_shear(g): if g[0]>1: g[0] = 1 if g[1]>1: g[1] = 1 mag = numpy.sqrt(g[0]**2+g[1]**2) if mag>0.99999: g /= (mag+0.00001) return g def main(): z_offs = z_min**(z_powerlaw_slope+1) n_total_galaxies = int(extent_degrees**2*3600*n_galaxies_per_sq_arcmin) halo = galsim.NFWHalo(mass=1.E14, conc=4., redshift=z_lens) for i in range(n_total_galaxies): ra,dec = extent_degrees*numpy.random.rand(2)-0.5*extent_degrees z = ((z_powerlaw_slope+1)*numpy.random.random()+z_offs)**(1./(z_powerlaw_slope+1)) if use_noise: g_int = make_safe_shear(numpy.random.normal(scale=0.35,size=2)) g_int = galsim.Shear(g1=g_int[0], g2=g_int[1]) else: g_int = galsim.Shear(g1=0,g2=0) if z>z_lens: g_induced = halo.getShear(galsim.PositionD(3600*ra,3600*dec),z) # g_induced = (min(g_induced[0],1),min(g_induced[0],1)) g_induced = galsim.Shear(g1=g_induced[0],g2=g_induced[1]) g_total = g_induced+g_int else: g_total = g_int print i, ra, dec, z, g_total.getG1(), g_total.getG2() if __name__=='__main__': main()
bsd-3-clause
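make_simple_catalog.py above draws random positions and redshifts, then combines an intrinsic shear with the shear induced by an NFW halo for sources behind the lens. A short sketch of just that lensing step, using the same GalSim calls as the script (positions in arcsec; the numeric values here are placeholders):

# Sketch of the shear composition used above (GalSim API as used in the script;
# newer GalSim releases expose .g1/.g2 properties instead of getG1()/getG2()).
import galsim

halo = galsim.NFWHalo(mass=1.e14, conc=4., redshift=0.2)
g1, g2 = halo.getShear(galsim.PositionD(600.0, -300.0), 1.0)  # shear at this position for a z=1.0 source
g_total = galsim.Shear(g1=g1, g2=g2) + galsim.Shear(g1=0.05, g2=-0.02)
print(g_total.getG1(), g_total.getG2())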
kdart/pycopia
core/pycopia/inet/rfc2822.py
1
15042
#!/usr/bin/python2.7 # -*- coding: utf-8 -*- # vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Objects for constructing, parsing, and editing rfc 2822 compliant messages (plus extensions). """ from __future__ import print_function import re from pycopia.inet.ABNF import * from pycopia.fsm import FSM, ANY from pycopia.aid import IF, Enums SPECIALS = '()<>@,:;."[]' LWS = WSP EXTRA = "!#$%&'*+-/=?^_`{|}~" ATEXT = ALPHA+DIGIT+EXTRA HEADBREAK = CRLF+CRLF FOLDED = re.compile(r'%s([%s]+)' % (CRLF, WSP)) class RFC2822Error(Exception): pass class RFC2822SyntaxError(RFC2822Error): pass class _RFC2822FSM(FSM): def reset(self): self._reset() self.arg = '' self.cl_name = None self.cl_params = {} self.cl_value = None def unfold(s): """Unfold a folded string, keeping line breaks and other white space.""" return FOLDED.sub(r"\1", s) def headerlines(bigstring): """Yield unfolded lines from a chunk of text.""" bigstring = unfold(bigstring) for line in bigstring.split(CRLF): yield line def get_headers(fo): s = [] b = 4 while 1: data = fo.read(80) if not data: break i = data.find(HEADBREAK) if i == -1: # catch HEADBREAK split across chunks if data.startswith("\r"): if s[-1].endswith("\r\n"): b = 2 break elif data.startswith("\n"): if s[-1].endswith("\r\n\r"): b = 1 break else: s.append(data) continue else: s.append(data[:i+4]) break rv = [] for line in headerlines("".join(s)): if line: rv.append(getHeader(line)) return rv, data[i+b:] def getHeader(line): [name, val] = line.split(":", 1) return Header(name.strip(), val.lstrip()) def get_headers_dict(fo): headers, left = get_headers(fo) rv = Headers() for h in headers: rv[h.name] = h.value return rv, left class Header(object): """base class for header objects.""" def __init__(self, name, value): self.name = name self.value = value def __str__(self): return "%s: %s" % (self.name, self.value) def __repr__(self): return "%s(%r, %r)" % (self.__class__.__name__, self.name, self.value) def __hash__(self): return hash(self.name) def __eq__(self, other): return self.name.upper() == other.name.upper() def __ne__(self, other): return self.name.upper() != other.name.upper() def __lt__(self, other): return self.name.upper() < other.name.upper() def __gt__(self, other): return self.name.upper() > other.name.upper() def __le__(self, other): return self.name.upper() <= other.name.upper() def __ge__(self, other): return self.name.upper() >= other.name.upper() # concrete header fields. These encapsulate any special rules for methods for # its kind. The names of these are significant... the actual heading value is # taken from it with some translation applied. 
class Return_Path(Header): def __str__(self): return "%s: %s" % (self.NAME, IF(self.data, self.value, "<>")) class Date(Header): def __init__(self, timevalue=None): Header.__init__(self) self.value = timevalue def __str__(self): return "%s: %s" % (self.name, formatdate(self.value) ) class From(Header): pass class Sender(Header): pass class Reply_To(Header): pass class To(Header): pass class Cc(Header): pass class Bcc(Header): pass class Message_ID(Header): pass class In_Reply_To(Header): pass class References(Header): pass class Subject(Header): pass class Comments(Header): pass class Keywords(Header): pass class Resent_Date(Header): pass class Resent_From(Header): pass class Resent_Sender(Header): pass class Resent_To(Header): pass class Resent_Cc(Header): pass class Resent_Bcc(Header): pass class Resent_Message_ID(Header): pass class Return_Path(Header): pass class Recieved(Header): pass ##### message parts ##### class Headers(dict): """A Collection of headers. No duplicates allowed here.""" def __setitem__(self, name, ho): dict.__setitem__(self, name.lower(), ho) def __delitem__(self, name): dict.__delitem__(self, name.lower()) def __getitem__(self, name): try: return dict.__getitem__(self, name.lower()) except KeyError: return None def get(self, key, default=None): return dict.get(self, key.lower(), default) def emit(self, fo): for h in self.values(): fo.write(str(h)) class Body(object): def __init__(self, text=""): self.text = str(text) def __str__(self): return self.text def emit(self, fo): fo.write(self.text) class Message(object): """Represents an email message.""" def __init__(self, header, body=None): self.header = header self.body = body or Body() def __str__(self): return str(self.header)+"\n\n"+str(self.body) def emit(self, fo): self.header.emit(fo) fo.write("\n\n") self.body.emit(fo) class QuotedString(object): """QuotedString(data) Represents an quoted string. Automatically encodes the value. """ def __init__(self, val): self.data = val def __str__(self): return quote(str(self.data)) def __repr__(self): return "%s(%r)" % (self.__class__, self.data) def parse(self, data): self.data = unquote(data) class Comment(object): """A header comment. """ def __init__(self, item): self.data = item def __str__(self): return "( %s )" % (self.item) class Address(object): def __init__(self, address, name=None): self.address = address self.name = name def __str__(self): if self.name: return '"%s" <%s>' % (self.name, self.address) else: return str(self.address) def __len__(self): return len(str(self)) def __repr__(self): return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.name) def __eq__(self, other): try: return self.address == other.address and self.name == other.name except AttributeError: return str(self) == str(other) # other might just be a string def __ne__(self, other): try: return self.address != other.address or self.name != other.name except AttributeError: return str(self) != str(other) # other might just be a string class AddressList(list): def append(self, address, name=None): super(AddressList, self).append(Address(address, name)) add = append def insert(self, i, address, name=None): super(AddressList, self).insert(i, Address(address, name)) def __str__(self): return ", ".join(map(str, self)) def formatdate(timeval=None): """Returns time format preferred for Internet standards. Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 According to RFC 1123, day and month names must always be in English. If not for that, this code could use strftime(). 
It can't because strftime() honors the locale and could generated non-English names. """ from pycopia import timelib if timeval is None: timeval = timelib.time() timeval = timelib.gmtime(timeval) return "%s, %02d %s %04d %02d:%02d:%02d GMT" % ( ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][timeval[6]], timeval[2], ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"][timeval[1]-1], timeval[0], timeval[3], timeval[4], timeval[5]) class RFC2822Parser(object): def __init__(self): self._contenthandler = None self._errorhandler = None self._initfsm() def setContentHandler(self, handler): assert isinstance(handler, _HandlerBase), "must be handler object." self._contenthandler = handler def getContentHandler(self): return self._contenthandler def setErrorHandler(self, handler): self._errorhandler = handler def getErrorHandler(self): return self._errorhandler def parse(self, url): import urllib2 fo = urllib2.urlopen(url) try: self.parseFile(fo) finally: fo.close() # parser unfolds folded strings def parseFile(self, fo): self._contenthandler.startDocument() lastline = '' savedlines = [] while 1: line = fo.readline() if not line: if lastline: line = "".join(savedlines)+lastline self._process_line(line) break if not lastline: lastline = line continue if line[0] in WSP: savedlines.append(lastline.rstrip()) lastline = line[1:] continue if savedlines: newline = "".join(savedlines)+lastline savedlines = [] self._process_line(newline) lastline = line else: self._process_line(lastline) lastline = line self._contenthandler.endDocument() def _process_line(self, line): self._fsm.process_string(line) self._fsm.reset() # XXX def _initfsm(self): # state names: [NAME, SLASH, QUOTE, SLASHQUOTE, PARAM, PARAMNAME, VALUE, ENDLINE, PARAMVAL, PARAMQUOTE, VSLASH, ] = Enums( "NAME", "SLASH", "QUOTE", "SLASHQUOTE", "PARAM", "PARAMNAME", "VALUE", "ENDLINE", "PARAMVAL", "PARAMQUOTE", "VSLASH", ) f = _RFC2822FSM(NAME) f.add_default_transition(self._error, NAME) # f.add_transition_list(IANA_TOKEN, NAME, self._addtext, NAME) f.add_transition(":", NAME, self._endname, VALUE) f.add_transition(";", NAME, self._endname, PARAMNAME) f.add_transition_list(VALUE_CHAR, VALUE, self._addtext, VALUE) f.add_transition(CR, VALUE, None, ENDLINE) f.add_transition(LF, ENDLINE, self._doline, NAME) # parameters f.add_transition_list(IANA_TOKEN, PARAMNAME, self._addtext, PARAMNAME) f.add_transition("=", PARAMNAME, self._endparamname, PARAMVAL) f.add_transition_list(SAFE_CHAR, PARAMVAL, self._addtext, PARAMVAL) f.add_transition(",", PARAMVAL, self._nextparam, PARAMVAL) f.add_transition(";", PARAMVAL, self._nextparam, PARAMNAME) f.add_transition(DQUOTE, PARAMVAL, self._startquote, PARAMQUOTE) f.add_transition_list(QSAFE_CHAR, PARAMQUOTE, self._addtext, PARAMQUOTE) f.add_transition(DQUOTE, PARAMQUOTE, self._endquote, PARAMVAL) f.add_transition(":", PARAMVAL, self._nextparam, VALUE) # slashes f.add_transition("\\", VALUE, None, VSLASH) f.add_transition(ANY, VSLASH, self._slashescape, VALUE) # f.add_transition("\\", QUOTE, None, SLASHQUOTE) # f.add_transition(ANY, SLASHQUOTE, self._slashescape, QUOTE) # # double quotes # f.add_transition(DQUOTE, xxx, None, QUOTE) # f.add_transition(DQUOTE, QUOTE, self._doublequote, xxx) # f.add_transition(ANY, QUOTE, self._addtext, QUOTE) f.reset() self._fsm = f def _error(self, c, fsm): if self._errorhandler: self._errorhandler(c, fsm) else: fsm.reset() raise RFC2822SyntaxError('Syntax error: %s\n%r' % (c, fsm.stack)) def _addtext(self, c, fsm): fsm.arg += c _SPECIAL = {"r":"\r", 
"n":"\n", "t":"\t", "N":"\n"} def _slashescape(self, c, fsm): fsm.arg += self._SPECIAL.get(c, c) # def _doublequote(self, c, fsm): # self.arg_list.append(fsm.arg) # fsm.arg = '' def _startquote(self, c, fsm): fsm.arg = '' def _endquote(self, c, fsm): paramval = fsm.arg fsm.arg = '' fsm.cl_params[fsm.cl_paramname].append(paramval) def _endname(self, c, fsm): fsm.cl_name = ABNF.Literal(fsm.arg) fsm.arg = '' def _endparamname(self, c, fsm): name = ABNF.Literal(fsm.arg) fsm.cl_params[name] = [] fsm.cl_paramname = name fsm.arg = '' def _nextparam(self, c, fsm): paramval = fsm.arg fsm.arg = '' fsm.cl_params[fsm.cl_paramname].append(paramval) def _doline(self, c, fsm): value = fsm.arg fsm.arg = '' self._contenthandler.handleLine(fsm.cl_name, fsm.cl_params, value) class _HandlerBase(object): def handleLine(self, name, paramdict, value): pass def startDocument(self): pass def endDocument(self): pass class TestHandler(_HandlerBase): def handleLine(self, name, paramdict, value): print ("%r %r %r" % (name, paramdict, value)) def startDocument(self): print ("*** startDocument") def endDocument(self): print ("*** endDocument") class BufferedFile(object): def __init__(self): # The last partial line pushed into this object. self._partial = '' # The list of full, pushed lines, in reverse order self._lines = [] # A flag indicating whether the file has been closed or not. self._closed = False def close(self): # Don't forget any trailing partial line. self._lines.append(self._partial) self._partial = '' self._closed = True def readline(self): return '' # XXX pass def unreadline(self, line): self._lines.append(line) def push(self, data): pass def pushlines(self, lines): # Reverse and insert at the front of the lines. self._lines[:0] = lines[::-1] def is_closed(self): return self._closed def __iter__(self): return self def next(self): line = self.readline() if line == '': raise StopIteration return line
apache-2.0
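The rfc2822 module above provides header objects, address lists, and RFC-1123 date formatting. A brief sketch of those helpers; it assumes pycopia is installed so that formatdate's internal timelib import resolves:

# Sketch using classes/functions defined in the module above.
from pycopia.inet.rfc2822 import AddressList, Header, formatdate

to = AddressList()
to.append("alice@example.com", "Alice")
to.append("bob@example.com")
print(Header("To", str(to)))          # To: "Alice" <alice@example.com>, bob@example.com
print(Header("Date", formatdate()))   # Date: <current time in RFC 1123 form, GMT>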
alehander42/hatlog
hatlog/prolog.py
1
1202
def generate_prolog(x, name, file): header = '''\ :- initialization main. :- use_module(pythonTypeSystem). :- use_module(prettyTypes). ''' fun = generate_fun(x, name) m = '''main :- open('%s.txt', write, Stream), ( f(%s, Z0, Z1), unvar(Z0, Z1, Z2, Z3, Z4), %% replace free vars with names pretty_args(Z2, Y), pretty_type(Z3, Z), pretty_generic(Z4, X), format(Stream, '~a::', [X]), write(Stream, Y), write(Stream, ' -> '), write(Stream, Z), write(Stream, '\\n'), true ), close(Stream), halt. main :- halt(1). ''' %(file, name) program = '%s\n%s\n%s' % (header, fun, m) with open('%s.pl' % file, 'w') as f: f.write(program) return '%s.pl' % file def generate_fun(x, name): head = 'f(%s, [%s], %s) :-' % (name, ', '.join(x[-1][1]), x[-1][-1]) # print(x[:-1]) block = ',\n '.join(['%s(%s)' % (a, ', '.join(map(generate_arg, args + [b]))) for a, args, b in x[:-1]]) return '%s\n %s.\n' % (head, block) def generate_arg(a): if isinstance(a, str): return a else: return '[%s]' % ', '.join(map(generate_arg, a))
apache-2.0
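generate_prolog above emits a Prolog program from a list of constraint tuples; reading generate_fun, each entry looks like (predicate_name, argument_type_vars, result_type_var), with the final entry supplying the generated head's argument and return variables. That input shape is inferred from the indexing in the code, so the sketch below is an assumption about the expected format, and the predicate names are hypothetical:

# Hedged sketch of the input generate_fun appears to expect.
# from prolog import generate_fun   # assumes hatlog/prolog.py is importable as `prolog`
nodes = [
    ('z_int', [], 'X0'),            # constraint: X0 is an int
    ('z_add', ['X0', 'X1'], 'X2'),  # constraint: X2 = X0 + X1
    ('head', ['X1'], 'X2'),         # last entry: the head's arg vars and return var
]
print(generate_fun(nodes, 'increment'))
# f(increment, [X1], X2) :-
#     z_int(X0),
#     z_add(X0, X1, X2).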
tmerrick1/spack
var/spack/repos/builtin/packages/r-whisker/package.py
5
1636
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class RWhisker(RPackage): """logicless templating, reuse templates in many programming languages including R""" homepage = "http://github.com/edwindj/whisker" url = "https://cran.r-project.org/src/contrib/whisker_0.3-2.tar.gz" list_url = "https://cran.r-project.org/src/contrib/Archive/whisker" version('0.3-2', 'c4b9bf9a22e69ce003fe68663ab5e8e6')
lgpl-2.1
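In the Spack recipe above, the second argument to version() is the md5 checksum that Spack verifies the downloaded tarball against. A small sketch of reproducing that check by hand, using the archive name implied by the recipe's url field:

# Sketch: verifying the tarball against the md5 declared in the recipe above.
import hashlib

with open('whisker_0.3-2.tar.gz', 'rb') as f:
    digest = hashlib.md5(f.read()).hexdigest()
print(digest == 'c4b9bf9a22e69ce003fe68663ab5e8e6')  # True if the download matches the recipe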
makson96/free-engineer
tools/steam.py
2
8550
#!/usr/bin/env python3 #-*- coding: utf-8 -*- ##This software is available to you under the terms of the GPL-3, see "/usr/share/common-licenses/GPL-3". ##Copyright: ##- Tomasz Makarewicz ([email protected]) import os, tarfile, urllib.request, time, shutil from subprocess import Popen, PIPE recultis_dir = os.getenv("HOME") + "/.recultis/" steam_dir = recultis_dir + "shops/steam/" def start(login, password, recultis_dir, s_appid, game_dir): print("Starting SteamCMD procedure") shop_install_dir = recultis_dir + "shops/steam/" if os.path.isdir(shop_install_dir) == False: os.makedirs(shop_install_dir) #start of legacy code for Recultis 1.2 if os.path.isfile(recultis_dir+"steam.sh") == True: shutil.move(recultis_dir+"steam.sh", shop_install_dir) if os.path.isfile(recultis_dir+"steamcmd.sh") == True: shutil.move(recultis_dir+"steamcmd.sh", shop_install_dir) if os.path.isfile(recultis_dir+"steamcmd_linux.tar.gz") == True: shutil.move(recultis_dir+"steamcmd_linux.tar.gz", shop_install_dir) if os.path.isfile(recultis_dir+"steam_log.txt") == True: shutil.move(recultis_dir+"steam_log.txt", shop_install_dir) if os.path.isdir(recultis_dir+"linux32") == True: shutil.move(recultis_dir+"linux32", shop_install_dir) if os.path.isdir(recultis_dir+"linux64") == True: shutil.move(recultis_dir+"linux64", shop_install_dir) if os.path.isdir(recultis_dir+"package") == True: shutil.move(recultis_dir+"package", shop_install_dir) if os.path.isdir(recultis_dir+"public") == True: shutil.move(recultis_dir+"public", shop_install_dir) #end of legacy code for Recultis 1.2 os.chdir(shop_install_dir) if login == "" or password == "": steam_log_file = open("steam_log.txt", "w") steam_log_file.write("Steamcmd Error. Login or password not provided.\n") steam_log_file.close() print("Steamcmd Error. Login or password not provided. try again with correct one.") steam_error = 0 else: steamcmd_install(shop_install_dir) steam_error = 2 retry_nr = 0 while steam_error == 2: steam_error = run(login, password, shop_install_dir, s_appid, game_dir) if steam_error == 2: print("Steamcmd error. Retry.") retry_nr = retry_nr + 1 if retry_nr == 5: print("Steamcmd error. Reinstall steamcmd.") steamcmd_reinstall(shop_install_dir) elif retry_nr == 8: steam_error = 0 if steam_error == 0: steam_log_file = open("steam_log.txt", "a") steam_log_file.write("\nSteamcmd Error. Terminate.") steam_log_file.close() print("Steamcmd Error. Terminate.") return steam_error def steamcmd_install(shop_install_dir): print("Installing SteamCMD") if os.path.isfile(shop_install_dir+"steamcmd.sh") == False: urllib.request.urlretrieve("http://media.steampowered.com/client/steamcmd_linux.tar.gz", shop_install_dir + "steamcmd_linux.tar.gz") tar = tarfile.open(shop_install_dir + "steamcmd_linux.tar.gz") tar.extractall() tar.close() def get_last_log_line(): wrong_lines = ["CWorkThreadPool"] last_line_nr = -1 try: steam_log_file = open("steam_log.txt", "r") steam_log_lines = steam_log_file.readlines() if len(steam_log_lines) > 0: steam_last_line = steam_log_lines[last_line_nr] for w_line in wrong_lines: while w_line in steam_last_line: last_line_nr -= 1 steam_last_line = steam_log_lines[last_line_nr] else: steam_last_line = "" steam_log_file.close() except FileNotFoundError: steam_last_line = "" return steam_last_line def steam_guard(): while os.path.isfile(recultis_dir + "guard_key.txt") == False: time.sleep(2) print('Steam Guard Key detected. 
Verifying...') steam_guard_file = open(recultis_dir + "guard_key.txt", "r") steam_guard_code = steam_guard_file.readline() steam_guard_file.close() os.remove(recultis_dir + "guard_key.txt") print(str(steam_guard_code).upper()) return str(steam_guard_code.upper()) def run(login, password, shop_install_dir, s_appid, game_dir): if os.path.isfile(shop_install_dir+"steam_log.txt") == True: os.remove(shop_install_dir+"steam_log.txt") print("Running following steamcmd command:") print("./steamcmd.sh +@sSteamCmdForcePlatformType windows +login '" + login + "' '******' +force_install_dir " + game_dir + " +app_update " + s_appid + " validate +quit") print("Check " + shop_install_dir + "steam_log.txt for more details.") steam_download = Popen("script -q -c \"./steamcmd.sh +@sSteamCmdForcePlatformType windows +login '" + login + "' '" + password + "' +force_install_dir " + game_dir + " +app_update " + s_appid + " validate +quit\" /dev/null", shell=True, stdout=open("steam_log.txt", "wb"), stdin=PIPE) while steam_download.poll() is None: time.sleep(2) steam_last_line = get_last_log_line() #Terminate the process if bad login or password if "FAILED with result code" in steam_last_line: steam_download.terminate() return 0 #Terminate the process if not owning the game elif "Failed to install app" in steam_last_line: steam_download.terminate() return 0 #Retry 5 times if steamcmd has memory access error elif '$DEBUGGER "$STEAMEXE" "$@"' in steam_last_line: return 2 #If computer is not registered on Steam, handle Steam Guard elif 'Steam Guard' in steam_last_line: steam_guard_code = steam_guard() steam_download.stdin.write(bytes(steam_guard_code + '\n', 'ascii')) steam_download.stdin.flush() #if there is only 1 line after steamcmd finished working, it means it crashed. if sum(1 for line in open('steam_log.txt')) == 1: rc = 0 else: rc = 1 return rc def steamcmd_reinstall(shop_install_dir): print("Reinstalling SteamCMD") print("Removing SteamCMD") if os.path.isfile(shop_install_dir+"steam.sh") == True: os.remove(shop_install_dir+"steam.sh") if os.path.isfile(shop_install_dir+"steamcmd.sh") == True: os.remove(shop_install_dir+"steamcmd.sh") if os.path.isfile(shop_install_dir+"steamcmd_linux.tar.gz") == True: os.remove(shop_install_dir+"steamcmd_linux.tar.gz") if os.path.isdir(shop_install_dir+"linux32") == True: shutil.rmtree(shop_install_dir+"linux32") if os.path.isdir(shop_install_dir+"linux64") == True: shutil.rmtree(shop_install_dir+"linux64") if os.path.isdir(shop_install_dir+"package") == True: shutil.rmtree(shop_install_dir+"package") if os.path.isdir(shop_install_dir+"public") == True: shutil.rmtree(shop_install_dir+"public") steamcmd_install(shop_install_dir) def status(): if os.path.isdir(steam_dir) == True: os.chdir(steam_dir) else: status = "Preparing SteamCMD" percent = 0 return status, percent status = "Downloading and installing game data" percent = 0 steam_last_line = get_last_log_line() if steam_last_line == "": steam_last_line = "downloading, progress: 0,0 (" #This code handle steamcmd status if everything is ok if ("downloading, progress: " in steam_last_line) or ("validating, progress: " in steam_last_line): steam_value = steam_last_line.split("progress: ")[1] steam_value = steam_value.split(" (")[0] steam_value = steam_value.split(",")[0] steam_value = steam_value.split(".")[0] steam_value = int(steam_value) status = "Downloading and installing game data" percent = steam_value elif "Success!" 
in steam_last_line: status = "Download of game data completed" percent = 100 #this code handle steamcmd status if warning is present. elif "Steam Guard" in steam_last_line: status = "Warning: Waiting for Steam Guard authentication." percent = 0 #this code handle steamcmd status if steam tool marked steam_log.txt file with error. if "Steamcmd Error." in steam_last_line: try: steam_log_file = open("steam_log.txt", "r") steam_log_lines = steam_log_file.readlines() steam_error_line = steam_log_lines[-3] steam_log_file.close() except: steam_error_line = "Steamcmd Error. Terminate." if "FAILED with result code 5" in steam_error_line: status = "Error: Steam - bad login or password. Please correct and start again." percent = 0 elif "Login or password not provided." in steam_error_line: status = "Error: Steam - Login or password not provided. Try again with correct one." percent = 0 elif "Failed to install app" in steam_error_line: status = "Error: Steam - you are not game owner. Please correct and start again." percent = 0 elif "FAILED with result code 65" in steam_error_line: status = "Error: Could not perform Steam Guard authentication. Please try again." percent = 0 else: status = "Error: Steamcmd internal error. Please contact Recultis project for support." percent = 0 return status, percent
gpl-3.0
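steam.py above wraps steamcmd: start() installs it if needed and drives the download (pausing for Steam Guard codes dropped into guard_key.txt), while status() parses steam_log.txt into a human-readable state and percentage. A hedged sketch of the call pattern; the app id, paths, and credentials are placeholders, not values from the project:

# Sketch of driving the helper above; every literal here is a placeholder.
import os
from tools import steam   # assumes tools/steam.py is importable as a package module

recultis_dir = os.path.expanduser('~/.recultis/')
game_dir = recultis_dir + 'games/some_game/'
rc = steam.start('my_login', 'my_password', recultis_dir, '12345', game_dir)
state, percent = steam.status()
print(rc, state, percent)   # rc is 1 on success, 0 on a terminal steamcmd error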
spl0k/supysonic
supysonic/cli.py
1
15855
# This file is part of Supysonic. # Supysonic is a Python implementation of the Subsonic server API. # # Copyright (C) 2013-2021 Alban 'spl0k' Féron # # Distributed under terms of the GNU AGPLv3 license. import argparse import cmd import getpass import shlex import sys import time from pony.orm import db_session, select from pony.orm import ObjectNotFound from .config import IniConfig from .daemon.client import DaemonClient from .daemon.exceptions import DaemonUnavailableError from .db import Folder, User, init_database, release_database from .managers.folder import FolderManager from .managers.user import UserManager from .scanner import Scanner class TimedProgressDisplay: def __init__(self, stdout, interval=5): self.__stdout = stdout self.__interval = interval self.__last_display = 0 self.__last_len = 0 def __call__(self, name, scanned): if time.time() - self.__last_display > self.__interval: progress = "Scanning '{}': {} files scanned".format(name, scanned) self.__stdout.write("\b" * self.__last_len) self.__stdout.write(progress) self.__stdout.flush() self.__last_len = len(progress) self.__last_display = time.time() class CLIParser(argparse.ArgumentParser): def error(self, message): self.print_usage(sys.stderr) raise RuntimeError(message) class SupysonicCLI(cmd.Cmd): prompt = "supysonic> " def _make_do(self, command): def method(obj, line): try: args = getattr(obj, command + "_parser").parse_args(shlex.split(line)) except RuntimeError as e: self.write_error_line(str(e)) return if hasattr(obj.__class__, command + "_subparsers"): try: func = getattr(obj, "{}_{}".format(command, args.action)) except AttributeError: return obj.default(line) return func( **{key: vars(args)[key] for key in vars(args) if key != "action"} ) else: try: func = getattr(obj, command) except AttributeError: return obj.default(line) return func(**vars(args)) return method def __init__(self, config, stderr=None, *args, **kwargs): cmd.Cmd.__init__(self, *args, **kwargs) if stderr is not None: self.stderr = stderr else: self.stderr = sys.stderr self.__config = config self.__daemon = DaemonClient(config.DAEMON["socket"]) # Generate do_* and help_* methods for parser_name in filter( lambda attr: attr.endswith("_parser") and "_" not in attr[:-7], dir(self.__class__), ): command = parser_name[:-7] if not hasattr(self.__class__, "do_" + command): setattr(self.__class__, "do_" + command, self._make_do(command)) if hasattr(self.__class__, "do_" + command) and not hasattr( self.__class__, "help_" + command ): setattr( self.__class__, "help_" + command, getattr(self.__class__, parser_name).print_help, ) if hasattr(self.__class__, command + "_subparsers"): for action, subparser in getattr( self.__class__, command + "_subparsers" ).choices.items(): setattr( self, "help_{} {}".format(command, action), subparser.print_help ) def write_line(self, line=""): self.stdout.write(line + "\n") def write_error_line(self, line=""): self.stderr.write(line + "\n") def do_EOF(self, line): return True do_exit = do_EOF def default(self, line): self.write_line("Unknown command %s" % line.split()[0]) self.do_help(None) def postloop(self): self.write_line() def completedefault(self, text, line, begidx, endidx): command = line.split()[0] parsers = getattr(self.__class__, command + "_subparsers", None) if not parsers: return [] num_words = len(line[len(command) : begidx].split()) if num_words == 0: return [a for a in parsers.choices if a.startswith(text)] return [] folder_parser = CLIParser(prog="folder", add_help=False) folder_subparsers = 
folder_parser.add_subparsers(dest="action") folder_subparsers.add_parser("list", help="Lists folders", add_help=False) folder_add_parser = folder_subparsers.add_parser( "add", help="Adds a folder", add_help=False ) folder_add_parser.add_argument("name", help="Name of the folder to add") folder_add_parser.add_argument( "path", help="Path to the directory pointed by the folder" ) folder_del_parser = folder_subparsers.add_parser( "delete", help="Deletes a folder", add_help=False ) folder_del_parser.add_argument("name", help="Name of the folder to delete") folder_scan_parser = folder_subparsers.add_parser( "scan", help="Run a scan on specified folders", add_help=False ) folder_scan_parser.add_argument( "folders", metavar="folder", nargs="*", help="Folder(s) to be scanned. If ommitted, all folders are scanned", ) folder_scan_parser.add_argument( "-f", "--force", action="store_true", help="Force scan of already know files even if they haven't changed", ) folder_scan_target_group = folder_scan_parser.add_mutually_exclusive_group() folder_scan_target_group.add_argument( "--background", action="store_true", help="Scan the folder(s) in the background. Requires the daemon to be running.", ) folder_scan_target_group.add_argument( "--foreground", action="store_true", help="Scan the folder(s) in the foreground, blocking the processus while the scan is running.", ) @db_session def folder_list(self): self.write_line("Name\t\tPath\n----\t\t----") self.write_line( "\n".join( "{: <16}{}".format(f.name, f.path) for f in Folder.select(lambda f: f.root) ) ) @db_session def folder_add(self, name, path): try: FolderManager.add(name, path) self.write_line("Folder '{}' added".format(name)) except ValueError as e: self.write_error_line(str(e)) @db_session def folder_delete(self, name): try: FolderManager.delete_by_name(name) self.write_line("Deleted folder '{}'".format(name)) except ObjectNotFound as e: self.write_error_line(str(e)) def folder_scan(self, folders, force, background, foreground): auto = not background and not foreground if auto: try: self.__folder_scan_background(folders, force) except DaemonUnavailableError: self.write_error_line( "Couldn't connect to the daemon, scanning in foreground" ) self.__folder_scan_foreground(folders, force) elif background: try: self.__folder_scan_background(folders, force) except DaemonUnavailableError: self.write_error_line( "Couldn't connect to the daemon, please use the '--foreground' option" ) elif foreground: self.__folder_scan_foreground(folders, force) def __folder_scan_background(self, folders, force): self.__daemon.scan(folders, force) def __folder_scan_foreground(self, folders, force): try: progress = self.__daemon.get_scanning_progress() if progress is not None: self.write_error_line( "The daemon is currently scanning, can't start a scan now" ) return except DaemonUnavailableError: pass extensions = self.__config.BASE["scanner_extensions"] if extensions: extensions = extensions.split(" ") scanner = Scanner( force=force, extensions=extensions, follow_symlinks=self.__config.BASE["follow_symlinks"], progress=TimedProgressDisplay(self.stdout), on_folder_start=self.__unwatch_folder, on_folder_end=self.__watch_folder, ) if folders: fstrs = folders with db_session: folders = select(f.name for f in Folder if f.root and f.name in fstrs)[ : ] notfound = set(fstrs) - set(folders) if notfound: self.write_line("No such folder(s): " + " ".join(notfound)) for folder in folders: scanner.queue_folder(folder) else: with db_session: for folder in select(f.name for f in Folder if 
f.root): scanner.queue_folder(folder) scanner.run() stats = scanner.stats() self.write_line("\nScanning done") self.write_line( "Added: {0.artists} artists, {0.albums} albums, {0.tracks} tracks".format( stats.added ) ) self.write_line( "Deleted: {0.artists} artists, {0.albums} albums, {0.tracks} tracks".format( stats.deleted ) ) if stats.errors: self.write_line("Errors in:") for err in stats.errors: self.write_line("- " + err) def __unwatch_folder(self, folder): try: self.__daemon.remove_watched_folder(folder.path) except DaemonUnavailableError: pass def __watch_folder(self, folder): try: self.__daemon.add_watched_folder(folder.path) except DaemonUnavailableError: pass user_parser = CLIParser(prog="user", add_help=False) user_subparsers = user_parser.add_subparsers(dest="action") user_subparsers.add_parser("list", help="List users", add_help=False) user_add_parser = user_subparsers.add_parser( "add", help="Adds a user", add_help=False ) user_add_parser.add_argument("name", help="Name/login of the user to add") user_add_parser.add_argument( "-p", "--password", help="Specifies the user's password" ) user_add_parser.add_argument( "-e", "--email", default="", help="Sets the user's email address" ) user_del_parser = user_subparsers.add_parser( "delete", help="Deletes a user", add_help=False ) user_del_parser.add_argument("name", help="Name/login of the user to delete") user_roles_parser = user_subparsers.add_parser( "setroles", help="Enable/disable rights for a user", add_help=False ) user_roles_parser.add_argument( "name", help="Name/login of the user to grant/revoke admin rights" ) user_roles_admin_group = user_roles_parser.add_mutually_exclusive_group() user_roles_admin_group.add_argument( "-A", "--admin", action="store_true", help="Grant admin rights" ) user_roles_admin_group.add_argument( "-a", "--noadmin", action="store_true", help="Revoke admin rights" ) user_roles_jukebox_group = user_roles_parser.add_mutually_exclusive_group() user_roles_jukebox_group.add_argument( "-J", "--jukebox", action="store_true", help="Grant jukebox rights" ) user_roles_jukebox_group.add_argument( "-j", "--nojukebox", action="store_true", help="Revoke jukebox rights" ) user_pass_parser = user_subparsers.add_parser( "changepass", help="Changes a user's password", add_help=False ) user_pass_parser.add_argument( "name", help="Name/login of the user to which change the password" ) user_pass_parser.add_argument("password", nargs="?", help="New password") user_rename_parser = user_subparsers.add_parser( "rename", help="Rename a user", add_help=False ) user_rename_parser.add_argument("name", help="Name of the user to rename") user_rename_parser.add_argument("newname", help="New name for the user") @db_session def user_list(self): self.write_line("Name\t\tAdmin\tJukebox\tEmail") self.write_line("----\t\t-----\t-------\t-----") self.write_line( "\n".join( "{: <16}{}\t{}\t{}".format( u.name, "*" if u.admin else "", "*" if u.jukebox else "", u.mail ) for u in User.select() ) ) def _ask_password(self): # pragma: nocover password = getpass.getpass() confirm = getpass.getpass("Confirm password: ") if password != confirm: raise ValueError("Passwords don't match") return password @db_session def user_add(self, name, password, email): try: if not password: password = self._ask_password() # pragma: nocover UserManager.add(name, password, mail=email) except ValueError as e: self.write_error_line(str(e)) @db_session def user_delete(self, name): try: UserManager.delete_by_name(name) self.write_line("Deleted user '{}'".format(name)) 
except ObjectNotFound as e: self.write_error_line(str(e)) @db_session def user_setroles(self, name, admin, noadmin, jukebox, nojukebox): user = User.get(name=name) if user is None: self.write_error_line("No such user") else: if admin: user.admin = True self.write_line("Granted '{}' admin rights".format(name)) elif noadmin: user.admin = False self.write_line("Revoked '{}' admin rights".format(name)) if jukebox: user.jukebox = True self.write_line("Granted '{}' jukebox rights".format(name)) elif nojukebox: user.jukebox = False self.write_line("Revoked '{}' jukebox rights".format(name)) @db_session def user_changepass(self, name, password): try: if not password: password = self._ask_password() # pragma: nocover UserManager.change_password2(name, password) self.write_line("Successfully changed '{}' password".format(name)) except ObjectNotFound as e: self.write_error_line(str(e)) @db_session def user_rename(self, name, newname): if not name or not newname: self.write_error_line("Missing user current name or new name") return if name == newname: return user = User.get(name=name) if user is None: self.write_error_line("No such user") return if User.get(name=newname) is not None: self.write_error_line("This name is already taken") return user.name = newname self.write_line("User '{}' renamed to '{}'".format(name, newname)) def main(): config = IniConfig.from_common_locations() init_database(config.BASE["database_uri"]) cli = SupysonicCLI(config) if len(sys.argv) > 1: cli.onecmd(" ".join(shlex.quote(arg) for arg in sys.argv[1:])) else: cli.cmdloop() release_database() if __name__ == "__main__": main()
agpl-3.0
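The Supysonic cli.py above builds its command set dynamically: every class attribute named <command>_parser becomes a do_<command> handler via _make_do, and subparser actions dispatch to <command>_<action> methods. Below is a minimal, standard-library sketch of that pattern (not Supysonic code; the MiniCLI class and its greet command are invented for illustration). Supysonic's CLIParser additionally overrides error() to raise RuntimeError so a bad argument does not exit the interactive loop; the sketch simply catches SystemExit for the same effect.

import argparse
import cmd
import shlex


class MiniCLI(cmd.Cmd):
    prompt = "mini> "

    # Class-level parser, mirroring the "<command>_parser" convention above.
    greet_parser = argparse.ArgumentParser(prog="greet", add_help=False)
    greet_parser.add_argument("name", help="Who to greet")
    greet_parser.add_argument("-s", "--shout", action="store_true")

    def do_greet(self, line):
        # Supysonic generates this method from the parser; here it is written by hand.
        try:
            args = self.greet_parser.parse_args(shlex.split(line))
        except SystemExit:
            # argparse calls sys.exit() on bad input; swallow it inside a REPL.
            return
        message = "Hello, {}".format(args.name)
        self.stdout.write((message.upper() if args.shout else message) + "\n")

    def do_EOF(self, line):
        return True


if __name__ == "__main__":
    MiniCLI().cmdloop()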
willingc/oh-mainline
vendor/packages/twisted/twisted/test/test_digestauth.py
29
23460
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.cred._digest} and the associated bits in L{twisted.cred.credentials}. """ from zope.interface.verify import verifyObject from twisted.trial.unittest import TestCase from twisted.python.hashlib import md5, sha1 from twisted.internet.address import IPv4Address from twisted.cred.error import LoginFailed from twisted.cred.credentials import calcHA1, calcHA2, IUsernameDigestHash from twisted.cred.credentials import calcResponse, DigestCredentialFactory def b64encode(s): return s.encode('base64').strip() class FakeDigestCredentialFactory(DigestCredentialFactory): """ A Fake Digest Credential Factory that generates a predictable nonce and opaque """ def __init__(self, *args, **kwargs): super(FakeDigestCredentialFactory, self).__init__(*args, **kwargs) self.privateKey = "0" def _generateNonce(self): """ Generate a static nonce """ return '178288758716122392881254770685' def _getTime(self): """ Return a stable time """ return 0 class DigestAuthTests(TestCase): """ L{TestCase} mixin class which defines a number of tests for L{DigestCredentialFactory}. Because this mixin defines C{setUp}, it must be inherited before L{TestCase}. """ def setUp(self): """ Create a DigestCredentialFactory for testing """ self.username = "foobar" self.password = "bazquux" self.realm = "test realm" self.algorithm = "md5" self.cnonce = "29fc54aa1641c6fa0e151419361c8f23" self.qop = "auth" self.uri = "/write/" self.clientAddress = IPv4Address('TCP', '10.2.3.4', 43125) self.method = 'GET' self.credentialFactory = DigestCredentialFactory( self.algorithm, self.realm) def test_MD5HashA1(self, _algorithm='md5', _hash=md5): """ L{calcHA1} accepts the C{'md5'} algorithm and returns an MD5 hash of its parameters, excluding the nonce and cnonce. """ nonce = 'abc123xyz' hashA1 = calcHA1(_algorithm, self.username, self.realm, self.password, nonce, self.cnonce) a1 = '%s:%s:%s' % (self.username, self.realm, self.password) expected = _hash(a1).hexdigest() self.assertEqual(hashA1, expected) def test_MD5SessionHashA1(self): """ L{calcHA1} accepts the C{'md5-sess'} algorithm and returns an MD5 hash of its parameters, including the nonce and cnonce. """ nonce = 'xyz321abc' hashA1 = calcHA1('md5-sess', self.username, self.realm, self.password, nonce, self.cnonce) a1 = '%s:%s:%s' % (self.username, self.realm, self.password) ha1 = md5(a1).digest() a1 = '%s:%s:%s' % (ha1, nonce, self.cnonce) expected = md5(a1).hexdigest() self.assertEqual(hashA1, expected) def test_SHAHashA1(self): """ L{calcHA1} accepts the C{'sha'} algorithm and returns a SHA hash of its parameters, excluding the nonce and cnonce. """ self.test_MD5HashA1('sha', sha1) def test_MD5HashA2Auth(self, _algorithm='md5', _hash=md5): """ L{calcHA2} accepts the C{'md5'} algorithm and returns an MD5 hash of its arguments, excluding the entity hash for QOP other than C{'auth-int'}. """ method = 'GET' hashA2 = calcHA2(_algorithm, method, self.uri, 'auth', None) a2 = '%s:%s' % (method, self.uri) expected = _hash(a2).hexdigest() self.assertEqual(hashA2, expected) def test_MD5HashA2AuthInt(self, _algorithm='md5', _hash=md5): """ L{calcHA2} accepts the C{'md5'} algorithm and returns an MD5 hash of its arguments, including the entity hash for QOP of C{'auth-int'}. 
""" method = 'GET' hentity = 'foobarbaz' hashA2 = calcHA2(_algorithm, method, self.uri, 'auth-int', hentity) a2 = '%s:%s:%s' % (method, self.uri, hentity) expected = _hash(a2).hexdigest() self.assertEqual(hashA2, expected) def test_MD5SessHashA2Auth(self): """ L{calcHA2} accepts the C{'md5-sess'} algorithm and QOP of C{'auth'} and returns the same value as it does for the C{'md5'} algorithm. """ self.test_MD5HashA2Auth('md5-sess') def test_MD5SessHashA2AuthInt(self): """ L{calcHA2} accepts the C{'md5-sess'} algorithm and QOP of C{'auth-int'} and returns the same value as it does for the C{'md5'} algorithm. """ self.test_MD5HashA2AuthInt('md5-sess') def test_SHAHashA2Auth(self): """ L{calcHA2} accepts the C{'sha'} algorithm and returns a SHA hash of its arguments, excluding the entity hash for QOP other than C{'auth-int'}. """ self.test_MD5HashA2Auth('sha', sha1) def test_SHAHashA2AuthInt(self): """ L{calcHA2} accepts the C{'sha'} algorithm and returns a SHA hash of its arguments, including the entity hash for QOP of C{'auth-int'}. """ self.test_MD5HashA2AuthInt('sha', sha1) def test_MD5HashResponse(self, _algorithm='md5', _hash=md5): """ L{calcResponse} accepts the C{'md5'} algorithm and returns an MD5 hash of its parameters, excluding the nonce count, client nonce, and QoP value if the nonce count and client nonce are C{None} """ hashA1 = 'abc123' hashA2 = '789xyz' nonce = 'lmnopq' response = '%s:%s:%s' % (hashA1, nonce, hashA2) expected = _hash(response).hexdigest() digest = calcResponse(hashA1, hashA2, _algorithm, nonce, None, None, None) self.assertEqual(expected, digest) def test_MD5SessionHashResponse(self): """ L{calcResponse} accepts the C{'md5-sess'} algorithm and returns an MD5 hash of its parameters, excluding the nonce count, client nonce, and QoP value if the nonce count and client nonce are C{None} """ self.test_MD5HashResponse('md5-sess') def test_SHAHashResponse(self): """ L{calcResponse} accepts the C{'sha'} algorithm and returns a SHA hash of its parameters, excluding the nonce count, client nonce, and QoP value if the nonce count and client nonce are C{None} """ self.test_MD5HashResponse('sha', sha1) def test_MD5HashResponseExtra(self, _algorithm='md5', _hash=md5): """ L{calcResponse} accepts the C{'md5'} algorithm and returns an MD5 hash of its parameters, including the nonce count, client nonce, and QoP value if they are specified. """ hashA1 = 'abc123' hashA2 = '789xyz' nonce = 'lmnopq' nonceCount = '00000004' clientNonce = 'abcxyz123' qop = 'auth' response = '%s:%s:%s:%s:%s:%s' % ( hashA1, nonce, nonceCount, clientNonce, qop, hashA2) expected = _hash(response).hexdigest() digest = calcResponse( hashA1, hashA2, _algorithm, nonce, nonceCount, clientNonce, qop) self.assertEqual(expected, digest) def test_MD5SessionHashResponseExtra(self): """ L{calcResponse} accepts the C{'md5-sess'} algorithm and returns an MD5 hash of its parameters, including the nonce count, client nonce, and QoP value if they are specified. """ self.test_MD5HashResponseExtra('md5-sess') def test_SHAHashResponseExtra(self): """ L{calcResponse} accepts the C{'sha'} algorithm and returns a SHA hash of its parameters, including the nonce count, client nonce, and QoP value if they are specified. """ self.test_MD5HashResponseExtra('sha', sha1) def formatResponse(self, quotes=True, **kw): """ Format all given keyword arguments and their values suitably for use as the value of an HTTP header. @types quotes: C{bool} @param quotes: A flag indicating whether to quote the values of each field in the response. 
@param **kw: Keywords and C{str} values which will be treated as field name/value pairs to include in the result. @rtype: C{str} @return: The given fields formatted for use as an HTTP header value. """ if 'username' not in kw: kw['username'] = self.username if 'realm' not in kw: kw['realm'] = self.realm if 'algorithm' not in kw: kw['algorithm'] = self.algorithm if 'qop' not in kw: kw['qop'] = self.qop if 'cnonce' not in kw: kw['cnonce'] = self.cnonce if 'uri' not in kw: kw['uri'] = self.uri if quotes: quote = '"' else: quote = '' return ', '.join([ '%s=%s%s%s' % (k, quote, v, quote) for (k, v) in kw.iteritems() if v is not None]) def getDigestResponse(self, challenge, ncount): """ Calculate the response for the given challenge """ nonce = challenge.get('nonce') algo = challenge.get('algorithm').lower() qop = challenge.get('qop') ha1 = calcHA1( algo, self.username, self.realm, self.password, nonce, self.cnonce) ha2 = calcHA2(algo, "GET", self.uri, qop, None) expected = calcResponse(ha1, ha2, algo, nonce, ncount, self.cnonce, qop) return expected def test_response(self, quotes=True): """ L{DigestCredentialFactory.decode} accepts a digest challenge response and parses it into an L{IUsernameHashedPassword} provider. """ challenge = self.credentialFactory.getChallenge(self.clientAddress.host) nc = "00000001" clientResponse = self.formatResponse( quotes=quotes, nonce=challenge['nonce'], response=self.getDigestResponse(challenge, nc), nc=nc, opaque=challenge['opaque']) creds = self.credentialFactory.decode( clientResponse, self.method, self.clientAddress.host) self.assertTrue(creds.checkPassword(self.password)) self.assertFalse(creds.checkPassword(self.password + 'wrong')) def test_responseWithoutQuotes(self): """ L{DigestCredentialFactory.decode} accepts a digest challenge response which does not quote the values of its fields and parses it into an L{IUsernameHashedPassword} provider in the same way it would a response which included quoted field values. """ self.test_response(False) def test_caseInsensitiveAlgorithm(self): """ The case of the algorithm value in the response is ignored when checking the credentials. """ self.algorithm = 'MD5' self.test_response() def test_md5DefaultAlgorithm(self): """ The algorithm defaults to MD5 if it is not supplied in the response. """ self.algorithm = None self.test_response() def test_responseWithoutClientIP(self): """ L{DigestCredentialFactory.decode} accepts a digest challenge response even if the client address it is passed is C{None}. """ challenge = self.credentialFactory.getChallenge(None) nc = "00000001" clientResponse = self.formatResponse( nonce=challenge['nonce'], response=self.getDigestResponse(challenge, nc), nc=nc, opaque=challenge['opaque']) creds = self.credentialFactory.decode(clientResponse, self.method, None) self.assertTrue(creds.checkPassword(self.password)) self.assertFalse(creds.checkPassword(self.password + 'wrong')) def test_multiResponse(self): """ L{DigestCredentialFactory.decode} handles multiple responses to a single challenge. 
""" challenge = self.credentialFactory.getChallenge(self.clientAddress.host) nc = "00000001" clientResponse = self.formatResponse( nonce=challenge['nonce'], response=self.getDigestResponse(challenge, nc), nc=nc, opaque=challenge['opaque']) creds = self.credentialFactory.decode(clientResponse, self.method, self.clientAddress.host) self.assertTrue(creds.checkPassword(self.password)) self.assertFalse(creds.checkPassword(self.password + 'wrong')) nc = "00000002" clientResponse = self.formatResponse( nonce=challenge['nonce'], response=self.getDigestResponse(challenge, nc), nc=nc, opaque=challenge['opaque']) creds = self.credentialFactory.decode(clientResponse, self.method, self.clientAddress.host) self.assertTrue(creds.checkPassword(self.password)) self.assertFalse(creds.checkPassword(self.password + 'wrong')) def test_failsWithDifferentMethod(self): """ L{DigestCredentialFactory.decode} returns an L{IUsernameHashedPassword} provider which rejects a correct password for the given user if the challenge response request is made using a different HTTP method than was used to request the initial challenge. """ challenge = self.credentialFactory.getChallenge(self.clientAddress.host) nc = "00000001" clientResponse = self.formatResponse( nonce=challenge['nonce'], response=self.getDigestResponse(challenge, nc), nc=nc, opaque=challenge['opaque']) creds = self.credentialFactory.decode(clientResponse, 'POST', self.clientAddress.host) self.assertFalse(creds.checkPassword(self.password)) self.assertFalse(creds.checkPassword(self.password + 'wrong')) def test_noUsername(self): """ L{DigestCredentialFactory.decode} raises L{LoginFailed} if the response has no username field or if the username field is empty. """ # Check for no username e = self.assertRaises( LoginFailed, self.credentialFactory.decode, self.formatResponse(username=None), self.method, self.clientAddress.host) self.assertEqual(str(e), "Invalid response, no username given.") # Check for an empty username e = self.assertRaises( LoginFailed, self.credentialFactory.decode, self.formatResponse(username=""), self.method, self.clientAddress.host) self.assertEqual(str(e), "Invalid response, no username given.") def test_noNonce(self): """ L{DigestCredentialFactory.decode} raises L{LoginFailed} if the response has no nonce. """ e = self.assertRaises( LoginFailed, self.credentialFactory.decode, self.formatResponse(opaque="abc123"), self.method, self.clientAddress.host) self.assertEqual(str(e), "Invalid response, no nonce given.") def test_noOpaque(self): """ L{DigestCredentialFactory.decode} raises L{LoginFailed} if the response has no opaque. """ e = self.assertRaises( LoginFailed, self.credentialFactory.decode, self.formatResponse(), self.method, self.clientAddress.host) self.assertEqual(str(e), "Invalid response, no opaque given.") def test_checkHash(self): """ L{DigestCredentialFactory.decode} returns an L{IUsernameDigestHash} provider which can verify a hash of the form 'username:realm:password'. 
""" challenge = self.credentialFactory.getChallenge(self.clientAddress.host) nc = "00000001" clientResponse = self.formatResponse( nonce=challenge['nonce'], response=self.getDigestResponse(challenge, nc), nc=nc, opaque=challenge['opaque']) creds = self.credentialFactory.decode(clientResponse, self.method, self.clientAddress.host) self.assertTrue(verifyObject(IUsernameDigestHash, creds)) cleartext = '%s:%s:%s' % (self.username, self.realm, self.password) hash = md5(cleartext) self.assertTrue(creds.checkHash(hash.hexdigest())) hash.update('wrong') self.assertFalse(creds.checkHash(hash.hexdigest())) def test_invalidOpaque(self): """ L{DigestCredentialFactory.decode} raises L{LoginFailed} when the opaque value does not contain all the required parts. """ credentialFactory = FakeDigestCredentialFactory(self.algorithm, self.realm) challenge = credentialFactory.getChallenge(self.clientAddress.host) exc = self.assertRaises( LoginFailed, credentialFactory._verifyOpaque, 'badOpaque', challenge['nonce'], self.clientAddress.host) self.assertEqual(str(exc), 'Invalid response, invalid opaque value') badOpaque = 'foo-' + b64encode('nonce,clientip') exc = self.assertRaises( LoginFailed, credentialFactory._verifyOpaque, badOpaque, challenge['nonce'], self.clientAddress.host) self.assertEqual(str(exc), 'Invalid response, invalid opaque value') exc = self.assertRaises( LoginFailed, credentialFactory._verifyOpaque, '', challenge['nonce'], self.clientAddress.host) self.assertEqual(str(exc), 'Invalid response, invalid opaque value') badOpaque = ( 'foo-' + b64encode('%s,%s,foobar' % ( challenge['nonce'], self.clientAddress.host))) exc = self.assertRaises( LoginFailed, credentialFactory._verifyOpaque, badOpaque, challenge['nonce'], self.clientAddress.host) self.assertEqual( str(exc), 'Invalid response, invalid opaque/time values') def test_incompatibleNonce(self): """ L{DigestCredentialFactory.decode} raises L{LoginFailed} when the given nonce from the response does not match the nonce encoded in the opaque. """ credentialFactory = FakeDigestCredentialFactory(self.algorithm, self.realm) challenge = credentialFactory.getChallenge(self.clientAddress.host) badNonceOpaque = credentialFactory._generateOpaque( '1234567890', self.clientAddress.host) exc = self.assertRaises( LoginFailed, credentialFactory._verifyOpaque, badNonceOpaque, challenge['nonce'], self.clientAddress.host) self.assertEqual( str(exc), 'Invalid response, incompatible opaque/nonce values') exc = self.assertRaises( LoginFailed, credentialFactory._verifyOpaque, badNonceOpaque, '', self.clientAddress.host) self.assertEqual( str(exc), 'Invalid response, incompatible opaque/nonce values') def test_incompatibleClientIP(self): """ L{DigestCredentialFactory.decode} raises L{LoginFailed} when the request comes from a client IP other than what is encoded in the opaque. 
""" credentialFactory = FakeDigestCredentialFactory(self.algorithm, self.realm) challenge = credentialFactory.getChallenge(self.clientAddress.host) badAddress = '10.0.0.1' # Sanity check self.assertNotEqual(self.clientAddress.host, badAddress) badNonceOpaque = credentialFactory._generateOpaque( challenge['nonce'], badAddress) self.assertRaises( LoginFailed, credentialFactory._verifyOpaque, badNonceOpaque, challenge['nonce'], self.clientAddress.host) def test_oldNonce(self): """ L{DigestCredentialFactory.decode} raises L{LoginFailed} when the given opaque is older than C{DigestCredentialFactory.CHALLENGE_LIFETIME_SECS} """ credentialFactory = FakeDigestCredentialFactory(self.algorithm, self.realm) challenge = credentialFactory.getChallenge(self.clientAddress.host) key = '%s,%s,%s' % (challenge['nonce'], self.clientAddress.host, '-137876876') digest = md5(key + credentialFactory.privateKey).hexdigest() ekey = b64encode(key) oldNonceOpaque = '%s-%s' % (digest, ekey.strip('\n')) self.assertRaises( LoginFailed, credentialFactory._verifyOpaque, oldNonceOpaque, challenge['nonce'], self.clientAddress.host) def test_mismatchedOpaqueChecksum(self): """ L{DigestCredentialFactory.decode} raises L{LoginFailed} when the opaque checksum fails verification. """ credentialFactory = FakeDigestCredentialFactory(self.algorithm, self.realm) challenge = credentialFactory.getChallenge(self.clientAddress.host) key = '%s,%s,%s' % (challenge['nonce'], self.clientAddress.host, '0') digest = md5(key + 'this is not the right pkey').hexdigest() badChecksum = '%s-%s' % (digest, b64encode(key)) self.assertRaises( LoginFailed, credentialFactory._verifyOpaque, badChecksum, challenge['nonce'], self.clientAddress.host) def test_incompatibleCalcHA1Options(self): """ L{calcHA1} raises L{TypeError} when any of the pszUsername, pszRealm, or pszPassword arguments are specified with the preHA1 keyword argument. """ arguments = ( ("user", "realm", "password", "preHA1"), (None, "realm", None, "preHA1"), (None, None, "password", "preHA1"), ) for pszUsername, pszRealm, pszPassword, preHA1 in arguments: self.assertRaises( TypeError, calcHA1, "md5", pszUsername, pszRealm, pszPassword, "nonce", "cnonce", preHA1=preHA1) def test_noNewlineOpaque(self): """ L{DigestCredentialFactory._generateOpaque} returns a value without newlines, regardless of the length of the nonce. """ opaque = self.credentialFactory._generateOpaque( "long nonce " * 10, None) self.assertNotIn('\n', opaque)
agpl-3.0
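The expected values in test_digestauth.py above are all built from the RFC 2617 hash chain. The short sketch below spells out that arithmetic with hashlib alone for algorithm="md5" and qop="auth"; the credentials are taken from the test's setUp and the nonce value is arbitrary. It restates what calcHA1, calcHA2 and calcResponse compute, it is not a replacement for them.

from hashlib import md5


def h(data):
    return md5(data.encode("ascii")).hexdigest()

username, realm, password = "foobar", "test realm", "bazquux"
method, uri = "GET", "/write/"
nonce = "abc123xyz"                                # arbitrary server nonce
cnonce, nc, qop = "29fc54aa1641c6fa0e151419361c8f23", "00000001", "auth"

ha1 = h("%s:%s:%s" % (username, realm, password))                       # calcHA1, "md5"
ha2 = h("%s:%s" % (method, uri))                                        # calcHA2, qop != "auth-int"
response = h("%s:%s:%s:%s:%s:%s" % (ha1, nonce, nc, cnonce, qop, ha2))  # calcResponse
print(response)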
kashyap32/scrapy
tests/test_webclient.py
112
12875
""" from twisted.internet import defer Tests borrowed from the twisted.web.client tests. """ import os from six.moves.urllib.parse import urlparse from twisted.trial import unittest from twisted.web import server, static, error, util from twisted.internet import reactor, defer from twisted.test.proto_helpers import StringTransport from twisted.python.filepath import FilePath from twisted.protocols.policies import WrappingFactory from scrapy.core.downloader import webclient as client from scrapy.http import Request, Headers def getPage(url, contextFactory=None, *args, **kwargs): """Adapted version of twisted.web.client.getPage""" def _clientfactory(*args, **kwargs): timeout = kwargs.pop('timeout', 0) f = client.ScrapyHTTPClientFactory(Request(*args, **kwargs), timeout=timeout) f.deferred.addCallback(lambda r: r.body) return f from twisted.web.client import _makeGetterFactory return _makeGetterFactory(url, _clientfactory, contextFactory=contextFactory, *args, **kwargs).deferred class ParseUrlTestCase(unittest.TestCase): """Test URL parsing facility and defaults values.""" def _parse(self, url): f = client.ScrapyHTTPClientFactory(Request(url)) return (f.scheme, f.netloc, f.host, f.port, f.path) def testParse(self): lip = '127.0.0.1' tests = ( ("http://127.0.0.1?c=v&c2=v2#fragment", ('http', lip, lip, 80, '/?c=v&c2=v2')), ("http://127.0.0.1/?c=v&c2=v2#fragment", ('http', lip, lip, 80, '/?c=v&c2=v2')), ("http://127.0.0.1/foo?c=v&c2=v2#frag", ('http', lip, lip, 80, '/foo?c=v&c2=v2')), ("http://127.0.0.1:100?c=v&c2=v2#fragment", ('http', lip+':100', lip, 100, '/?c=v&c2=v2')), ("http://127.0.0.1:100/?c=v&c2=v2#frag", ('http', lip+':100', lip, 100, '/?c=v&c2=v2')), ("http://127.0.0.1:100/foo?c=v&c2=v2#frag", ('http', lip+':100', lip, 100, '/foo?c=v&c2=v2')), ("http://127.0.0.1", ('http', lip, lip, 80, '/')), ("http://127.0.0.1/", ('http', lip, lip, 80, '/')), ("http://127.0.0.1/foo", ('http', lip, lip, 80, '/foo')), ("http://127.0.0.1?param=value", ('http', lip, lip, 80, '/?param=value')), ("http://127.0.0.1/?param=value", ('http', lip, lip, 80, '/?param=value')), ("http://127.0.0.1:12345/foo", ('http', lip+':12345', lip, 12345, '/foo')), ("http://spam:12345/foo", ('http', 'spam:12345', 'spam', 12345, '/foo')), ("http://spam.test.org/foo", ('http', 'spam.test.org', 'spam.test.org', 80, '/foo')), ("https://127.0.0.1/foo", ('https', lip, lip, 443, '/foo')), ("https://127.0.0.1/?param=value", ('https', lip, lip, 443, '/?param=value')), ("https://127.0.0.1:12345/", ('https', lip+':12345', lip, 12345, '/')), ("http://scrapytest.org/foo ", ('http', 'scrapytest.org', 'scrapytest.org', 80, '/foo')), ("http://egg:7890 ", ('http', 'egg:7890', 'egg', 7890, '/')), ) for url, test in tests: self.assertEquals(client._parse(url), test, url) def test_externalUnicodeInterference(self): """ L{client._parse} should return C{str} for the scheme, host, and path elements of its return tuple, even when passed an URL which has previously been passed to L{urlparse} as a C{unicode} string. 
""" badInput = u'http://example.com/path' goodInput = badInput.encode('ascii') urlparse(badInput) scheme, netloc, host, port, path = self._parse(goodInput) self.assertTrue(isinstance(scheme, str)) self.assertTrue(isinstance(netloc, str)) self.assertTrue(isinstance(host, str)) self.assertTrue(isinstance(path, str)) self.assertTrue(isinstance(port, int)) class ScrapyHTTPPageGetterTests(unittest.TestCase): def test_earlyHeaders(self): # basic test stolen from twisted HTTPageGetter factory = client.ScrapyHTTPClientFactory(Request( url='http://foo/bar', body="some data", headers={ 'Host': 'example.net', 'User-Agent': 'fooble', 'Cookie': 'blah blah', 'Content-Length': '12981', 'Useful': 'value'})) self._test(factory, "GET /bar HTTP/1.0\r\n" "Content-Length: 9\r\n" "Useful: value\r\n" "Connection: close\r\n" "User-Agent: fooble\r\n" "Host: example.net\r\n" "Cookie: blah blah\r\n" "\r\n" "some data") # test minimal sent headers factory = client.ScrapyHTTPClientFactory(Request('http://foo/bar')) self._test(factory, "GET /bar HTTP/1.0\r\n" "Host: foo\r\n" "\r\n") # test a simple POST with body and content-type factory = client.ScrapyHTTPClientFactory(Request( method='POST', url='http://foo/bar', body='name=value', headers={'Content-Type': 'application/x-www-form-urlencoded'})) self._test(factory, "POST /bar HTTP/1.0\r\n" "Host: foo\r\n" "Connection: close\r\n" "Content-Type: application/x-www-form-urlencoded\r\n" "Content-Length: 10\r\n" "\r\n" "name=value") # test a POST method with no body provided factory = client.ScrapyHTTPClientFactory(Request( method='POST', url='http://foo/bar' )) self._test(factory, "POST /bar HTTP/1.0\r\n" "Host: foo\r\n" "Content-Length: 0\r\n" "\r\n") # test with single and multivalued headers factory = client.ScrapyHTTPClientFactory(Request( url='http://foo/bar', headers={ 'X-Meta-Single': 'single', 'X-Meta-Multivalued': ['value1', 'value2'], })) self._test(factory, "GET /bar HTTP/1.0\r\n" "Host: foo\r\n" "X-Meta-Multivalued: value1\r\n" "X-Meta-Multivalued: value2\r\n" "X-Meta-Single: single\r\n" "\r\n") # same test with single and multivalued headers but using Headers class factory = client.ScrapyHTTPClientFactory(Request( url='http://foo/bar', headers=Headers({ 'X-Meta-Single': 'single', 'X-Meta-Multivalued': ['value1', 'value2'], }))) self._test(factory, "GET /bar HTTP/1.0\r\n" "Host: foo\r\n" "X-Meta-Multivalued: value1\r\n" "X-Meta-Multivalued: value2\r\n" "X-Meta-Single: single\r\n" "\r\n") def _test(self, factory, testvalue): transport = StringTransport() protocol = client.ScrapyHTTPPageGetter() protocol.factory = factory protocol.makeConnection(transport) self.assertEqual( set(transport.value().splitlines()), set(testvalue.splitlines())) return testvalue def test_non_standard_line_endings(self): # regression test for: http://dev.scrapy.org/ticket/258 factory = client.ScrapyHTTPClientFactory(Request( url='http://foo/bar')) protocol = client.ScrapyHTTPPageGetter() protocol.factory = factory protocol.headers = Headers() protocol.dataReceived("HTTP/1.0 200 OK\n") protocol.dataReceived("Hello: World\n") protocol.dataReceived("Foo: Bar\n") protocol.dataReceived("\n") self.assertEqual(protocol.headers, Headers({'Hello': ['World'], 'Foo': ['Bar']})) from twisted.web.test.test_webclient import ForeverTakingResource, \ ErrorResource, NoLengthResource, HostHeaderResource, \ PayloadResource, BrokenDownloadResource class WebClientTestCase(unittest.TestCase): def _listen(self, site): return reactor.listenTCP(0, site, interface="127.0.0.1") def setUp(self): name = 
self.mktemp() os.mkdir(name) FilePath(name).child("file").setContent("0123456789") r = static.File(name) r.putChild("redirect", util.Redirect("/file")) r.putChild("wait", ForeverTakingResource()) r.putChild("error", ErrorResource()) r.putChild("nolength", NoLengthResource()) r.putChild("host", HostHeaderResource()) r.putChild("payload", PayloadResource()) r.putChild("broken", BrokenDownloadResource()) self.site = server.Site(r, timeout=None) self.wrapper = WrappingFactory(self.site) self.port = self._listen(self.wrapper) self.portno = self.port.getHost().port def tearDown(self): return self.port.stopListening() def getURL(self, path): return "http://127.0.0.1:%d/%s" % (self.portno, path) def testPayload(self): s = "0123456789" * 10 return getPage(self.getURL("payload"), body=s).addCallback(self.assertEquals, s) def testHostHeader(self): # if we pass Host header explicitly, it should be used, otherwise # it should extract from url return defer.gatherResults([ getPage(self.getURL("host")).addCallback(self.assertEquals, "127.0.0.1:%d" % self.portno), getPage(self.getURL("host"), headers={"Host": "www.example.com"}).addCallback(self.assertEquals, "www.example.com")]) def test_getPage(self): """ L{client.getPage} returns a L{Deferred} which is called back with the body of the response if the default method B{GET} is used. """ d = getPage(self.getURL("file")) d.addCallback(self.assertEquals, "0123456789") return d def test_getPageHead(self): """ L{client.getPage} returns a L{Deferred} which is called back with the empty string if the method is C{HEAD} and there is a successful response code. """ def _getPage(method): return getPage(self.getURL("file"), method=method) return defer.gatherResults([ _getPage("head").addCallback(self.assertEqual, ""), _getPage("HEAD").addCallback(self.assertEqual, "")]) def test_timeoutNotTriggering(self): """ When a non-zero timeout is passed to L{getPage} and the page is retrieved before the timeout period elapses, the L{Deferred} is called back with the contents of the page. """ d = getPage(self.getURL("host"), timeout=100) d.addCallback(self.assertEquals, "127.0.0.1:%d" % self.portno) return d def test_timeoutTriggering(self): """ When a non-zero timeout is passed to L{getPage} and that many seconds elapse before the server responds to the request. the L{Deferred} is errbacked with a L{error.TimeoutError}. """ finished = self.assertFailure( getPage(self.getURL("wait"), timeout=0.000001), defer.TimeoutError) def cleanup(passthrough): # Clean up the server which is hanging around not doing # anything. connected = self.wrapper.protocols.keys() # There might be nothing here if the server managed to already see # that the connection was lost. 
if connected: connected[0].transport.loseConnection() return passthrough finished.addBoth(cleanup) return finished def testNotFound(self): return getPage(self.getURL('notsuchfile')).addCallback(self._cbNoSuchFile) def _cbNoSuchFile(self, pageData): self.assert_('404 - No Such Resource' in pageData) def testFactoryInfo(self): url = self.getURL('file') scheme, netloc, host, port, path = client._parse(url) factory = client.ScrapyHTTPClientFactory(Request(url)) reactor.connectTCP(host, port, factory) return factory.deferred.addCallback(self._cbFactoryInfo, factory) def _cbFactoryInfo(self, ignoredResult, factory): self.assertEquals(factory.status, '200') self.assert_(factory.version.startswith('HTTP/')) self.assertEquals(factory.message, 'OK') self.assertEquals(factory.response_headers['content-length'], '10') def testRedirect(self): return getPage(self.getURL("redirect")).addCallback(self._cbRedirect) def _cbRedirect(self, pageData): self.assertEquals(pageData, '\n<html>\n <head>\n <meta http-equiv="refresh" content="0;URL=/file">\n' ' </head>\n <body bgcolor="#FFFFFF" text="#000000">\n ' '<a href="/file">click here</a>\n </body>\n</html>\n')
bsd-3-clause
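ParseUrlTestCase above pins down how the Scrapy webclient fills in default ports and an empty path. The stdlib-only sketch below reproduces those expectations for comparison; it is an approximation written for illustration, not Scrapy's client._parse.

from urllib.parse import urlparse  # Python 3 stdlib; the test file above still uses six


def parse_like_webclient(url):
    parsed = urlparse(url.strip())
    port = parsed.port or (443 if parsed.scheme == "https" else 80)
    path = parsed.path or "/"
    if parsed.query:
        path += "?" + parsed.query
    return parsed.scheme, parsed.netloc, parsed.hostname, port, path

print(parse_like_webclient("http://127.0.0.1:100/foo?c=v&c2=v2#frag"))
# ('http', '127.0.0.1:100', '127.0.0.1', 100, '/foo?c=v&c2=v2')
print(parse_like_webclient("https://127.0.0.1/?param=value"))
# ('https', '127.0.0.1', '127.0.0.1', 443, '/?param=value')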
javierTerry/PyGithub
github/Legacy.py
72
7248
# -*- coding: utf-8 -*- # ########################## Copyrights and license ############################ # # # Copyright 2012 Steve English <[email protected]> # # Copyright 2012 Vincent Jacques <[email protected]> # # Copyright 2012 Zearin <[email protected]> # # Copyright 2013 AKFish <[email protected]> # # Copyright 2013 Vincent Jacques <[email protected]> # # # # This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ # # # # PyGithub is free software: you can redistribute it and/or modify it under # # the terms of the GNU Lesser General Public License as published by the Free # # Software Foundation, either version 3 of the License, or (at your option) # # any later version. # # # # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY # # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # # details. # # # # You should have received a copy of the GNU Lesser General Public License # # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. # # # # ############################################################################## import urlparse import github.PaginatedList class PaginatedList(github.PaginatedList.PaginatedListBase): def __init__(self, url, args, requester, key, convert, contentClass): github.PaginatedList.PaginatedListBase.__init__(self) self.__url = url self.__args = args self.__requester = requester self.__key = key self.__convert = convert self.__contentClass = contentClass self.__nextPage = 0 self.__continue = True def _couldGrow(self): return self.__continue def _fetchNextPage(self): page = self.__nextPage self.__nextPage += 1 return self.get_page(page) def get_page(self, page): assert isinstance(page, (int, long)), page args = dict(self.__args) if page != 0: args["start_page"] = page + 1 headers, data = self.__requester.requestJsonAndCheck( "GET", self.__url, parameters=args ) self.__continue = len(data[self.__key]) > 0 return [ self.__contentClass(self.__requester, headers, self.__convert(element), completed=False) for element in data[self.__key] ] def convertUser(attributes): convertedAttributes = { "login": attributes["login"], "url": "/users/" + attributes["login"], } if "gravatar_id" in attributes: # pragma no branch convertedAttributes["gravatar_id"] = attributes["gravatar_id"] if "followers" in attributes: # pragma no branch convertedAttributes["followers"] = attributes["followers"] if "repos" in attributes: # pragma no branch convertedAttributes["public_repos"] = attributes["repos"] if "name" in attributes: # pragma no branch convertedAttributes["name"] = attributes["name"] if "created_at" in attributes: # pragma no branch convertedAttributes["created_at"] = attributes["created_at"] if "location" in attributes: # pragma no branch convertedAttributes["location"] = attributes["location"] return convertedAttributes def convertRepo(attributes): convertedAttributes = { "owner": {"login": attributes["owner"], "url": "/users/" + attributes["owner"]}, "url": "/repos/" + attributes["owner"] + "/" + attributes["name"], } if "pushed_at" in attributes: # pragma no branch convertedAttributes["pushed_at"] = attributes["pushed_at"] if "homepage" in attributes: # pragma no branch convertedAttributes["homepage"] = attributes["homepage"] if "created_at" in attributes: # pragma no branch convertedAttributes["created_at"] = attributes["created_at"] if "watchers" in attributes: # pragma no branch convertedAttributes["watchers"] = 
attributes["watchers"] if "has_downloads" in attributes: # pragma no branch convertedAttributes["has_downloads"] = attributes["has_downloads"] if "fork" in attributes: # pragma no branch convertedAttributes["fork"] = attributes["fork"] if "has_issues" in attributes: # pragma no branch convertedAttributes["has_issues"] = attributes["has_issues"] if "has_wiki" in attributes: # pragma no branch convertedAttributes["has_wiki"] = attributes["has_wiki"] if "forks" in attributes: # pragma no branch convertedAttributes["forks"] = attributes["forks"] if "size" in attributes: # pragma no branch convertedAttributes["size"] = attributes["size"] if "private" in attributes: # pragma no branch convertedAttributes["private"] = attributes["private"] if "open_issues" in attributes: # pragma no branch convertedAttributes["open_issues"] = attributes["open_issues"] if "description" in attributes: # pragma no branch convertedAttributes["description"] = attributes["description"] if "language" in attributes: # pragma no branch convertedAttributes["language"] = attributes["language"] if "name" in attributes: # pragma no branch convertedAttributes["name"] = attributes["name"] return convertedAttributes def convertIssue(attributes): convertedAttributes = { "number": attributes["number"], "url": "/repos" + urlparse.urlparse(attributes["html_url"]).path, "user": {"login": attributes["user"], "url": "/users/" + attributes["user"]}, } if "labels" in attributes: # pragma no branch convertedAttributes["labels"] = [{"name": label} for label in attributes["labels"]] if "title" in attributes: # pragma no branch convertedAttributes["title"] = attributes["title"] if "created_at" in attributes: # pragma no branch convertedAttributes["created_at"] = attributes["created_at"] if "comments" in attributes: # pragma no branch convertedAttributes["comments"] = attributes["comments"] if "body" in attributes: # pragma no branch convertedAttributes["body"] = attributes["body"] if "updated_at" in attributes: # pragma no branch convertedAttributes["updated_at"] = attributes["updated_at"] if "state" in attributes: # pragma no branch convertedAttributes["state"] = attributes["state"] return convertedAttributes
gpl-3.0
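The convert* helpers in Legacy.py above translate the flat attribute names of GitHub's legacy (v2) search payloads into the nested, v3-style dictionaries the rest of PyGithub expects. A small usage example follows; the payload is made up, and the import path assumes the module is used as github.Legacy from an installed PyGithub.

from github.Legacy import convertUser

legacy_user = {
    "login": "octocat",
    "name": "The Octocat",
    "repos": 8,
    "followers": 3000,
    "location": "San Francisco",
}

converted = convertUser(legacy_user)
print(converted)
# Produces (key order may vary):
# {'login': 'octocat', 'url': '/users/octocat', 'followers': 3000,
#  'public_repos': 8, 'name': 'The Octocat', 'location': 'San Francisco'}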
350dotorg/Django
django/contrib/redirects/middleware.py
447
1105
from django.contrib.redirects.models import Redirect from django import http from django.conf import settings class RedirectFallbackMiddleware(object): def process_response(self, request, response): if response.status_code != 404: return response # No need to check for a redirect for non-404 responses. path = request.get_full_path() try: r = Redirect.objects.get(site__id__exact=settings.SITE_ID, old_path=path) except Redirect.DoesNotExist: r = None if r is None and settings.APPEND_SLASH: # Try removing the trailing slash. try: r = Redirect.objects.get(site__id__exact=settings.SITE_ID, old_path=path[:path.rfind('/')]+path[path.rfind('/')+1:]) except Redirect.DoesNotExist: pass if r is not None: if r.new_path == '': return http.HttpResponseGone() return http.HttpResponsePermanentRedirect(r.new_path) # No redirect was found. Return the response. return response
bsd-3-clause
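When APPEND_SLASH is on, the middleware above retries the Redirect lookup after removing the last slash from the full path, query string included. The slicing expression it uses is easy to misread, so here is what it evaluates to on a sample path (the path itself is made up).

# path[:path.rfind('/')] + path[path.rfind('/') + 1:] drops the final "/".
path = "/old-page/?ref=footer"
stripped = path[:path.rfind('/')] + path[path.rfind('/') + 1:]
print(stripped)  # /old-page?ref=footer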
quamilek/django-custard
custard/tests/settings.py
3
1394
# Django settings for testproject project. import os DIRNAME = os.path.dirname(__file__) DEBUG = True TEMPLATE_DEBUG = DEBUG DEBUG_PROPAGATE_EXCEPTIONS = True ADMINS = () MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(DIRNAME, 'db.sqlite3'), 'TEST_NAME': os.path.join(DIRNAME, 'test_db.sqlite3'), } } TIME_ZONE = 'Europe/Rome' LANGUAGE_CODE = 'en-us' SITE_ID = 1 USE_I18N = True USE_L10N = True MEDIA_ROOT = '' MEDIA_URL = '' SECRET_KEY = 'vaO4Y<g#YRWG8;Md8noiLp>.w(w~q_b=|1`?9<x>0KxA%UB!63' TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ) ROOT_URLCONF = 'custard.tests.urls' TEMPLATE_DIRS = () INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.admin', 'custard', 'custard.tests', ) TEST_RUNNER = 'django.test.runner.DiscoverRunner' STATIC_URL = '/static/'
mit
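These settings exist only to drive django-custard's test suite. A typical way to use such a standalone settings module is sketched below; the module paths and test label are assumptions, and the exact bootstrap differs slightly across older Django versions.

import os

import django
from django.conf import settings
from django.test.utils import get_runner

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "custard.tests.settings")
django.setup()                      # Django >= 1.7

TestRunner = get_runner(settings)   # resolves the TEST_RUNNER configured above
failures = TestRunner(verbosity=1).run_tests(["custard.tests"])
raise SystemExit(bool(failures))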
Xeralux/tensorflow
tensorflow/contrib/metrics/python/metrics/classification.py
111
2647
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Classification metrics library.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops # TODO(nsilberman): move into metrics/python/ops/ def accuracy(predictions, labels, weights=None, name=None): """Computes the percentage of times that predictions matches labels. Args: predictions: the predicted values, a `Tensor` whose dtype and shape matches 'labels'. labels: the ground truth values, a `Tensor` of any shape and bool, integer, or string dtype. weights: None or `Tensor` of float values to reweight the accuracy. name: A name for the operation (optional). Returns: Accuracy `Tensor`. Raises: ValueError: if dtypes don't match or if dtype is not bool, integer, or string. """ if not (labels.dtype.is_integer or labels.dtype in (dtypes.bool, dtypes.string)): raise ValueError( 'Labels should have bool, integer, or string dtype, not %r' % labels.dtype) if not labels.dtype.is_compatible_with(predictions.dtype): raise ValueError('Dtypes of predictions and labels should match. ' 'Given: predictions (%r) and labels (%r)' % (predictions.dtype, labels.dtype)) with ops.name_scope(name, 'accuracy', values=[predictions, labels]): is_correct = math_ops.cast( math_ops.equal(predictions, labels), dtypes.float32) if weights is not None: is_correct = math_ops.multiply(is_correct, weights) num_values = math_ops.multiply(weights, array_ops.ones_like(is_correct)) return math_ops.div(math_ops.reduce_sum(is_correct), math_ops.reduce_sum(num_values)) return math_ops.reduce_mean(is_correct)
apache-2.0
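accuracy() above reduces to straightforward arithmetic: without weights it is the mean of the element-wise matches, and with weights it is the weighted fraction sum(correct * w) / sum(w). A NumPy restatement of that math for a toy batch (not the TensorFlow op itself):

import numpy as np

predictions = np.array([1, 0, 2, 2])
labels = np.array([1, 0, 1, 2])
weights = np.array([1.0, 1.0, 0.5, 2.0])

is_correct = (predictions == labels).astype(np.float32)
print(is_correct.mean())                              # 0.75   (unweighted)
print((is_correct * weights).sum() / weights.sum())   # ~0.889 (weighted)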
groschovskiy/personfinder
app/pytz/zoneinfo/America/Winnipeg.py
9
8004
'''tzinfo timezone information for America/Winnipeg.''' from pytz.tzinfo import DstTzInfo from pytz.tzinfo import memorized_datetime as d from pytz.tzinfo import memorized_ttinfo as i class Winnipeg(DstTzInfo): '''America/Winnipeg timezone definition. See datetime.tzinfo for details''' zone = 'America/Winnipeg' _utc_transition_times = [ d(1,1,1,0,0,0), d(1916,4,23,6,0,0), d(1916,9,17,5,0,0), d(1918,4,14,8,0,0), d(1918,10,31,7,0,0), d(1937,5,16,8,0,0), d(1937,9,26,7,0,0), d(1942,2,9,8,0,0), d(1945,8,14,23,0,0), d(1945,9,30,7,0,0), d(1946,5,12,8,0,0), d(1946,10,13,7,0,0), d(1947,4,27,8,0,0), d(1947,9,28,7,0,0), d(1948,4,25,8,0,0), d(1948,9,26,7,0,0), d(1949,4,24,8,0,0), d(1949,9,25,7,0,0), d(1950,5,1,8,0,0), d(1950,9,30,7,0,0), d(1951,4,29,8,0,0), d(1951,9,30,7,0,0), d(1952,4,27,8,0,0), d(1952,9,28,7,0,0), d(1953,4,26,8,0,0), d(1953,9,27,7,0,0), d(1954,4,25,8,0,0), d(1954,9,26,7,0,0), d(1955,4,24,8,0,0), d(1955,9,25,7,0,0), d(1956,4,29,8,0,0), d(1956,9,30,7,0,0), d(1957,4,28,8,0,0), d(1957,9,29,7,0,0), d(1958,4,27,8,0,0), d(1958,9,28,7,0,0), d(1959,4,26,8,0,0), d(1959,10,25,7,0,0), d(1960,4,24,8,0,0), d(1960,9,25,7,0,0), d(1963,4,28,8,0,0), d(1963,9,22,7,0,0), d(1966,4,24,8,0,0), d(1966,10,30,8,0,0), d(1967,4,30,8,0,0), d(1967,10,29,8,0,0), d(1968,4,28,8,0,0), d(1968,10,27,8,0,0), d(1969,4,27,8,0,0), d(1969,10,26,8,0,0), d(1970,4,26,8,0,0), d(1970,10,25,8,0,0), d(1971,4,25,8,0,0), d(1971,10,31,8,0,0), d(1972,4,30,8,0,0), d(1972,10,29,8,0,0), d(1973,4,29,8,0,0), d(1973,10,28,8,0,0), d(1974,4,28,8,0,0), d(1974,10,27,8,0,0), d(1975,4,27,8,0,0), d(1975,10,26,8,0,0), d(1976,4,25,8,0,0), d(1976,10,31,8,0,0), d(1977,4,24,8,0,0), d(1977,10,30,8,0,0), d(1978,4,30,8,0,0), d(1978,10,29,8,0,0), d(1979,4,29,8,0,0), d(1979,10,28,8,0,0), d(1980,4,27,8,0,0), d(1980,10,26,8,0,0), d(1981,4,26,8,0,0), d(1981,10,25,8,0,0), d(1982,4,25,8,0,0), d(1982,10,31,8,0,0), d(1983,4,24,8,0,0), d(1983,10,30,8,0,0), d(1984,4,29,8,0,0), d(1984,10,28,8,0,0), d(1985,4,28,8,0,0), d(1985,10,27,8,0,0), d(1986,4,27,8,0,0), d(1986,10,26,8,0,0), d(1987,4,5,8,0,0), d(1987,10,25,8,0,0), d(1988,4,3,8,0,0), d(1988,10,30,8,0,0), d(1989,4,2,8,0,0), d(1989,10,29,8,0,0), d(1990,4,1,8,0,0), d(1990,10,28,8,0,0), d(1991,4,7,8,0,0), d(1991,10,27,8,0,0), d(1992,4,5,8,0,0), d(1992,10,25,8,0,0), d(1993,4,4,8,0,0), d(1993,10,31,8,0,0), d(1994,4,3,8,0,0), d(1994,10,30,8,0,0), d(1995,4,2,8,0,0), d(1995,10,29,8,0,0), d(1996,4,7,8,0,0), d(1996,10,27,8,0,0), d(1997,4,6,8,0,0), d(1997,10,26,8,0,0), d(1998,4,5,8,0,0), d(1998,10,25,8,0,0), d(1999,4,4,8,0,0), d(1999,10,31,8,0,0), d(2000,4,2,8,0,0), d(2000,10,29,8,0,0), d(2001,4,1,8,0,0), d(2001,10,28,8,0,0), d(2002,4,7,8,0,0), d(2002,10,27,8,0,0), d(2003,4,6,8,0,0), d(2003,10,26,8,0,0), d(2004,4,4,8,0,0), d(2004,10,31,8,0,0), d(2005,4,3,8,0,0), d(2005,10,30,8,0,0), d(2006,1,1,6,0,0), d(2006,4,2,8,0,0), d(2006,10,29,7,0,0), d(2007,3,11,8,0,0), d(2007,11,4,7,0,0), d(2008,3,9,8,0,0), d(2008,11,2,7,0,0), d(2009,3,8,8,0,0), d(2009,11,1,7,0,0), d(2010,3,14,8,0,0), d(2010,11,7,7,0,0), d(2011,3,13,8,0,0), d(2011,11,6,7,0,0), d(2012,3,11,8,0,0), d(2012,11,4,7,0,0), d(2013,3,10,8,0,0), d(2013,11,3,7,0,0), d(2014,3,9,8,0,0), d(2014,11,2,7,0,0), d(2015,3,8,8,0,0), d(2015,11,1,7,0,0), d(2016,3,13,8,0,0), d(2016,11,6,7,0,0), d(2017,3,12,8,0,0), d(2017,11,5,7,0,0), d(2018,3,11,8,0,0), d(2018,11,4,7,0,0), d(2019,3,10,8,0,0), d(2019,11,3,7,0,0), d(2020,3,8,8,0,0), d(2020,11,1,7,0,0), d(2021,3,14,8,0,0), d(2021,11,7,7,0,0), d(2022,3,13,8,0,0), d(2022,11,6,7,0,0), d(2023,3,12,8,0,0), d(2023,11,5,7,0,0), d(2024,3,10,8,0,0), 
d(2024,11,3,7,0,0), d(2025,3,9,8,0,0), d(2025,11,2,7,0,0), d(2026,3,8,8,0,0), d(2026,11,1,7,0,0), d(2027,3,14,8,0,0), d(2027,11,7,7,0,0), d(2028,3,12,8,0,0), d(2028,11,5,7,0,0), d(2029,3,11,8,0,0), d(2029,11,4,7,0,0), d(2030,3,10,8,0,0), d(2030,11,3,7,0,0), d(2031,3,9,8,0,0), d(2031,11,2,7,0,0), d(2032,3,14,8,0,0), d(2032,11,7,7,0,0), d(2033,3,13,8,0,0), d(2033,11,6,7,0,0), d(2034,3,12,8,0,0), d(2034,11,5,7,0,0), d(2035,3,11,8,0,0), d(2035,11,4,7,0,0), d(2036,3,9,8,0,0), d(2036,11,2,7,0,0), d(2037,3,8,8,0,0), d(2037,11,1,7,0,0), ] _transition_info = [ i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CWT'), i(-18000,3600,'CPT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), 
i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), i(-18000,3600,'CDT'), i(-21600,0,'CST'), ] Winnipeg = Winnipeg()
apache-2.0
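Files like America/Winnipeg.py above are generated tables that pytz consumes at run time; user code never touches _utc_transition_times directly. A short usage sketch of the same zone, showing the CST/CDT switch the table encodes (the dates are arbitrary):

from datetime import datetime, timedelta

import pytz

winnipeg = pytz.timezone("America/Winnipeg")

winter = winnipeg.localize(datetime(2010, 1, 15, 12, 0))
summer = winnipeg.localize(datetime(2010, 7, 15, 12, 0))
print(winter.strftime("%Y-%m-%d %H:%M %Z%z"))  # 2010-01-15 12:00 CST-0600
print(summer.strftime("%Y-%m-%d %H:%M %Z%z"))  # 2010-07-15 12:00 CDT-0500

# Arithmetic that crosses a transition needs normalize() to re-select the offset.
later = winnipeg.normalize(winter + timedelta(days=90))
print(later.strftime("%Z"))                    # CDT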
laszlokiraly/OffenesParlament
offenesparlament/op_scraper/scraper/parlament/spiders/administrations.py
2
6360
# -*- coding: utf-8 -*- import scrapy from ansicolor import red from ansicolor import cyan from ansicolor import green from ansicolor import blue from django.db.models import Q from urllib import urlencode from parlament.settings import BASE_HOST from parlament.spiders.persons import PersonsSpider from parlament.resources.extractors.law import * from parlament.resources.extractors.prelaw import * from parlament.resources.extractors.person import * from parlament.resources.extractors.opinion import * from parlament.resources.extractors.administration import * from op_scraper.models import Person from op_scraper.models import Function from op_scraper.models import Mandate from op_scraper.models import Administration from op_scraper.models import LegislativePeriod class AdministrationsSpider(PersonsSpider): BASE_URL = "{}/{}".format(BASE_HOST, "WWER/BREG/REG/filter.psp") URLOPTIONS_ADMIN = { 'jsMode': '', 'xdocumentUri': '/WWER/BREG/REG/index.shtml', 'REG': '0', 'anwenden': 'Anwenden', 'FUNK': 'ALLE', 'RESS': 'ALLE', 'SUCH': '', 'listeId': '16', 'FBEZ': 'FW_016', 'pageNumber': '', } LLP = [] name = "administrations" title = "Administrations (Regierungen) Spider" persons_scraped = [] def __init__(self, **kw): super(AdministrationsSpider, self).__init__(**kw) self.start_urls = self.get_urls() self.cookies_seen = set() self.idlist = {} #self.print_debug() def get_urls(self): """ Overwritten from BaseSpider for non-LLP-based retrieval """ urls = [] url_options = urlencode(self.URLOPTIONS_ADMIN) url = "{}?{}".format(self.BASE_URL, url_options) urls.append(url) return urls def parse(self, response): persons = ADMINISTRATION.LIST.xt(response) callback_requests = [] self.logger.info( "Scraping {} persons".format(len(persons))) # Iterate all persons for p in persons: # Extract basic data parl_id = p['source_link'].split('/')[-2] p['source_link'] = "{}{}".format(BASE_HOST, p['source_link']) # Create or update simple person's item person_data = { 'reversed_name': p['reversed_name'] } person_item, created_person = Person.objects.update_or_create( source_link=p['source_link'], parl_id=parl_id, defaults=person_data ) if created_person: self.logger.debug(u"Created Person {}".format( green(u'[{}]'.format(p['reversed_name'])))) else: self.logger.debug(u"Updated Person {}".format( green(u"[{}]".format(p['reversed_name'])) )) mandate = p['mandate'] administration_item = self.get_administration_item(mandate) function_item, f_created = Function.objects.get_or_create( short=mandate['short'], title=mandate['title']) if f_created: self.logger.debug(u"Created function {}".format( green(u'[{}]'.format(function_item.short)))) # Create and append mandate try: mandate_item, m_created = Mandate.objects.update_or_create( person=person_item, function=function_item, administration=administration_item) # Let's try to find a matching LLP for this administration so we can # add it to this mandate try: llps = LegislativePeriod.objects\ .filter( start_date__lte=mandate[ 'administration']['end_date'] or datetime.date.today())\ .filter( Q(end_date__isnull=True) | Q( end_date__gte=mandate[ 'administration']['start_date'] ))\ .all() if llps: # always pick the latest, in case the adminstration # overlapped mandate_item.legislative_period = llps[ llps.count() - 1] mandate_item.save() except Exception as e: # # nope, that didn't work, but nevermind #passiveaggressivecomment # print e.message # import ipdb # ipdb.set_trace() pass except: self.logger.warning( red("Error saving Mandate {} ({})".format(function_item, administration_item))) 
import ipdb ipdb.set_trace() person_item.save() # First time we encounter a person, we scan her detail page too if not parl_id in self.persons_scraped: # Create Detail Page request req = scrapy.Request(p['source_link'], callback=self.parse_person_detail) req.meta['person'] = { 'reversed_name': p['reversed_name'], 'source_link': p['source_link'], 'parl_id': parl_id } callback_requests.append(req) self.persons_scraped.append(parl_id) return callback_requests def get_administration_item(self, mandate): # Do we have this administration already? admin_data = { 'start_date': mandate['administration']['start_date'], 'end_date': mandate['administration']['end_date'] } admin_item, created = Administration.objects.update_or_create( title=mandate['administration']['title'][0], defaults=admin_data) if created: admin_item.save() self.logger.debug(u"Created administration {}".format( green(u'[{}]'.format(admin_item.title)))) return admin_item
bsd-2-clause
PriceElectronics/linux-imx
tools/perf/scripts/python/failed-syscalls-by-pid.py
11180
2058
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

usage = "perf script -s failed-syscalls-by-pid.py [comm|pid]\n";

for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except:
        for_comm = sys.argv[1]

syscalls = autodict()

def trace_begin():
    print "Press control+C to stop and show the summary"

def trace_end():
    print_error_totals()

def raw_syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, ret):
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid):
        return

    if ret < 0:
        try:
            syscalls[common_comm][common_pid][id][ret] += 1
        except TypeError:
            syscalls[common_comm][common_pid][id][ret] = 1

def print_error_totals():
    if for_comm is not None:
        print "\nsyscall errors for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall errors:\n\n",

    print "%-30s  %10s\n" % ("comm [pid]", "count"),
    print "%-30s  %10s\n" % ("------------------------------", "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id in id_keys:
                print "  syscall: %-16s\n" % syscall_name(id),
                ret_keys = syscalls[comm][pid][id].keys()
                for ret, val in sorted(syscalls[comm][pid][id].iteritems(),
                                       key = lambda(k, v): (v, k), reverse = True):
                    print "    err = %-20s  %10d\n" % (strerror(ret), val),
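
# ---------------------------------------------------------------------------
# Editor's note (illustrative, not part of the original script): a typical way
# to exercise this report. Command names follow the perf documentation, but
# exact options may differ between perf versions:
#
#   perf record -e raw_syscalls:sys_exit -a sleep 10
#   perf script -s failed-syscalls-by-pid.py            # all processes
#   perf script -s failed-syscalls-by-pid.py firefox    # only 'firefox'
# ---------------------------------------------------------------------------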
gpl-2.0
Heufneutje/PyHeufyBot
heufybot/modules/util/urlfollow.py
1
6495
from twisted.plugin import IPlugin from heufybot.channel import IRCChannel from heufybot.moduleinterface import BotModule, IBotModule from heufybot.utils.dummycontextmanager import DummyContextManager from heufybot.utils.signaltimeout import TimeoutException from zope.interface import implements from bs4 import BeautifulSoup from isodate import parse_duration from urlparse import urlparse import re, sys, time if sys.platform != "win32": from heufybot.utils.signaltimeout import SignalTimeout as SignalTimeout else: SignalTimeout = None class URLFollow(BotModule): implements(IPlugin, IBotModule) name = "URLFollow" def actions(self): return [ ("ctcp-message", 20, self.searchActions), ("message-channel", 20, self.searchChannelMessage), ("message-user", 20, self.searchPrivateMessage) ] def searchPrivateMessage(self, server, user, messageBody): self._searchURLs(server, user.nick, messageBody) def searchChannelMessage(self, server, channel, user, body): self._searchURLs(server, channel.name, body) def searchActions(self, server, source, user, body): if not body.upper().startswith("ACTION"): return if isinstance(source, IRCChannel): self._searchURLs(server, source.name, body) else: self._searchURLs(server, user.nick, body) def load(self): self.imgurClientID = None self.ytKey = None if "api-keys" not in self.bot.storage: self.bot.storage["api-keys"] = {} if "imgur" in self.bot.storage["api-keys"]: self.imgurClientID = self.bot.storage["api-keys"]["imgur"] if "google" in self.bot.storage["api-keys"]: self.ytKey = self.bot.storage["api-keys"]["google"] def _searchURLs(self, server, source, body): if not self.bot.moduleHandler.useModuleOnServer(self.name, server): return regex = re.compile(r"(https?://|www\.)[^\s]+", re.IGNORECASE) for url in filter(regex.match, body.split(" ")): response = self._handleURL(url) if response: self.bot.servers[server].outputHandler.cmdPRIVMSG(source, response) def _handleURL(self, url): ytMatch = re.search(r"(youtube\.com/watch.+v=|youtu\.be/)(?P<videoID>[^&#\?]{11})", url) if ytMatch: return self._handleYouTube(ytMatch.group("videoID")) imgurMatch = re.search(r"(i\.)?imgur\.com/(?P<imgurID>[^\.]+)", url) if imgurMatch: return self._handleImgur(imgurMatch.group("imgurID")) if not re.search("\.(jpe?g|gif|png|bmp)$", url): return self._handleGeneric(url) return None def _handleGeneric(self, url): with SignalTimeout(5) if SignalTimeout is not None else DummyContextManager(): try: result = self.bot.moduleHandler.runActionUntilValue("fetch-url", url) if not result or result.status_code != 200: return None parsed_uri = urlparse(result.url) soup = BeautifulSoup(result.content) title = soup.find("title").text.encode("utf-8", "ignore").replace("\r", "").replace("\n", " ") if len(title) > 300: title = title[:297] + "..." return "[URL] {} (at host: {}).".format(title, parsed_uri.hostname) except TimeoutException: return "The operation timed out." 
def _handleYouTube(self, videoID): params = { "id": videoID, "fields": "items(id,snippet(title,description),contentDetails(duration))", "parts": "snippet,contentDetails", } if self.ytKey: params["key"] = self.ytKey url = "https://www.googleapis.com/youtube/v3/videos" result = self.bot.moduleHandler.runActionUntilValue("fetch-url", url, params) if not result: return None j = result.json() if len(j["items"]) < 1: return None snippet = j["items"][0]["snippet"] title = snippet["title"].replace("\r", "").replace("\n", " ").encode("utf-8", "ignore") description = snippet["description"].replace("\r", "").replace("\n", " ").encode("utf-8", "ignore") durSeconds = parse_duration(j["items"][0]["contentDetails"]["duration"]).total_seconds() if len(description) > 149: description = description[:147] + "..." if durSeconds < 3600: duration = time.strftime("%M:%S", time.gmtime(durSeconds)) else: duration = time.strftime("%H:%M:%S", time.gmtime(durSeconds)) return "[YouTube] {} | {} | {}".format(title, duration, description) def _handleImgur(self, imgurID): if not self.imgurClientID: return albumLink = False if imgurID.startswith("gallery/"): imgurID = imgurID.replace("gallery/", "") url = "https://api.imgur.com/3/gallery/{}".format(imgurID) elif imgurID.startswith("a/"): imgurID = imgurID.replace("a/", "") url = "https://api.imgur.com/3/album/{}".format(imgurID) albumLink = True else: url = "https://api.imgur.com/3/image/{}".format(imgurID) headers = { "Authorization": "Client-ID {}".format(self.imgurClientID) } result = self.bot.moduleHandler.runActionUntilValue("fetch-url", url, None, headers) if not result: return j = result.json() if j["status"] != 200: return j = j["data"] data = [] if j["title"]: data.append("{}".format(j["title"].encode("utf-8", "ignore"))) else: data.append("No title") if j["nsfw"]: data.append("NSFW!") if albumLink: data.append("Album: {} images".format(j["images_count"])) elif "is_album" in j and j["is_album"]: data.append("Album: {} images".format(len(j["images"]))) if "animated" in j and j["animated"]: data.append("Animated!") if "width" in j and "height" in j: data.append("{}x{}".format(j["width"], j["height"])) if "size" in j: data.append("Size: {} kB".format(int(j["size"])/1024)) data.append("Views: {}".format(j["views"])) return "[Imgur] {}".format(" | ".join(data)) urlFollow = URLFollow()
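
# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original module. It
# reuses the two regexes from _handleURL() above to show which IDs they pull
# out of a link; only the standard library is needed.
#
#   import re
#
#   yt = re.search(r"(youtube\.com/watch.+v=|youtu\.be/)(?P<videoID>[^&#\?]{11})",
#                  "https://youtu.be/dQw4w9WgXcQ")
#   imgur = re.search(r"(i\.)?imgur\.com/(?P<imgurID>[^\.]+)",
#                     "https://i.imgur.com/abc123.jpg")
#   yt.group("videoID")       # -> "dQw4w9WgXcQ"
#   imgur.group("imgurID")    # -> "abc123"
# ---------------------------------------------------------------------------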
mit
bygloam/yapf
yapf/yapflib/format_decision_state.py
1
14929
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Implements a format decision state object that manages whitespace decisions. Each token is processed one at a time, at which point its whitespace formatting decisions are made. A graph of potential whitespace formattings is created, where each node in the graph is a format decision state object. The heuristic tries formatting the token with and without a newline before it to determine which one has the least penalty. Therefore, the format decision state object for each decision needs to be its own unique copy. Once the heuristic determines the best formatting, it makes a non-dry run pass through the code to commit the whitespace formatting. FormatDecisionState: main class exported by this module. """ import copy from yapf.yapflib import format_token from yapf.yapflib import style class FormatDecisionState(object): """The current state when indenting an unwrapped line. The FormatDecisionState object is meant to be copied instead of referenced. Attributes: first_indent: The indent of the first token. column: The number of used columns in the current line. next_token: The next token to be formatted. paren_level: The level of nesting inside (), [], and {}. start_of_line_level: The paren_level at the start of this line. lowest_level_on_line: The lowest paren_level on the current line. newline: Indicates if a newline is added along the edge to this format decision state node. previous: The previous format decision state in the decision tree. stack: A stack (of _ParenState) keeping track of properties applying to parenthesis levels. ignore_stack_for_comparison: Ignore the stack of _ParenState for state comparison. """ def __init__(self, line, first_indent): """Initializer. Initializes to the state after placing the first token from 'line' at 'first_indent'. Arguments: line: (UnwrappedLine) The unwrapped line we're currently processing. first_indent: (int) The indent of the first token. """ self.next_token = line.first self.column = first_indent self.paren_level = 0 self.start_of_line_level = 0 self.lowest_level_on_line = 0 self.ignore_stack_for_comparison = False self.stack = [_ParenState(first_indent, first_indent)] self.first_indent = first_indent self.newline = False self.previous = None self._MoveStateToNextToken() def Clone(self): new = copy.copy(self) new.stack = copy.deepcopy(self.stack) return new def __eq__(self, other): # Note: 'first_indent' is implicit in the stack. Also, we ignore 'previous', # because it shouldn't have a bearing on this comparison. (I.e., it will # report equal if 'next_token' does.) 
return (self.next_token == other.next_token and self.column == other.column and self.paren_level == other.paren_level and self.start_of_line_level == other.start_of_line_level and self.lowest_level_on_line == other.lowest_level_on_line and (self.ignore_stack_for_comparison or other.ignore_stack_for_comparison or self.stack == other.stack)) def __ne__(self, other): return not self == other def __hash__(self): return hash((self.next_token, self.column, self.paren_level, self.start_of_line_level, self.lowest_level_on_line)) def __repr__(self): return ('column::%d, next_token::%s, paren_level::%d, stack::[\n\t%s' % (self.column, repr(self.next_token), self.paren_level, '\n\t'.join(repr(s) for s in self.stack) + ']')) def CanSplit(self): """Returns True if the line can be split before the next token.""" current = self.next_token if not current.can_break_before: return False return True def MustSplit(self): """Returns True if the line must split before the next token.""" current = self.next_token previous_token = current.previous_token if current.must_break_before: return True if (self.stack[-1].split_before_closing_bracket and # FIXME(morbo): Use the 'matching_bracket' instead of this. # FIXME(morbo): Don't forget about tuples! current.value in ']}'): # Split if we need to split before the closing bracket and the next # token is a closing bracket. return True if previous_token: length = _GetLengthToMatchingParen(previous_token) if (previous_token.value == '{' and # TODO(morbo): List initializers? length + self.column > style.Get('COLUMN_LIMIT')): return True # TODO(morbo): This should be controlled with a knob. if (format_token.Subtype.DICTIONARY_KEY in current.subtypes and not current.is_comment): # Place each dictionary entry on its own line. return True # TODO(morbo): This should be controlled with a knob. if format_token.Subtype.DICT_SET_GENERATOR in current.subtypes: return True if (previous_token.value != '(' and format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in current.subtypes): return style.Get('SPLIT_BEFORE_NAMED_ASSIGNS') if (previous_token.value in '{[(' and current.lineno != previous_token.lineno): self.stack[-1].split_before_closing_bracket = True return True return False def AddTokenToState(self, newline, dry_run, must_split=False): """Add a token to the format decision state. Allow the heuristic to try out adding the token with and without a newline. Later on, the algorithm will determine which one has the lowest penalty. Arguments: newline: (bool) Add the token on a new line if True. dry_run: (bool) Don't commit whitespace changes to the FormatToken if True. must_split: (bool) A newline was required before this token. Returns: The penalty of splitting after the current token. """ penalty = 0 if newline: penalty = self._AddTokenOnNewline(dry_run, must_split) else: self._AddTokenOnCurrentLine(dry_run) return self._MoveStateToNextToken() + penalty def _AddTokenOnCurrentLine(self, dry_run): """Puts the token on the current line. Appends the next token to the state and updates information necessary for indentation. Arguments: dry_run: (bool) Commit whitespace changes to the FormatToken if True. 
""" current = self.next_token previous = current.previous_token spaces = current.spaces_required_before if not dry_run: current.AddWhitespacePrefix(newlines_before=0, spaces=spaces) if previous.OpensScope(): if not current.is_comment: # Align closing scopes that are on a newline with the opening scope: # # foo = [a, # b, # ] self.stack[-1].closing_scope_indent = previous.column if style.Get('ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'): self.stack[-1].closing_scope_indent += 1 self.stack[-1].indent = self.column + spaces else: self.stack[-1].closing_scope_indent = ( self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH') ) self.column += spaces def _AddTokenOnNewline(self, dry_run, must_split): """Adds a line break and necessary indentation. Appends the next token to the state and updates information necessary for indentation. Arguments: dry_run: (bool) Don't commit whitespace changes to the FormatToken if True. must_split: (bool) A newline was required before this token. Returns: The split penalty for splitting after the current state. """ current = self.next_token previous = current.previous_token self.column = self._GetNewlineColumn() if not dry_run: current.AddWhitespacePrefix(newlines_before=1, spaces=self.column) if not current.is_comment: self.stack[-1].last_space = self.column self.start_of_line_level = self.paren_level self.lowest_level_on_line = self.paren_level # Any break on this level means that the parent level has been broken and we # need to avoid bin packing there. for paren_state in self.stack: paren_state.split_before_parameter = True if (previous.value != ',' and not previous.is_binary_op and not current.is_binary_op and not previous.OpensScope()): self.stack[-1].split_before_parameter = True if (previous.OpensScope() or (previous.is_comment and previous.previous_token is not None and previous.previous_token.OpensScope())): self.stack[-1].closing_scope_indent = max( 0, self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH')) self.stack[-1].split_before_closing_bracket = True # Calculate the split penalty. penalty = current.split_penalty # Add a penalty for each increasing newline we add. last = self.stack[-1] penalty += ( style.Get('SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT') * last.num_line_splits ) if not must_split and current.value not in {'if', 'for'}: # Don't penalize for a must split or for splitting before an # if-expression or list comprehension. last.num_line_splits += 1 return penalty + 10 def _GetNewlineColumn(self): """Return the new column on the newline.""" current = self.next_token previous = current.previous_token top_of_stack = self.stack[-1] if current.spaces_required_before > 2: return current.spaces_required_before if current.OpensScope(): return self.first_indent if not self.paren_level else top_of_stack.indent if current.ClosesScope(): if (previous.OpensScope() or (previous.is_comment and previous.previous_token is not None and previous.previous_token.OpensScope())): return max( 0, self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH')) return top_of_stack.closing_scope_indent if (previous and previous.is_string and current.is_string and format_token.Subtype.DICTIONARY_VALUE in current.subtypes): return previous.column if format_token.Subtype.IF_TEST_EXPR in current.subtypes: return top_of_stack.indent + style.Get('INDENT_IF_EXPR_CONTINUATION') return top_of_stack.indent def _MoveStateToNextToken(self): """Calculate format decision state information and move onto the next token. 
Before moving onto the next token, we first calculate the format decision state given the current token and its formatting decisions. Then the format decision state is set up so that the next token can be added. Returns: The penalty for the number of characters over the column limit. """ current = self.next_token if not current.OpensScope() and not current.ClosesScope(): self.lowest_level_on_line = min(self.lowest_level_on_line, self.paren_level) # If we encounter an opening bracket, we add a level to our stack to prepare # for the subsequent tokens. if current.OpensScope(): last = self.stack[-1] new_indent = style.Get('CONTINUATION_INDENT_WIDTH') + last.last_space self.stack.append(_ParenState(new_indent, self.stack[-1].last_space)) self.stack[-1].break_before_paremeter = False self.paren_level += 1 # If we encounter a closing bracket, we can remove a level from our # parenthesis stack. if len(self.stack) > 1 and current.ClosesScope(): self.stack[-2].last_space = self.stack[-1].last_space self.stack.pop() self.paren_level -= 1 is_multiline_string = current.is_string and '\n' in current.value if is_multiline_string: # This is a multiline string. Only look at the first line. self.column += len(current.value.split('\n')[0]) else: self.column += len(current.value) self.next_token = self.next_token.next_token # Calculate the penalty for overflowing the column limit. penalty = 0 if self.column > style.Get('COLUMN_LIMIT') and not current.is_comment: excess_characters = self.column - style.Get('COLUMN_LIMIT') penalty = style.Get('SPLIT_PENALTY_EXCESS_CHARACTER') * excess_characters if is_multiline_string: # If this is a multiline string, the column is actually the # end of the last line in the string. self.column = len(current.value.split('\n')[-1]) return penalty def _GetLengthToMatchingParen(token): """Returns the length from one bracket to the matching bracket. Arguments: token: (FormatToken) The opening bracket token. Returns: The length to the closing paren or up to the first point where we can split the line. The length includes the brackets. """ if not token.matching_bracket: return 0 end = token.matching_bracket while end.next_token and not end.next_token.can_break_before: end = end.next_token return end.total_length - token.total_length + 1 class _ParenState(object): """Maintains the state of the bracket enclosures. A stack of _ParenState objects are kept so that we know how to indent relative to the brackets. Attributes: indent: The column position to which a specified parenthesis level needs to be indented. last_space: The column position of the last space on each level. split_before_closing_bracket: Whether a newline needs to be inserted before the closing bracket. We only want to insert a newline before the closing bracket if there also was a newline after the beginning left bracket. split_before_parameter: Split the line after the next comma. num_line_splits: Number of line splits this _ParenState contains already. Each subsequent line split gets an increasing penalty. """ # TODO(morbo): This doesn't track "bin packing." def __init__(self, indent, last_space): self.indent = indent self.last_space = last_space self.closing_scope_indent = 0 self.split_before_closing_bracket = False self.split_before_parameter = False self.num_line_splits = 0 def __repr__(self): return '[indent::%d, last_space::%d, closing_scope_indent::%d]' % ( self.indent, self.last_space, self.closing_scope_indent)
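
# ---------------------------------------------------------------------------
# Editor's note: illustrative summary, not part of the original module. The
# cost of one formatting alternative is accumulated in _MoveStateToNextToken()
# above: every character beyond the column limit is charged
#
#   excess_characters = column - style.Get('COLUMN_LIMIT')
#   penalty = style.Get('SPLIT_PENALTY_EXCESS_CHARACTER') * excess_characters
#
# so with an 80-column limit and a hypothetical per-character penalty of 100,
# a line ending at column 86 adds 6 * 100 = 600 to that alternative's cost.
# ---------------------------------------------------------------------------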
apache-2.0
openslack/openslack-wechat
examples/bae-echo-enterprise/wechatapp.py
14
1661
from __future__ import absolute_import, unicode_literals
from flask import Flask, request, abort
from wechatpy.enterprise.crypto import WeChatCrypto
from wechatpy.exceptions import InvalidSignatureException
from wechatpy.enterprise.exceptions import InvalidCorpIdException
from wechatpy.enterprise import parse_message, create_reply

TOKEN = ''
EncodingAESKey = ''
CorpId = ''

app = Flask(__name__)


@app.route('/wechat', methods=['GET', 'POST'])
def wechat():
    signature = request.args.get('msg_signature', '')
    timestamp = request.args.get('timestamp', '')
    nonce = request.args.get('nonce', '')

    crypto = WeChatCrypto(TOKEN, EncodingAESKey, CorpId)
    if request.method == 'GET':
        echo_str = request.args.get('echostr', '')
        try:
            echo_str = crypto.check_signature(
                signature,
                timestamp,
                nonce,
                echo_str
            )
        except InvalidSignatureException:
            abort(403)
        return echo_str
    else:
        try:
            msg = crypto.decrypt_message(
                request.data,
                signature,
                timestamp,
                nonce
            )
        except (InvalidSignatureException, InvalidCorpIdException):
            abort(403)
        msg = parse_message(msg)
        if msg.type == 'text':
            reply = create_reply(msg.content, msg).render()
        else:
            reply = create_reply('Can not handle this for now', msg).render()
        res = crypto.encrypt_message(reply, nonce, timestamp)
        return res


if __name__ == '__main__':
    app.run('127.0.0.1', 5001, debug=True)
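
# ---------------------------------------------------------------------------
# Editor's note (illustrative, not part of the original example): the WeChat
# enterprise callback reaches this route with msg_signature, timestamp, nonce
# and, for the GET verification handshake, echostr as query parameters. A
# hand-rolled smoke test could therefore look roughly like this; all values
# below are placeholders and a real request must be signed with the token and
# AES key configured above:
#
#   import requests
#   requests.get('http://127.0.0.1:5001/wechat', params={
#       'msg_signature': '<signature>',
#       'timestamp': '1445507696',
#       'nonce': '123456',
#       'echostr': '<encrypted echo string>',
#   })
# ---------------------------------------------------------------------------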
apache-2.0
ncultra/qemu
scripts/vmstate-static-checker.py
29
15449
#!/usr/bin/python # # Compares vmstate information stored in JSON format, obtained from # the -dump-vmstate QEMU command. # # Copyright 2014 Amit Shah <[email protected]> # Copyright 2014 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, see <http://www.gnu.org/licenses/>. import argparse import json import sys # Count the number of errors found taint = 0 def bump_taint(): global taint # Ensure we don't wrap around or reset to 0 -- the shell only has # an 8-bit return value. if taint < 255: taint = taint + 1 def check_fields_match(name, s_field, d_field): if s_field == d_field: return True # Some fields changed names between qemu versions. This list # is used to whitelist such changes in each section / description. changed_names = { 'apic': ['timer', 'timer_expiry'], 'e1000': ['dev', 'parent_obj'], 'ehci': ['dev', 'pcidev'], 'I440FX': ['dev', 'parent_obj'], 'ich9_ahci': ['card', 'parent_obj'], 'ich9-ahci': ['ahci', 'ich9_ahci'], 'ioh3420': ['PCIDevice', 'PCIEDevice'], 'ioh-3240-express-root-port': ['port.br.dev', 'parent_obj.parent_obj.parent_obj', 'port.br.dev.exp.aer_log', 'parent_obj.parent_obj.parent_obj.exp.aer_log'], 'cirrus_vga': ['hw_cursor_x', 'vga.hw_cursor_x', 'hw_cursor_y', 'vga.hw_cursor_y'], 'lsiscsi': ['dev', 'parent_obj'], 'mch': ['d', 'parent_obj'], 'pci_bridge': ['bridge.dev', 'parent_obj', 'bridge.dev.shpc', 'shpc'], 'pcnet': ['pci_dev', 'parent_obj'], 'PIIX3': ['pci_irq_levels', 'pci_irq_levels_vmstate'], 'piix4_pm': ['dev', 'parent_obj', 'pci0_status', 'acpi_pci_hotplug.acpi_pcihp_pci_status[0x0]', 'pm1a.sts', 'ar.pm1.evt.sts', 'pm1a.en', 'ar.pm1.evt.en', 'pm1_cnt.cnt', 'ar.pm1.cnt.cnt', 'tmr.timer', 'ar.tmr.timer', 'tmr.overflow_time', 'ar.tmr.overflow_time', 'gpe', 'ar.gpe'], 'rtl8139': ['dev', 'parent_obj'], 'qxl': ['num_surfaces', 'ssd.num_surfaces'], 'usb-ccid': ['abProtocolDataStructure', 'abProtocolDataStructure.data'], 'usb-host': ['dev', 'parent_obj'], 'usb-mouse': ['usb-ptr-queue', 'HIDPointerEventQueue'], 'usb-tablet': ['usb-ptr-queue', 'HIDPointerEventQueue'], 'vmware_vga': ['card', 'parent_obj'], 'vmware_vga_internal': ['depth', 'new_depth'], 'xhci': ['pci_dev', 'parent_obj'], 'x3130-upstream': ['PCIDevice', 'PCIEDevice'], 'xio3130-express-downstream-port': ['port.br.dev', 'parent_obj.parent_obj.parent_obj', 'port.br.dev.exp.aer_log', 'parent_obj.parent_obj.parent_obj.exp.aer_log'], 'xio3130-downstream': ['PCIDevice', 'PCIEDevice'], 'xio3130-express-upstream-port': ['br.dev', 'parent_obj.parent_obj', 'br.dev.exp.aer_log', 'parent_obj.parent_obj.exp.aer_log'], } if not name in changed_names: return False if s_field in changed_names[name] and d_field in changed_names[name]: return True return False def get_changed_sec_name(sec): # Section names can change -- see commit 292b1634 for an example. 
changes = { "ICH9 LPC": "ICH9-LPC", } for item in changes: if item == sec: return changes[item] if changes[item] == sec: return item return "" def exists_in_substruct(fields, item): # Some QEMU versions moved a few fields inside a substruct. This # kept the on-wire format the same. This function checks if # something got shifted inside a substruct. For example, the # change in commit 1f42d22233b4f3d1a2933ff30e8d6a6d9ee2d08f if not "Description" in fields: return False if not "Fields" in fields["Description"]: return False substruct_fields = fields["Description"]["Fields"] if substruct_fields == []: return False return check_fields_match(fields["Description"]["name"], substruct_fields[0]["field"], item) def check_fields(src_fields, dest_fields, desc, sec): # This function checks for all the fields in a section. If some # fields got embedded into a substruct, this function will also # attempt to check inside the substruct. d_iter = iter(dest_fields) s_iter = iter(src_fields) # Using these lists as stacks to store previous value of s_iter # and d_iter, so that when time comes to exit out of a substruct, # we can go back one level up and continue from where we left off. s_iter_list = [] d_iter_list = [] advance_src = True advance_dest = True unused_count = 0 while True: if advance_src: try: s_item = s_iter.next() except StopIteration: if s_iter_list == []: break s_iter = s_iter_list.pop() continue else: if unused_count == 0: # We want to avoid advancing just once -- when entering a # dest substruct, or when exiting one. advance_src = True if advance_dest: try: d_item = d_iter.next() except StopIteration: if d_iter_list == []: # We were not in a substruct print "Section \"" + sec + "\",", print "Description " + "\"" + desc + "\":", print "expected field \"" + s_item["field"] + "\",", print "while dest has no further fields" bump_taint() break d_iter = d_iter_list.pop() advance_src = False continue else: if unused_count == 0: advance_dest = True if unused_count > 0: if advance_dest == False: unused_count = unused_count - s_item["size"] if unused_count == 0: advance_dest = True continue if unused_count < 0: print "Section \"" + sec + "\",", print "Description \"" + desc + "\":", print "unused size mismatch near \"", print s_item["field"] + "\"" bump_taint() break continue if advance_src == False: unused_count = unused_count - d_item["size"] if unused_count == 0: advance_src = True continue if unused_count < 0: print "Section \"" + sec + "\",", print "Description \"" + desc + "\":", print "unused size mismatch near \"", print d_item["field"] + "\"" bump_taint() break continue if not check_fields_match(desc, s_item["field"], d_item["field"]): # Some fields were put in substructs, keeping the # on-wire format the same, but breaking static tools # like this one. # First, check if dest has a new substruct. if exists_in_substruct(d_item, s_item["field"]): # listiterators don't have a prev() function, so we # have to store our current location, descend into the # substruct, and ensure we come out as if nothing # happened when the substruct is over. # # Essentially we're opening the substructs that got # added which didn't change the wire format. 
d_iter_list.append(d_iter) substruct_fields = d_item["Description"]["Fields"] d_iter = iter(substruct_fields) advance_src = False continue # Next, check if src has substruct that dest removed # (can happen in backward migration: 2.0 -> 1.5) if exists_in_substruct(s_item, d_item["field"]): s_iter_list.append(s_iter) substruct_fields = s_item["Description"]["Fields"] s_iter = iter(substruct_fields) advance_dest = False continue if s_item["field"] == "unused" or d_item["field"] == "unused": if s_item["size"] == d_item["size"]: continue if d_item["field"] == "unused": advance_dest = False unused_count = d_item["size"] - s_item["size"] continue if s_item["field"] == "unused": advance_src = False unused_count = s_item["size"] - d_item["size"] continue print "Section \"" + sec + "\",", print "Description \"" + desc + "\":", print "expected field \"" + s_item["field"] + "\",", print "got \"" + d_item["field"] + "\"; skipping rest" bump_taint() break check_version(s_item, d_item, sec, desc) if not "Description" in s_item: # Check size of this field only if it's not a VMSTRUCT entry check_size(s_item, d_item, sec, desc, s_item["field"]) check_description_in_list(s_item, d_item, sec, desc) def check_subsections(src_sub, dest_sub, desc, sec): for s_item in src_sub: found = False for d_item in dest_sub: if s_item["name"] != d_item["name"]: continue found = True check_descriptions(s_item, d_item, sec) if not found: print "Section \"" + sec + "\", Description \"" + desc + "\":", print "Subsection \"" + s_item["name"] + "\" not found" bump_taint() def check_description_in_list(s_item, d_item, sec, desc): if not "Description" in s_item: return if not "Description" in d_item: print "Section \"" + sec + "\", Description \"" + desc + "\",", print "Field \"" + s_item["field"] + "\": missing description" bump_taint() return check_descriptions(s_item["Description"], d_item["Description"], sec) def check_descriptions(src_desc, dest_desc, sec): check_version(src_desc, dest_desc, sec, src_desc["name"]) if not check_fields_match(sec, src_desc["name"], dest_desc["name"]): print "Section \"" + sec + "\":", print "Description \"" + src_desc["name"] + "\"", print "missing, got \"" + dest_desc["name"] + "\" instead; skipping" bump_taint() return for f in src_desc: if not f in dest_desc: print "Section \"" + sec + "\"", print "Description \"" + src_desc["name"] + "\":", print "Entry \"" + f + "\" missing" bump_taint() continue if f == 'Fields': check_fields(src_desc[f], dest_desc[f], src_desc["name"], sec) if f == 'Subsections': check_subsections(src_desc[f], dest_desc[f], src_desc["name"], sec) def check_version(s, d, sec, desc=None): if s["version_id"] > d["version_id"]: print "Section \"" + sec + "\"", if desc: print "Description \"" + desc + "\":", print "version error:", s["version_id"], ">", d["version_id"] bump_taint() if not "minimum_version_id" in d: return if s["version_id"] < d["minimum_version_id"]: print "Section \"" + sec + "\"", if desc: print "Description \"" + desc + "\":", print "minimum version error:", s["version_id"], "<", print d["minimum_version_id"] bump_taint() def check_size(s, d, sec, desc=None, field=None): if s["size"] != d["size"]: print "Section \"" + sec + "\"", if desc: print "Description \"" + desc + "\"", if field: print "Field \"" + field + "\"", print "size mismatch:", s["size"], ",", d["size"] bump_taint() def check_machine_type(s, d): if s["Name"] != d["Name"]: print "Warning: checking incompatible machine types:", print "\"" + s["Name"] + "\", \"" + d["Name"] + "\"" return def 
main(): help_text = "Parse JSON-formatted vmstate dumps from QEMU in files SRC and DEST. Checks whether migration from SRC to DEST QEMU versions would break based on the VMSTATE information contained within the JSON outputs. The JSON output is created from a QEMU invocation with the -dump-vmstate parameter and a filename argument to it. Other parameters to QEMU do not matter, except the -M (machine type) parameter." parser = argparse.ArgumentParser(description=help_text) parser.add_argument('-s', '--src', type=file, required=True, help='json dump from src qemu') parser.add_argument('-d', '--dest', type=file, required=True, help='json dump from dest qemu') parser.add_argument('--reverse', required=False, default=False, action='store_true', help='reverse the direction') args = parser.parse_args() src_data = json.load(args.src) dest_data = json.load(args.dest) args.src.close() args.dest.close() if args.reverse: temp = src_data src_data = dest_data dest_data = temp for sec in src_data: dest_sec = sec if not dest_sec in dest_data: # Either the section name got changed, or the section # doesn't exist in dest. dest_sec = get_changed_sec_name(sec) if not dest_sec in dest_data: print "Section \"" + sec + "\" does not exist in dest" bump_taint() continue s = src_data[sec] d = dest_data[dest_sec] if sec == "vmschkmachine": check_machine_type(s, d) continue check_version(s, d, sec) for entry in s: if not entry in d: print "Section \"" + sec + "\": Entry \"" + entry + "\"", print "missing" bump_taint() continue if entry == "Description": check_descriptions(s[entry], d[entry], sec) return taint if __name__ == '__main__': sys.exit(main())
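
# ---------------------------------------------------------------------------
# Editor's note (illustrative, not part of the original script): the help text
# above already describes the workflow; under those assumptions a run looks
# like this, with binary names and machine types as placeholders:
#
#   qemu-system-x86_64 -M pc-i440fx-2.0 -dump-vmstate src.json
#   qemu-system-x86_64 -M pc-i440fx-2.1 -dump-vmstate dest.json
#   ./vmstate-static-checker.py -s src.json -d dest.json
#
# The exit status is the number of incompatibilities found, capped at 255 by
# bump_taint() above.
# ---------------------------------------------------------------------------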
gpl-2.0
roandelyf/iTerm2
tools/ply/ply-3.4/example/BASIC/basinterp.py
166
17284
# This file provides the runtime support for running a basic program # Assumes the program has been parsed using basparse.py import sys import math import random class BasicInterpreter: # Initialize the interpreter. prog is a dictionary # containing (line,statement) mappings def __init__(self,prog): self.prog = prog self.functions = { # Built-in function table 'SIN' : lambda z: math.sin(self.eval(z)), 'COS' : lambda z: math.cos(self.eval(z)), 'TAN' : lambda z: math.tan(self.eval(z)), 'ATN' : lambda z: math.atan(self.eval(z)), 'EXP' : lambda z: math.exp(self.eval(z)), 'ABS' : lambda z: abs(self.eval(z)), 'LOG' : lambda z: math.log(self.eval(z)), 'SQR' : lambda z: math.sqrt(self.eval(z)), 'INT' : lambda z: int(self.eval(z)), 'RND' : lambda z: random.random() } # Collect all data statements def collect_data(self): self.data = [] for lineno in self.stat: if self.prog[lineno][0] == 'DATA': self.data = self.data + self.prog[lineno][1] self.dc = 0 # Initialize the data counter # Check for end statements def check_end(self): has_end = 0 for lineno in self.stat: if self.prog[lineno][0] == 'END' and not has_end: has_end = lineno if not has_end: print("NO END INSTRUCTION") self.error = 1 return if has_end != lineno: print("END IS NOT LAST") self.error = 1 # Check loops def check_loops(self): for pc in range(len(self.stat)): lineno = self.stat[pc] if self.prog[lineno][0] == 'FOR': forinst = self.prog[lineno] loopvar = forinst[1] for i in range(pc+1,len(self.stat)): if self.prog[self.stat[i]][0] == 'NEXT': nextvar = self.prog[self.stat[i]][1] if nextvar != loopvar: continue self.loopend[pc] = i break else: print("FOR WITHOUT NEXT AT LINE %s" % self.stat[pc]) self.error = 1 # Evaluate an expression def eval(self,expr): etype = expr[0] if etype == 'NUM': return expr[1] elif etype == 'GROUP': return self.eval(expr[1]) elif etype == 'UNARY': if expr[1] == '-': return -self.eval(expr[2]) elif etype == 'BINOP': if expr[1] == '+': return self.eval(expr[2])+self.eval(expr[3]) elif expr[1] == '-': return self.eval(expr[2])-self.eval(expr[3]) elif expr[1] == '*': return self.eval(expr[2])*self.eval(expr[3]) elif expr[1] == '/': return float(self.eval(expr[2]))/self.eval(expr[3]) elif expr[1] == '^': return abs(self.eval(expr[2]))**self.eval(expr[3]) elif etype == 'VAR': var,dim1,dim2 = expr[1] if not dim1 and not dim2: if var in self.vars: return self.vars[var] else: print("UNDEFINED VARIABLE %s AT LINE %s" % (var, self.stat[self.pc])) raise RuntimeError # May be a list lookup or a function evaluation if dim1 and not dim2: if var in self.functions: # A function return self.functions[var](dim1) else: # A list evaluation if var in self.lists: dim1val = self.eval(dim1) if dim1val < 1 or dim1val > len(self.lists[var]): print("LIST INDEX OUT OF BOUNDS AT LINE %s" % self.stat[self.pc]) raise RuntimeError return self.lists[var][dim1val-1] if dim1 and dim2: if var in self.tables: dim1val = self.eval(dim1) dim2val = self.eval(dim2) if dim1val < 1 or dim1val > len(self.tables[var]) or dim2val < 1 or dim2val > len(self.tables[var][0]): print("TABLE INDEX OUT OUT BOUNDS AT LINE %s" % self.stat[self.pc]) raise RuntimeError return self.tables[var][dim1val-1][dim2val-1] print("UNDEFINED VARIABLE %s AT LINE %s" % (var, self.stat[self.pc])) raise RuntimeError # Evaluate a relational expression def releval(self,expr): etype = expr[1] lhs = self.eval(expr[2]) rhs = self.eval(expr[3]) if etype == '<': if lhs < rhs: return 1 else: return 0 elif etype == '<=': if lhs <= rhs: return 1 else: return 0 elif etype == '>': if lhs > rhs: 
return 1 else: return 0 elif etype == '>=': if lhs >= rhs: return 1 else: return 0 elif etype == '=': if lhs == rhs: return 1 else: return 0 elif etype == '<>': if lhs != rhs: return 1 else: return 0 # Assignment def assign(self,target,value): var, dim1, dim2 = target if not dim1 and not dim2: self.vars[var] = self.eval(value) elif dim1 and not dim2: # List assignment dim1val = self.eval(dim1) if not var in self.lists: self.lists[var] = [0]*10 if dim1val > len(self.lists[var]): print ("DIMENSION TOO LARGE AT LINE %s" % self.stat[self.pc]) raise RuntimeError self.lists[var][dim1val-1] = self.eval(value) elif dim1 and dim2: dim1val = self.eval(dim1) dim2val = self.eval(dim2) if not var in self.tables: temp = [0]*10 v = [] for i in range(10): v.append(temp[:]) self.tables[var] = v # Variable already exists if dim1val > len(self.tables[var]) or dim2val > len(self.tables[var][0]): print("DIMENSION TOO LARGE AT LINE %s" % self.stat[self.pc]) raise RuntimeError self.tables[var][dim1val-1][dim2val-1] = self.eval(value) # Change the current line number def goto(self,linenum): if not linenum in self.prog: print("UNDEFINED LINE NUMBER %d AT LINE %d" % (linenum, self.stat[self.pc])) raise RuntimeError self.pc = self.stat.index(linenum) # Run it def run(self): self.vars = { } # All variables self.lists = { } # List variables self.tables = { } # Tables self.loops = [ ] # Currently active loops self.loopend= { } # Mapping saying where loops end self.gosub = None # Gosub return point (if any) self.error = 0 # Indicates program error self.stat = list(self.prog) # Ordered list of all line numbers self.stat.sort() self.pc = 0 # Current program counter # Processing prior to running self.collect_data() # Collect all of the data statements self.check_end() self.check_loops() if self.error: raise RuntimeError while 1: line = self.stat[self.pc] instr = self.prog[line] op = instr[0] # END and STOP statements if op == 'END' or op == 'STOP': break # We're done # GOTO statement elif op == 'GOTO': newline = instr[1] self.goto(newline) continue # PRINT statement elif op == 'PRINT': plist = instr[1] out = "" for label,val in plist: if out: out += ' '*(15 - (len(out) % 15)) out += label if val: if label: out += " " eval = self.eval(val) out += str(eval) sys.stdout.write(out) end = instr[2] if not (end == ',' or end == ';'): sys.stdout.write("\n") if end == ',': sys.stdout.write(" "*(15-(len(out) % 15))) if end == ';': sys.stdout.write(" "*(3-(len(out) % 3))) # LET statement elif op == 'LET': target = instr[1] value = instr[2] self.assign(target,value) # READ statement elif op == 'READ': for target in instr[1]: if self.dc < len(self.data): value = ('NUM',self.data[self.dc]) self.assign(target,value) self.dc += 1 else: # No more data. Program ends return elif op == 'IF': relop = instr[1] newline = instr[2] if (self.releval(relop)): self.goto(newline) continue elif op == 'FOR': loopvar = instr[1] initval = instr[2] finval = instr[3] stepval = instr[4] # Check to see if this is a new loop if not self.loops or self.loops[-1][0] != self.pc: # Looks like a new loop. 
Make the initial assignment newvalue = initval self.assign((loopvar,None,None),initval) if not stepval: stepval = ('NUM',1) stepval = self.eval(stepval) # Evaluate step here self.loops.append((self.pc,stepval)) else: # It's a repeat of the previous loop # Update the value of the loop variable according to the step stepval = ('NUM',self.loops[-1][1]) newvalue = ('BINOP','+',('VAR',(loopvar,None,None)),stepval) if self.loops[-1][1] < 0: relop = '>=' else: relop = '<=' if not self.releval(('RELOP',relop,newvalue,finval)): # Loop is done. Jump to the NEXT self.pc = self.loopend[self.pc] self.loops.pop() else: self.assign((loopvar,None,None),newvalue) elif op == 'NEXT': if not self.loops: print("NEXT WITHOUT FOR AT LINE %s" % line) return nextvar = instr[1] self.pc = self.loops[-1][0] loopinst = self.prog[self.stat[self.pc]] forvar = loopinst[1] if nextvar != forvar: print("NEXT DOESN'T MATCH FOR AT LINE %s" % line) return continue elif op == 'GOSUB': newline = instr[1] if self.gosub: print("ALREADY IN A SUBROUTINE AT LINE %s" % line) return self.gosub = self.stat[self.pc] self.goto(newline) continue elif op == 'RETURN': if not self.gosub: print("RETURN WITHOUT A GOSUB AT LINE %s" % line) return self.goto(self.gosub) self.gosub = None elif op == 'FUNC': fname = instr[1] pname = instr[2] expr = instr[3] def eval_func(pvalue,name=pname,self=self,expr=expr): self.assign((pname,None,None),pvalue) return self.eval(expr) self.functions[fname] = eval_func elif op == 'DIM': for vname,x,y in instr[1]: if y == 0: # Single dimension variable self.lists[vname] = [0]*x else: # Double dimension variable temp = [0]*y v = [] for i in range(x): v.append(temp[:]) self.tables[vname] = v self.pc += 1 # Utility functions for program listing def expr_str(self,expr): etype = expr[0] if etype == 'NUM': return str(expr[1]) elif etype == 'GROUP': return "(%s)" % self.expr_str(expr[1]) elif etype == 'UNARY': if expr[1] == '-': return "-"+str(expr[2]) elif etype == 'BINOP': return "%s %s %s" % (self.expr_str(expr[2]),expr[1],self.expr_str(expr[3])) elif etype == 'VAR': return self.var_str(expr[1]) def relexpr_str(self,expr): return "%s %s %s" % (self.expr_str(expr[2]),expr[1],self.expr_str(expr[3])) def var_str(self,var): varname,dim1,dim2 = var if not dim1 and not dim2: return varname if dim1 and not dim2: return "%s(%s)" % (varname, self.expr_str(dim1)) return "%s(%s,%s)" % (varname, self.expr_str(dim1),self.expr_str(dim2)) # Create a program listing def list(self): stat = list(self.prog) # Ordered list of all line numbers stat.sort() for line in stat: instr = self.prog[line] op = instr[0] if op in ['END','STOP','RETURN']: print("%s %s" % (line, op)) continue elif op == 'REM': print("%s %s" % (line, instr[1])) elif op == 'PRINT': _out = "%s %s " % (line, op) first = 1 for p in instr[1]: if not first: _out += ", " if p[0] and p[1]: _out += '"%s"%s' % (p[0],self.expr_str(p[1])) elif p[1]: _out += self.expr_str(p[1]) else: _out += '"%s"' % (p[0],) first = 0 if instr[2]: _out += instr[2] print(_out) elif op == 'LET': print("%s LET %s = %s" % (line,self.var_str(instr[1]),self.expr_str(instr[2]))) elif op == 'READ': _out = "%s READ " % line first = 1 for r in instr[1]: if not first: _out += "," _out += self.var_str(r) first = 0 print(_out) elif op == 'IF': print("%s IF %s THEN %d" % (line,self.relexpr_str(instr[1]),instr[2])) elif op == 'GOTO' or op == 'GOSUB': print("%s %s %s" % (line, op, instr[1])) elif op == 'FOR': _out = "%s FOR %s = %s TO %s" % (line,instr[1],self.expr_str(instr[2]),self.expr_str(instr[3])) if instr[4]: 
_out += " STEP %s" % (self.expr_str(instr[4])) print(_out) elif op == 'NEXT': print("%s NEXT %s" % (line, instr[1])) elif op == 'FUNC': print("%s DEF %s(%s) = %s" % (line,instr[1],instr[2],self.expr_str(instr[3]))) elif op == 'DIM': _out = "%s DIM " % line first = 1 for vname,x,y in instr[1]: if not first: _out += "," first = 0 if y == 0: _out += "%s(%d)" % (vname,x) else: _out += "%s(%d,%d)" % (vname,x,y) print(_out) elif op == 'DATA': _out = "%s DATA " % line first = 1 for v in instr[1]: if not first: _out += "," first = 0 _out += v print(_out) # Erase the current program def new(self): self.prog = {} # Insert statements def add_statements(self,prog): for line,stat in prog.items(): self.prog[line] = stat # Delete a statement def del_line(self,lineno): try: del self.prog[lineno] except KeyError: pass
gpl-2.0
hectorip/PolymerBoilerplate
bp_includes/external/requests/packages/chardet/langbulgarianmodel.py
2965
12784
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # 255: Control characters that usually does not exist in any text # 254: Carriage/Return # 253: symbol (punctuation) that does not belong to word # 252: 0 - 9 # Character Mapping Table: # this table is modified base on win1251BulgarianCharToOrderMap, so # only number <64 is sure valid Latin5_BulgarianCharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40 110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50 253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60 116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70 194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80 210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90 81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0 1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0 7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0 62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0 ) win1251BulgarianCharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40 110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50 253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60 116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70 206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80 221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90 88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0 73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0 39, 28, 
34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0 1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0 7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0 ) # Model Table: # total sequences: 100% # first 512 sequences: 96.9392% # first 1024 sequences:3.0618% # rest sequences: 0.2992% # negative sequences: 0.0020% BulgarianLangModel = ( 0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2, 3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1, 0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0, 0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0, 0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0, 0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0, 0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1, 3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0, 0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1, 3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0, 0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, 3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0, 0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1, 3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0, 1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, 3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0, 0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1, 3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0, 0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, 3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0, 0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, 3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0, 0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, 3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1, 0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0, 0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, 3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0, 0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, 3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0, 0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0, 0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, 3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0, 0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, 3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3, 2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1, 3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1, 3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2, 1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0, 3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1, 1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0, 2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2, 2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0, 3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2, 1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0, 2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2, 2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0, 3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2, 1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0, 2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2, 2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0, 2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2, 1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0, 2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2, 1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0, 3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2, 1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0, 3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1, 1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0, 2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1, 1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0, 2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2, 1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0, 2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1, 1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0, 3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0, 1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2, 1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1, 2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2, 1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0, 2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2, 1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0, 1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1, 0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0, 1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2, 1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0, 
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1, 1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0, 1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1, 0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0, 1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1, 0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0, 2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1, 0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0, 2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0, 1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1, 0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0, 0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0, 1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1, 1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0, 1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, ) Latin5BulgarianModel = { 'charToOrderMap': Latin5_BulgarianCharToOrderMap, 'precedenceMatrix': BulgarianLangModel, 'mTypicalPositiveRatio': 0.969392, 'keepEnglishLetter': False, 'charsetName': "ISO-8859-5" } Win1251BulgarianModel = { 'charToOrderMap': win1251BulgarianCharToOrderMap, 'precedenceMatrix': BulgarianLangModel, 'mTypicalPositiveRatio': 0.969392, 'keepEnglishLetter': False, 'charsetName': "windows-1251" } # flake8: noqa
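
# ---------------------------------------------------------------------------
# Editor's note (illustrative, not part of the original file): each model dict
# above pairs a 256-entry character-to-order map (byte value -> frequency rank)
# with the shared BulgarianLangModel matrix of two-character sequence
# likelihood classes. chardet's single-byte charset prober maps consecutive
# byte pairs through charToOrderMap and scores them against precedenceMatrix,
# which is how the ISO-8859-5 and windows-1251 candidates are ranked.
# ---------------------------------------------------------------------------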
gpl-2.0
ptonner/GPy
GPy/inference/latent_function_inference/var_gauss.py
15
2641
# Copyright (c) 2015, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)

import numpy as np
from ...util.linalg import pdinv
from .posterior import Posterior
from . import LatentFunctionInference

log_2_pi = np.log(2*np.pi)


class VarGauss(LatentFunctionInference):
    """
    The Variational Gaussian Approximation revisited

    @article{Opper:2009,
        title = {The Variational Gaussian Approximation Revisited},
        author = {Opper, Manfred and Archambeau, C{\'e}dric},
        journal = {Neural Comput.},
        year = {2009},
        pages = {786--792},
    }
    """
    def __init__(self, alpha, beta):
        """
        :param alpha: GPy.core.Param variational parameter
        :param beta: GPy.core.Param variational parameter
        """
        self.alpha, self.beta = alpha, beta

    def inference(self, kern, X, likelihood, Y, mean_function=None,
                  Y_metadata=None, Z=None):
        if mean_function is not None:
            raise NotImplementedError
        num_data, output_dim = Y.shape
        assert output_dim == 1, "Only one output supported"

        K = kern.K(X)
        m = K.dot(self.alpha)
        KB = K*self.beta[:, None]
        BKB = KB*self.beta[None, :]
        A = np.eye(num_data) + BKB
        Ai, LA, _, Alogdet = pdinv(A)
        # posterior covariance: need the full matrix for gradients
        Sigma = np.diag(self.beta**-2) - Ai/self.beta[:, None]/self.beta[None, :]
        var = np.diag(Sigma).reshape(-1, 1)

        F, dF_dm, dF_dv, dF_dthetaL = likelihood.variational_expectations(
            Y, m, var, Y_metadata=Y_metadata)
        if dF_dthetaL is not None:
            dL_dthetaL = dF_dthetaL.sum(1).sum(1)
        else:
            dL_dthetaL = np.array([])

        dF_da = np.dot(K, dF_dm)
        SigmaB = Sigma*self.beta
        #dF_db_ = -np.diag(Sigma.dot(np.diag(dF_dv.flatten())).dot(SigmaB))*2
        dF_db = -2*np.sum(Sigma**2 * (dF_dv * self.beta), 0)
        #assert np.allclose(dF_db, dF_db_)

        KL = 0.5*(Alogdet + np.trace(Ai) - num_data + np.sum(m*self.alpha))
        dKL_da = m
        A_A2 = Ai - Ai.dot(Ai)
        dKL_db = np.diag(np.dot(KB.T, A_A2))

        log_marginal = F.sum() - KL

        self.alpha.gradient = dF_da - dKL_da
        self.beta.gradient = dF_db - dKL_db

        # K-gradients
        dKL_dK = 0.5*(self.alpha*self.alpha.T + self.beta[:, None]*self.beta[None, :]*A_A2)
        tmp = Ai*self.beta[:, None]/self.beta[None, :]
        dF_dK = self.alpha*dF_dm.T + np.dot(tmp*dF_dv, tmp.T)
        return Posterior(mean=m, cov=Sigma, K=K),\
            log_marginal,\
            {'dL_dK': dF_dK - dKL_dK, 'dL_dthetaL': dL_dthetaL}
bsd-3-clause
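The inference() routine above computes the posterior mean m = K*alpha and covariance Sigma = diag(beta^-2) - A^-1 / (beta beta^T) with A = I + B K B. The following standalone numpy sketch reproduces just that algebra; the toy RBF kernel, the random alpha/beta values and the jitter term are invented for illustration and are not part of the GPy file.

# Standalone numpy sketch of the VarGauss posterior mean/covariance algebra
# (toy kernel and random variational parameters are assumptions for the demo).
import numpy as np

def toy_rbf(X, lengthscale=1.0):
    # simple squared-exponential kernel matrix, stand-in for kern.K(X)
    d2 = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)
    return np.exp(-0.5 * d2 / lengthscale ** 2)

rng = np.random.RandomState(0)
X = rng.rand(5, 1)
K = toy_rbf(X) + 1e-8 * np.eye(5)     # small jitter for numerical stability
alpha = rng.randn(5, 1)               # variational parameter (as in VarGauss)
beta = rng.rand(5) + 0.5              # variational parameter (kept positive)

m = K.dot(alpha)                      # posterior mean, as in inference()
KB = K * beta[:, None]
A = np.eye(5) + KB * beta[None, :]    # A = I + B K B
Ai = np.linalg.inv(A)
Sigma = np.diag(beta ** -2) - Ai / beta[:, None] / beta[None, :]
print(m.ravel(), np.diag(Sigma))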
ppanczyk/ansible
contrib/inventory/softlayer.py
29
7171
#!/usr/bin/env python """ SoftLayer external inventory script. The SoftLayer Python API client is required. Use `pip install softlayer` to install it. You have a few different options for configuring your username and api_key. You can pass environment variables (SL_USERNAME and SL_API_KEY). You can also write INI file to ~/.softlayer or /etc/softlayer.conf. For more information see the SL API at: - https://softlayer-python.readthedocs.org/en/latest/config_file.html The SoftLayer Python client has a built in command for saving this configuration file via the command `sl config setup`. """ # Copyright (C) 2014 AJ Bourg <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # I found the structure of the ec2.py script very helpful as an example # as I put this together. Thanks to whoever wrote that script! # import SoftLayer import re import argparse import itertools try: import json except: import simplejson as json class SoftLayerInventory(object): common_items = [ 'id', 'globalIdentifier', 'hostname', 'domain', 'fullyQualifiedDomainName', 'primaryBackendIpAddress', 'primaryIpAddress', 'datacenter', 'tagReferences.tag.name', 'userData.value', ] vs_items = [ 'lastKnownPowerState.name', 'powerState', 'maxCpu', 'maxMemory', 'activeTransaction.transactionStatus[friendlyName,name]', 'status', ] hw_items = [ 'hardwareStatusId', 'processorPhysicalCoreAmount', 'memoryCapacity', ] def _empty_inventory(self): return {"_meta": {"hostvars": {}}} def __init__(self): '''Main path''' self.inventory = self._empty_inventory() self.parse_options() if self.args.list: self.get_all_servers() print(self.json_format_dict(self.inventory, True)) elif self.args.host: self.get_virtual_servers() print(self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True)) def to_safe(self, word): '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups''' return re.sub("[^A-Za-z0-9\-\.]", "_", word) def push(self, my_dict, key, element): '''Push an element onto an array that may not have been defined in the dict''' if key in my_dict: my_dict[key].append(element) else: my_dict[key] = [element] def parse_options(self): '''Parse all the arguments from the CLI''' parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on SoftLayer') parser.add_argument('--list', action='store_true', default=False, help='List instances (default: False)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') self.args = parser.parse_args() def json_format_dict(self, data, pretty=False): '''Converts a dict to a JSON object and dumps it as a formatted string''' if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) def process_instance(self, instance, instance_type="virtual"): '''Populate the inventory dictionary with any instance information''' # only want active instances if 'status' in 
instance and instance['status']['name'] != 'Active': return # and powered on instances if 'powerState' in instance and instance['powerState']['name'] != 'Running': return # 5 is active for hardware... see https://forums.softlayer.com/forum/softlayer-developer-network/general-discussion/2955-hardwarestatusid if 'hardwareStatusId' in instance and instance['hardwareStatusId'] != 5: return # if there's no IP address, we can't reach it if 'primaryIpAddress' not in instance: return instance['userData'] = instance['userData'][0]['value'] if instance['userData'] else '' dest = instance['primaryIpAddress'] self.inventory["_meta"]["hostvars"][dest] = instance # Inventory: group by memory if 'maxMemory' in instance: self.push(self.inventory, self.to_safe('memory_' + str(instance['maxMemory'])), dest) elif 'memoryCapacity' in instance: self.push(self.inventory, self.to_safe('memory_' + str(instance['memoryCapacity'])), dest) # Inventory: group by cpu count if 'maxCpu' in instance: self.push(self.inventory, self.to_safe('cpu_' + str(instance['maxCpu'])), dest) elif 'processorPhysicalCoreAmount' in instance: self.push(self.inventory, self.to_safe('cpu_' + str(instance['processorPhysicalCoreAmount'])), dest) # Inventory: group by datacenter self.push(self.inventory, self.to_safe('datacenter_' + instance['datacenter']['name']), dest) # Inventory: group by hostname self.push(self.inventory, self.to_safe(instance['hostname']), dest) # Inventory: group by FQDN self.push(self.inventory, self.to_safe(instance['fullyQualifiedDomainName']), dest) # Inventory: group by domain self.push(self.inventory, self.to_safe(instance['domain']), dest) # Inventory: group by type (hardware/virtual) self.push(self.inventory, instance_type, dest) # Inventory: group by tag for tag in instance['tagReferences']: self.push(self.inventory, tag['tag']['name'], dest) def get_virtual_servers(self): '''Get all the CCI instances''' vs = SoftLayer.VSManager(self.client) mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.vs_items)) instances = vs.list_instances(mask=mask) for instance in instances: self.process_instance(instance) def get_physical_servers(self): '''Get all the hardware instances''' hw = SoftLayer.HardwareManager(self.client) mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.hw_items)) instances = hw.list_hardware(mask=mask) for instance in instances: self.process_instance(instance, 'hardware') def get_all_servers(self): self.client = SoftLayer.Client() self.get_virtual_servers() self.get_physical_servers() SoftLayerInventory()
gpl-3.0
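process_instance() above builds Ansible groups by pushing each host's primary IP into keys such as memory_*, cpu_* and datacenter_* via to_safe() and push(). The short sketch below replays that grouping logic on an invented instance dict; the IP, memory, CPU and datacenter values are made up and are not real SoftLayer API output.

# Minimal sketch of the grouping pattern used by SoftLayerInventory
# (the instance dict below is an invented example).
import re

def to_safe(word):
    return re.sub(r"[^A-Za-z0-9\-\.]", "_", word)

def push(d, key, element):
    d.setdefault(key, []).append(element)

inventory = {"_meta": {"hostvars": {}}}
instance = {"primaryIpAddress": "10.0.0.5", "maxMemory": 4096,
            "maxCpu": 2, "datacenter": {"name": "dal05"}}

dest = instance["primaryIpAddress"]
inventory["_meta"]["hostvars"][dest] = instance
push(inventory, to_safe("memory_" + str(instance["maxMemory"])), dest)
push(inventory, to_safe("cpu_" + str(instance["maxCpu"])), dest)
push(inventory, to_safe("datacenter_" + instance["datacenter"]["name"]), dest)
print(inventory)   # groups: memory_4096, cpu_2, datacenter_dal05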
abergeron/DeepLearningTutorials
code/convolutional_mlp.py
4
12722
"""This tutorial introduces the LeNet5 neural network architecture using Theano. LeNet5 is a convolutional neural network, good for classifying images. This tutorial shows how to build the architecture, and comes with all the hyper-parameters you need to reproduce the paper's MNIST results. This implementation simplifies the model in the following ways: - LeNetConvPool doesn't implement location-specific gain and bias parameters - LeNetConvPool doesn't implement pooling by average, it implements pooling by max. - Digit classification is implemented with a logistic regression rather than an RBF network - LeNet5 was not fully-connected convolutions at second layer References: - Y. LeCun, L. Bottou, Y. Bengio and P. Haffner: Gradient-Based Learning Applied to Document Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998. http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf """ from __future__ import print_function import os import sys import timeit import numpy import theano import theano.tensor as T from theano.tensor.signal import pool from theano.tensor.nnet import conv2d from logistic_sgd import LogisticRegression, load_data from mlp import HiddenLayer class LeNetConvPoolLayer(object): """Pool Layer of a convolutional network """ def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)): """ Allocate a LeNetConvPoolLayer with shared variable internal parameters. :type rng: numpy.random.RandomState :param rng: a random number generator used to initialize weights :type input: theano.tensor.dtensor4 :param input: symbolic image tensor, of shape image_shape :type filter_shape: tuple or list of length 4 :param filter_shape: (number of filters, num input feature maps, filter height, filter width) :type image_shape: tuple or list of length 4 :param image_shape: (batch size, num input feature maps, image height, image width) :type poolsize: tuple or list of length 2 :param poolsize: the downsampling (pooling) factor (#rows, #cols) """ assert image_shape[1] == filter_shape[1] self.input = input # there are "num input feature maps * filter height * filter width" # inputs to each hidden unit fan_in = numpy.prod(filter_shape[1:]) # each unit in the lower layer receives a gradient from: # "num output feature maps * filter height * filter width" / # pooling size fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) // numpy.prod(poolsize)) # initialize weights with random weights W_bound = numpy.sqrt(6. / (fan_in + fan_out)) self.W = theano.shared( numpy.asarray( rng.uniform(low=-W_bound, high=W_bound, size=filter_shape), dtype=theano.config.floatX ), borrow=True ) # the bias is a 1D tensor -- one bias per output feature map b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX) self.b = theano.shared(value=b_values, borrow=True) # convolve input feature maps with filters conv_out = conv2d( input=input, filters=self.W, filter_shape=filter_shape, input_shape=image_shape ) # pool each feature map individually, using maxpooling pooled_out = pool.pool_2d( input=conv_out, ds=poolsize, ignore_border=True ) # add the bias term. Since the bias is a vector (1D array), we first # reshape it to a tensor of shape (1, n_filters, 1, 1). 
Each bias will # thus be broadcasted across mini-batches and feature map # width & height self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')) # store parameters of this layer self.params = [self.W, self.b] # keep track of model input self.input = input def evaluate_lenet5(learning_rate=0.1, n_epochs=200, dataset='mnist.pkl.gz', nkerns=[20, 50], batch_size=500): """ Demonstrates lenet on MNIST dataset :type learning_rate: float :param learning_rate: learning rate used (factor for the stochastic gradient) :type n_epochs: int :param n_epochs: maximal number of epochs to run the optimizer :type dataset: string :param dataset: path to the dataset used for training /testing (MNIST here) :type nkerns: list of ints :param nkerns: number of kernels on each layer """ rng = numpy.random.RandomState(23455) datasets = load_data(dataset) train_set_x, train_set_y = datasets[0] valid_set_x, valid_set_y = datasets[1] test_set_x, test_set_y = datasets[2] # compute number of minibatches for training, validation and testing n_train_batches = train_set_x.get_value(borrow=True).shape[0] n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] n_test_batches = test_set_x.get_value(borrow=True).shape[0] n_train_batches //= batch_size n_valid_batches //= batch_size n_test_batches //= batch_size # allocate symbolic variables for the data index = T.lscalar() # index to a [mini]batch # start-snippet-1 x = T.matrix('x') # the data is presented as rasterized images y = T.ivector('y') # the labels are presented as 1D vector of # [int] labels ###################### # BUILD ACTUAL MODEL # ###################### print('... building the model') # Reshape matrix of rasterized images of shape (batch_size, 28 * 28) # to a 4D tensor, compatible with our LeNetConvPoolLayer # (28, 28) is the size of MNIST images. layer0_input = x.reshape((batch_size, 1, 28, 28)) # Construct the first convolutional pooling layer: # filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24) # maxpooling reduces this further to (24/2, 24/2) = (12, 12) # 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12) layer0 = LeNetConvPoolLayer( rng, input=layer0_input, image_shape=(batch_size, 1, 28, 28), filter_shape=(nkerns[0], 1, 5, 5), poolsize=(2, 2) ) # Construct the second convolutional pooling layer # filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8) # maxpooling reduces this further to (8/2, 8/2) = (4, 4) # 4D output tensor is thus of shape (batch_size, nkerns[1], 4, 4) layer1 = LeNetConvPoolLayer( rng, input=layer0.output, image_shape=(batch_size, nkerns[0], 12, 12), filter_shape=(nkerns[1], nkerns[0], 5, 5), poolsize=(2, 2) ) # the HiddenLayer being fully-connected, it operates on 2D matrices of # shape (batch_size, num_pixels) (i.e matrix of rasterized images). # This will generate a matrix of shape (batch_size, nkerns[1] * 4 * 4), # or (500, 50 * 4 * 4) = (500, 800) with the default values. 
layer2_input = layer1.output.flatten(2) # construct a fully-connected sigmoidal layer layer2 = HiddenLayer( rng, input=layer2_input, n_in=nkerns[1] * 4 * 4, n_out=500, activation=T.tanh ) # classify the values of the fully-connected sigmoidal layer layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10) # the cost we minimize during training is the NLL of the model cost = layer3.negative_log_likelihood(y) # create a function to compute the mistakes that are made by the model test_model = theano.function( [index], layer3.errors(y), givens={ x: test_set_x[index * batch_size: (index + 1) * batch_size], y: test_set_y[index * batch_size: (index + 1) * batch_size] } ) validate_model = theano.function( [index], layer3.errors(y), givens={ x: valid_set_x[index * batch_size: (index + 1) * batch_size], y: valid_set_y[index * batch_size: (index + 1) * batch_size] } ) # create a list of all model parameters to be fit by gradient descent params = layer3.params + layer2.params + layer1.params + layer0.params # create a list of gradients for all model parameters grads = T.grad(cost, params) # train_model is a function that updates the model parameters by # SGD Since this model has many parameters, it would be tedious to # manually create an update rule for each model parameter. We thus # create the updates list by automatically looping over all # (params[i], grads[i]) pairs. updates = [ (param_i, param_i - learning_rate * grad_i) for param_i, grad_i in zip(params, grads) ] train_model = theano.function( [index], cost, updates=updates, givens={ x: train_set_x[index * batch_size: (index + 1) * batch_size], y: train_set_y[index * batch_size: (index + 1) * batch_size] } ) # end-snippet-1 ############### # TRAIN MODEL # ############### print('... training') # early-stopping parameters patience = 10000 # look as this many examples regardless patience_increase = 2 # wait this much longer when a new best is # found improvement_threshold = 0.995 # a relative improvement of this much is # considered significant validation_frequency = min(n_train_batches, patience // 2) # go through this many # minibatche before checking the network # on the validation set; in this case we # check every epoch best_validation_loss = numpy.inf best_iter = 0 test_score = 0. 
start_time = timeit.default_timer() epoch = 0 done_looping = False while (epoch < n_epochs) and (not done_looping): epoch = epoch + 1 for minibatch_index in range(n_train_batches): iter = (epoch - 1) * n_train_batches + minibatch_index if iter % 100 == 0: print('training @ iter = ', iter) cost_ij = train_model(minibatch_index) if (iter + 1) % validation_frequency == 0: # compute zero-one loss on validation set validation_losses = [validate_model(i) for i in range(n_valid_batches)] this_validation_loss = numpy.mean(validation_losses) print('epoch %i, minibatch %i/%i, validation error %f %%' % (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.)) # if we got the best validation score until now if this_validation_loss < best_validation_loss: #improve patience if loss improvement is good enough if this_validation_loss < best_validation_loss * \ improvement_threshold: patience = max(patience, iter * patience_increase) # save best validation score and iteration number best_validation_loss = this_validation_loss best_iter = iter # test it on the test set test_losses = [ test_model(i) for i in range(n_test_batches) ] test_score = numpy.mean(test_losses) print((' epoch %i, minibatch %i/%i, test error of ' 'best model %f %%') % (epoch, minibatch_index + 1, n_train_batches, test_score * 100.)) if patience <= iter: done_looping = True break end_time = timeit.default_timer() print('Optimization complete.') print('Best validation score of %f %% obtained at iteration %i, ' 'with test performance %f %%' % (best_validation_loss * 100., best_iter + 1, test_score * 100.)) print(('The code for file ' + os.path.split(__file__)[1] + ' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr) if __name__ == '__main__': evaluate_lenet5() def experiment(state, channel): evaluate_lenet5(state.learning_rate, dataset=state.dataset)
bsd-3-clause
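The shape comments in evaluate_lenet5() (28 -> 24 -> 12 -> 8 -> 4) follow from "valid" 5x5 convolutions followed by 2x2 max pooling. The tiny helper below reproduces that arithmetic for the default nkerns=[20, 50]; it is purely illustrative and not part of the tutorial code.

# Feature-map size bookkeeping for a valid 5x5 convolution plus 2x2 max pooling.
def conv_pool_out(size, filter_size=5, pool=2):
    return (size - filter_size + 1) // pool

s0 = conv_pool_out(28)      # layer0: (28-5+1)/2 = 12
s1 = conv_pool_out(s0)      # layer1: (12-5+1)/2 = 4
flat = 50 * s1 * s1         # nkerns[1] * 4 * 4 = 800 inputs to the hidden layer
print(s0, s1, flat)         # 12 4 800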
simbha/mAngE-Gin
lib/django/contrib/formtools/wizard/storage/base.py
79
4920
from django.core.files.uploadedfile import UploadedFile
from django.utils.datastructures import MultiValueDict
from django.utils.functional import lazy_property
from django.utils import six

from django.contrib.formtools.wizard.storage.exceptions import NoFileStorageConfigured


class BaseStorage(object):
    step_key = 'step'
    step_data_key = 'step_data'
    step_files_key = 'step_files'
    extra_data_key = 'extra_data'

    def __init__(self, prefix, request=None, file_storage=None):
        self.prefix = 'wizard_%s' % prefix
        self.request = request
        self.file_storage = file_storage
        self._files = {}
        self._tmp_files = []

    def init_data(self):
        self.data = {
            self.step_key: None,
            self.step_data_key: {},
            self.step_files_key: {},
            self.extra_data_key: {},
        }

    def reset(self):
        # Store unused temporary file names in order to delete them
        # at the end of the response cycle through a callback attached in
        # `update_response`.
        wizard_files = self.data[self.step_files_key]
        for step_files in six.itervalues(wizard_files):
            for step_file in six.itervalues(step_files):
                self._tmp_files.append(step_file['tmp_name'])
        self.init_data()

    def _get_current_step(self):
        return self.data[self.step_key]

    def _set_current_step(self, step):
        self.data[self.step_key] = step

    current_step = lazy_property(_get_current_step, _set_current_step)

    def _get_extra_data(self):
        return self.data[self.extra_data_key]

    def _set_extra_data(self, extra_data):
        self.data[self.extra_data_key] = extra_data

    extra_data = lazy_property(_get_extra_data, _set_extra_data)

    def get_step_data(self, step):
        # When reading the serialized data, upconvert it to a MultiValueDict,
        # some serializers (json) don't preserve the type of the object.
        values = self.data[self.step_data_key].get(step, None)
        if values is not None:
            values = MultiValueDict(values)
        return values

    def set_step_data(self, step, cleaned_data):
        # If the value is a MultiValueDict, convert it to a regular dict of the
        # underlying contents. Some serializers call the public API on it (as
        # opposed to the underlying dict methods), in which case the content
        # can be truncated (__getitem__ returns only the first item).
        if isinstance(cleaned_data, MultiValueDict):
            cleaned_data = dict(cleaned_data.lists())
        self.data[self.step_data_key][step] = cleaned_data

    @property
    def current_step_data(self):
        return self.get_step_data(self.current_step)

    def get_step_files(self, step):
        wizard_files = self.data[self.step_files_key].get(step, {})

        if wizard_files and not self.file_storage:
            raise NoFileStorageConfigured(
                "You need to define 'file_storage' in your "
                "wizard view in order to handle file uploads.")

        files = {}
        for field, field_dict in six.iteritems(wizard_files):
            field_dict = field_dict.copy()
            tmp_name = field_dict.pop('tmp_name')
            if (step, field) not in self._files:
                self._files[(step, field)] = UploadedFile(
                    file=self.file_storage.open(tmp_name), **field_dict)
            files[field] = self._files[(step, field)]
        return files or None

    def set_step_files(self, step, files):
        if files and not self.file_storage:
            raise NoFileStorageConfigured(
                "You need to define 'file_storage' in your "
                "wizard view in order to handle file uploads.")

        if step not in self.data[self.step_files_key]:
            self.data[self.step_files_key][step] = {}

        for field, field_file in six.iteritems(files or {}):
            tmp_filename = self.file_storage.save(field_file.name, field_file)
            file_dict = {
                'tmp_name': tmp_filename,
                'name': field_file.name,
                'content_type': field_file.content_type,
                'size': field_file.size,
                'charset': field_file.charset
            }
            self.data[self.step_files_key][step][field] = file_dict

    @property
    def current_step_files(self):
        return self.get_step_files(self.current_step)

    def update_response(self, response):
        def post_render_callback(response):
            for file in self._files.values():
                if not file.closed:
                    file.close()
            for tmp_file in self._tmp_files:
                self.file_storage.delete(tmp_file)

        if hasattr(response, 'render'):
            response.add_post_render_callback(post_render_callback)
        else:
            post_render_callback(response)
mit
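set_step_data() above converts a MultiValueDict to a plain dict of lists so serializers cannot silently drop repeated values, and get_step_data() upconverts it on the way back. The small round-trip sketch below illustrates that behaviour; the field names and values are invented examples.

# Round-trip sketch of the MultiValueDict handling in BaseStorage
# (field names and values are illustrative assumptions).
from django.utils.datastructures import MultiValueDict

posted = MultiValueDict({'tags': ['a', 'b'], 'name': ['alice']})
serialized = dict(posted.lists())          # what set_step_data() stores
restored = MultiValueDict(serialized)      # what get_step_data() returns
assert restored.getlist('tags') == ['a', 'b']
assert restored['name'] == 'alice'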
ma314smith/home-assistant
homeassistant/components/notify/command_line.py
11
1562
""" Support for command line notification services. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/notify.command_line/ """ import logging import subprocess import voluptuous as vol from homeassistant.const import (CONF_COMMAND, CONF_NAME) from homeassistant.components.notify import ( BaseNotificationService, PLATFORM_SCHEMA) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_COMMAND): cv.string, vol.Optional(CONF_NAME): cv.string, }) def get_service(hass, config): """Get the Command Line notification service.""" command = config[CONF_COMMAND] return CommandLineNotificationService(command) class CommandLineNotificationService(BaseNotificationService): """Implement the notification service for the Command Line service.""" def __init__(self, command): """Initialize the service.""" self.command = command def send_message(self, message="", **kwargs): """Send a message to a command line.""" try: proc = subprocess.Popen(self.command, universal_newlines=True, stdin=subprocess.PIPE, shell=True) proc.communicate(input=message) if proc.returncode != 0: _LOGGER.error('Command failed: %s', self.command) except subprocess.SubprocessError: _LOGGER.error('Error trying to exec Command: %s', self.command)
mit
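send_message() above simply pipes the message into the configured shell command over stdin. The standalone sketch below mirrors that subprocess call outside Home Assistant; using 'cat' as the command and running on a Unix-like system are assumptions made only for the demo.

# Standalone sketch of what send_message() does for a configured command
# ('cat' is an arbitrary example command).
import subprocess

command = 'cat'
proc = subprocess.Popen(command, universal_newlines=True,
                        stdin=subprocess.PIPE, shell=True)
proc.communicate(input='hello from the command line notifier')
print('exit code:', proc.returncode)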
seanmonstar/servo
src/components/script/dom/bindings/codegen/parser/tests/test_attr.py
106
12541
import WebIDL def WebIDLTest(parser, harness): testData = [("::TestAttr%s::b", "b", "Byte%s", False), ("::TestAttr%s::rb", "rb", "Byte%s", True), ("::TestAttr%s::o", "o", "Octet%s", False), ("::TestAttr%s::ro", "ro", "Octet%s", True), ("::TestAttr%s::s", "s", "Short%s", False), ("::TestAttr%s::rs", "rs", "Short%s", True), ("::TestAttr%s::us", "us", "UnsignedShort%s", False), ("::TestAttr%s::rus", "rus", "UnsignedShort%s", True), ("::TestAttr%s::l", "l", "Long%s", False), ("::TestAttr%s::rl", "rl", "Long%s", True), ("::TestAttr%s::ul", "ul", "UnsignedLong%s", False), ("::TestAttr%s::rul", "rul", "UnsignedLong%s", True), ("::TestAttr%s::ll", "ll", "LongLong%s", False), ("::TestAttr%s::rll", "rll", "LongLong%s", True), ("::TestAttr%s::ull", "ull", "UnsignedLongLong%s", False), ("::TestAttr%s::rull", "rull", "UnsignedLongLong%s", True), ("::TestAttr%s::str", "str", "String%s", False), ("::TestAttr%s::rstr", "rstr", "String%s", True), ("::TestAttr%s::obj", "obj", "Object%s", False), ("::TestAttr%s::robj", "robj", "Object%s", True), ("::TestAttr%s::object", "object", "Object%s", False), ("::TestAttr%s::f", "f", "Float%s", False), ("::TestAttr%s::rf", "rf", "Float%s", True)] parser.parse(""" interface TestAttr { attribute byte b; readonly attribute byte rb; attribute octet o; readonly attribute octet ro; attribute short s; readonly attribute short rs; attribute unsigned short us; readonly attribute unsigned short rus; attribute long l; readonly attribute long rl; attribute unsigned long ul; readonly attribute unsigned long rul; attribute long long ll; readonly attribute long long rll; attribute unsigned long long ull; readonly attribute unsigned long long rull; attribute DOMString str; readonly attribute DOMString rstr; attribute object obj; readonly attribute object robj; attribute object _object; attribute float f; readonly attribute float rf; }; interface TestAttrNullable { attribute byte? b; readonly attribute byte? rb; attribute octet? o; readonly attribute octet? ro; attribute short? s; readonly attribute short? rs; attribute unsigned short? us; readonly attribute unsigned short? rus; attribute long? l; readonly attribute long? rl; attribute unsigned long? ul; readonly attribute unsigned long? rul; attribute long long? ll; readonly attribute long long? rll; attribute unsigned long long? ull; readonly attribute unsigned long long? rull; attribute DOMString? str; readonly attribute DOMString? rstr; attribute object? obj; readonly attribute object? robj; attribute object? _object; attribute float? f; readonly attribute float? rf; }; interface TestAttrArray { attribute byte[] b; readonly attribute byte[] rb; attribute octet[] o; readonly attribute octet[] ro; attribute short[] s; readonly attribute short[] rs; attribute unsigned short[] us; readonly attribute unsigned short[] rus; attribute long[] l; readonly attribute long[] rl; attribute unsigned long[] ul; readonly attribute unsigned long[] rul; attribute long long[] ll; readonly attribute long long[] rll; attribute unsigned long long[] ull; readonly attribute unsigned long long[] rull; attribute DOMString[] str; readonly attribute DOMString[] rstr; attribute object[] obj; readonly attribute object[] robj; attribute object[] _object; attribute float[] f; readonly attribute float[] rf; }; interface TestAttrNullableArray { attribute byte[]? b; readonly attribute byte[]? rb; attribute octet[]? o; readonly attribute octet[]? ro; attribute short[]? s; readonly attribute short[]? rs; attribute unsigned short[]? 
us; readonly attribute unsigned short[]? rus; attribute long[]? l; readonly attribute long[]? rl; attribute unsigned long[]? ul; readonly attribute unsigned long[]? rul; attribute long long[]? ll; readonly attribute long long[]? rll; attribute unsigned long long[]? ull; readonly attribute unsigned long long[]? rull; attribute DOMString[]? str; readonly attribute DOMString[]? rstr; attribute object[]? obj; readonly attribute object[]? robj; attribute object[]? _object; attribute float[]? f; readonly attribute float[]? rf; }; interface TestAttrArrayOfNullableTypes { attribute byte?[] b; readonly attribute byte?[] rb; attribute octet?[] o; readonly attribute octet?[] ro; attribute short?[] s; readonly attribute short?[] rs; attribute unsigned short?[] us; readonly attribute unsigned short?[] rus; attribute long?[] l; readonly attribute long?[] rl; attribute unsigned long?[] ul; readonly attribute unsigned long?[] rul; attribute long long?[] ll; readonly attribute long long?[] rll; attribute unsigned long long?[] ull; readonly attribute unsigned long long?[] rull; attribute DOMString?[] str; readonly attribute DOMString?[] rstr; attribute object?[] obj; readonly attribute object?[] robj; attribute object?[] _object; attribute float?[] f; readonly attribute float?[] rf; }; interface TestAttrNullableArrayOfNullableTypes { attribute byte?[]? b; readonly attribute byte?[]? rb; attribute octet?[]? o; readonly attribute octet?[]? ro; attribute short?[]? s; readonly attribute short?[]? rs; attribute unsigned short?[]? us; readonly attribute unsigned short?[]? rus; attribute long?[]? l; readonly attribute long?[]? rl; attribute unsigned long?[]? ul; readonly attribute unsigned long?[]? rul; attribute long long?[]? ll; readonly attribute long long?[]? rll; attribute unsigned long long?[]? ull; readonly attribute unsigned long long?[]? rull; attribute DOMString?[]? str; readonly attribute DOMString?[]? rstr; attribute object?[]? obj; readonly attribute object?[]? robj; attribute object?[]? _object; attribute float?[]? f; readonly attribute float?[]? 
rf; }; """) results = parser.finish() def checkAttr(attr, QName, name, type, readonly): harness.ok(isinstance(attr, WebIDL.IDLAttribute), "Should be an IDLAttribute") harness.ok(attr.isAttr(), "Attr is an Attr") harness.ok(not attr.isMethod(), "Attr is not an method") harness.ok(not attr.isConst(), "Attr is not a const") harness.check(attr.identifier.QName(), QName, "Attr has the right QName") harness.check(attr.identifier.name, name, "Attr has the right name") harness.check(str(attr.type), type, "Attr has the right type") harness.check(attr.readonly, readonly, "Attr's readonly state is correct") harness.ok(True, "TestAttr interface parsed without error.") harness.check(len(results), 6, "Should be six productions.") iface = results[0] harness.ok(isinstance(iface, WebIDL.IDLInterface), "Should be an IDLInterface") harness.check(iface.identifier.QName(), "::TestAttr", "Interface has the right QName") harness.check(iface.identifier.name, "TestAttr", "Interface has the right name") harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData)) attrs = iface.members for i in range(len(attrs)): data = testData[i] attr = attrs[i] (QName, name, type, readonly) = data checkAttr(attr, QName % "", name, type % "", readonly) iface = results[1] harness.ok(isinstance(iface, WebIDL.IDLInterface), "Should be an IDLInterface") harness.check(iface.identifier.QName(), "::TestAttrNullable", "Interface has the right QName") harness.check(iface.identifier.name, "TestAttrNullable", "Interface has the right name") harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData)) attrs = iface.members for i in range(len(attrs)): data = testData[i] attr = attrs[i] (QName, name, type, readonly) = data checkAttr(attr, QName % "Nullable", name, type % "OrNull", readonly) iface = results[2] harness.ok(isinstance(iface, WebIDL.IDLInterface), "Should be an IDLInterface") harness.check(iface.identifier.QName(), "::TestAttrArray", "Interface has the right QName") harness.check(iface.identifier.name, "TestAttrArray", "Interface has the right name") harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData)) attrs = iface.members for i in range(len(attrs)): data = testData[i] attr = attrs[i] (QName, name, type, readonly) = data checkAttr(attr, QName % "Array", name, type % "Array", readonly) iface = results[3] harness.ok(isinstance(iface, WebIDL.IDLInterface), "Should be an IDLInterface") harness.check(iface.identifier.QName(), "::TestAttrNullableArray", "Interface has the right QName") harness.check(iface.identifier.name, "TestAttrNullableArray", "Interface has the right name") harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData)) attrs = iface.members for i in range(len(attrs)): data = testData[i] attr = attrs[i] (QName, name, type, readonly) = data checkAttr(attr, QName % "NullableArray", name, type % "ArrayOrNull", readonly) iface = results[4] harness.ok(isinstance(iface, WebIDL.IDLInterface), "Should be an IDLInterface") harness.check(iface.identifier.QName(), "::TestAttrArrayOfNullableTypes", "Interface has the right QName") harness.check(iface.identifier.name, "TestAttrArrayOfNullableTypes", "Interface has the right name") harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData)) attrs = iface.members for i in range(len(attrs)): data = testData[i] attr = attrs[i] (QName, name, type, readonly) = data checkAttr(attr, QName % "ArrayOfNullableTypes", name, type % "OrNullArray", readonly) iface = 
results[5] harness.ok(isinstance(iface, WebIDL.IDLInterface), "Should be an IDLInterface") harness.check(iface.identifier.QName(), "::TestAttrNullableArrayOfNullableTypes", "Interface has the right QName") harness.check(iface.identifier.name, "TestAttrNullableArrayOfNullableTypes", "Interface has the right name") harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData)) attrs = iface.members for i in range(len(attrs)): data = testData[i] attr = attrs[i] (QName, name, type, readonly) = data checkAttr(attr, QName % "NullableArrayOfNullableTypes", name, type % "OrNullArrayOrNull", readonly) parser = parser.reset() threw = False try: parser.parse(""" interface A { [SetterInfallible] readonly attribute boolean foo; }; """) results = parser.finish() except Exception, x: threw = True harness.ok(threw, "Should not allow [SetterInfallible] on readonly attributes")
mpl-2.0
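Each testData entry above is a (QName template, member name, type template, readonly) tuple whose "%s" placeholders are filled per interface variant (e.g. "Nullable" / "OrNull"). A trivial sketch of that expansion for the nullable case, using the first entry:

# Expansion of one testData entry for the TestAttrNullable interface (illustrative).
entry = ("::TestAttr%s::b", "b", "Byte%s", False)
qname_tmpl, name, type_tmpl, readonly = entry
print(qname_tmpl % "Nullable", name, type_tmpl % "OrNull", readonly)
# -> ::TestAttrNullable::b b ByteOrNull False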
azumimuo/family-xbmc-addon
script.module.t0mm0.common/lib/t0mm0/common/addon.py
15
26656
''' common XBMC Module Copyright (C) 2011 t0mm0 This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' import cgi import re import os try: import cPickle as pickle except: import pickle import unicodedata import urllib import xbmc import xbmcaddon import xbmcgui import xbmcplugin class Addon: ''' This class provides a lot of code that is used across many XBMC addons in the hope that it will simplify some of the common tasks an addon needs to perform. Mostly this is achieved by providing a wrapper around commonly used parts of :mod:`xbmc`, :mod:`xbmcaddon`, :mod:`xbmcgui` and :mod:`xbmcplugin`. You probably want to have exactly one instance of this class in your addon which you can call from anywhere in your code. Example:: import sys from t0mm0.common.addon import Addon addon = Addon('my.plugin.id', argv=sys.argv) ''' def __init__(self, addon_id, argv=None): ''' Args: addon_id (str): Your addon's id (eg. 'plugin.video.t0mm0.test'). Kwargs: argv (list): List of arguments passed to your addon if applicable (eg. sys.argv). ''' self.addon = xbmcaddon.Addon(id=addon_id) if argv: self.url = argv[0] self.handle = int(argv[1]) self.queries = self.parse_query(argv[2][1:]) def get_author(self): '''Returns the addon author as defined in ``addon.xml``.''' return self.addon.getAddonInfo('author') def get_changelog(self): '''Returns the addon changelog.''' return self.addon.getAddonInfo('changelog') def get_description(self): '''Returns the addon description as defined in ``addon.xml``.''' return self.addon.getAddonInfo('description') def get_disclaimer(self): '''Returns the addon disclaimer as defined in ``addon.xml``.''' return self.addon.getAddonInfo('disclaimer') def get_fanart(self): '''Returns the full path to the addon fanart.''' return self.addon.getAddonInfo('fanart') def get_icon(self): '''Returns the full path to the addon icon.''' return self.addon.getAddonInfo('icon') def get_id(self): '''Returns the addon id as defined in ``addon.xml``.''' return self.addon.getAddonInfo('id') def get_name(self): '''Returns the addon name as defined in ``addon.xml``.''' return self.addon.getAddonInfo('name') def get_path(self): '''Returns the full path to the addon directory.''' return self.addon.getAddonInfo('path') def get_profile(self): ''' Returns the full path to the addon profile directory (useful for storing files needed by the addon such as cookies). ''' return xbmc.translatePath(self.addon.getAddonInfo('profile')) def get_stars(self): '''Returns the number of stars for this addon.''' return self.addon.getAddonInfo('stars') def get_summary(self): '''Returns the addon summary as defined in ``addon.xml``.''' return self.addon.getAddonInfo('summary') def get_type(self): ''' Returns the addon summary as defined in ``addon.xml`` (eg. xbmc.python.pluginsource). 
''' return self.addon.getAddonInfo('type') def get_version(self): '''Returns the addon version as defined in ``addon.xml``.''' return self.addon.getAddonInfo('version') def get_setting(self, setting): ''' Returns an addon setting. Settings must be defined in your addon's ``resources/settings.xml`` file. Args: setting (str): Name of the setting to be retrieved. Returns: str containing the requested setting. ''' return self.addon.getSetting(setting) def get_string(self, string_id): ''' Returns a localized string. Strings must be defined in your addon's ``resources/language/[lang_name]/strings.xml`` file. Args: string_id (int): id of the translated string to retrieve. Returns: str containing the localized requested string. ''' return self.addon.getLocalizedString(string_id) def parse_query(self, query, defaults={'mode': 'main'}): ''' Parse a query string as used in a URL or passed to your addon by XBMC. Example: >>> addon.parse_query('name=test&type=basic') {'mode': 'main', 'name': 'test', 'type': 'basic'} Args: query (str): A query string. Kwargs: defaults (dict): A dictionary containing key/value pairs parsed from the query string. If a key is repeated in the query string its value will be a list containing all of that keys values. ''' queries = cgi.parse_qs(query) q = defaults for key, value in queries.items(): if len(value) == 1: q[key] = value[0] else: q[key] = value return q def build_plugin_url(self, queries): ''' Returns a ``plugin://`` URL which can be used to call the addon with the specified queries. Example: >>> addon.build_plugin_url({'name': 'test', 'type': 'basic'}) 'plugin://your.plugin.id/?name=test&type=basic' Args: queries (dict): A dctionary of keys/values to be added to the ``plugin://`` URL. Retuns: A string containing a fully formed ``plugin://`` URL. ''' out_dict = {} for k, v in queries.iteritems(): if isinstance(v, unicode): v = v.encode('utf8') elif isinstance(v, str): # Must be encoded in UTF-8 v.decode('utf8') out_dict[k] = v return self.url + '?' + urllib.urlencode(out_dict) def log(self, msg, level=xbmc.LOGNOTICE): ''' Writes a string to the XBMC log file. The addon name is inserted into the beginning of the message automatically to help you find relevent messages in the log file. The available log levels are defined in the :mod:`xbmc` module and are currently as follows:: xbmc.LOGDEBUG = 0 xbmc.LOGERROR = 4 xbmc.LOGFATAL = 6 xbmc.LOGINFO = 1 xbmc.LOGNONE = 7 xbmc.LOGNOTICE = 2 xbmc.LOGSEVERE = 5 xbmc.LOGWARNING = 3 Args: msg (str or unicode): The message to be written to the log file. Kwargs: level (int): The XBMC log level to write at. ''' #msg = unicodedata.normalize('NFKD', unicode(msg)).encode('ascii', # 'ignore') xbmc.log('%s: %s' % (self.get_name(), msg), level) def log_error(self, msg): ''' Convenience method to write to the XBMC log file at the ``xbmc.LOGERROR`` error level. Use when something has gone wrong in your addon code. This will show up in the log prefixed with 'ERROR:' whether you have debugging switched on or not. ''' self.log(msg, xbmc.LOGERROR) def log_debug(self, msg): ''' Convenience method to write to the XBMC log file at the ``xbmc.LOGDEBUG`` error level. Use this when you want to print out lots of detailed information that is only usefull for debugging. This will show up in the log only when debugging is enabled in the XBMC settings, and will be prefixed with 'DEBUG:'. ''' self.log(msg, xbmc.LOGDEBUG) def log_notice(self, msg): ''' Convenience method to write to the XBMC log file at the ``xbmc.LOGNOTICE`` error level. 
Use for general log messages. This will show up in the log prefixed with 'NOTICE:' whether you have debugging switched on or not. ''' self.log(msg, xbmc.LOGNOTICE) def show_ok_dialog(self, msg, title=None, is_error=False): ''' Display an XBMC dialog with a message and a single 'OK' button. The message is also written to the XBMC log file at the appropriate log level. .. warning:: Don't forget that `msg` must be a list of strings and not just a string even if you only want to display a single line! Example:: addon.show_ok_dialog(['My message'], 'My Addon') Args: msg (list of strings): The message to be displayed in the dialog. Only the first 3 list items will be displayed. Kwargs: title (str): String to be displayed as the title of the dialog box. Defaults to the addon name. is_error (bool): If ``True``, the log message will be written at the ERROR log level, otherwise NOTICE will be used. ''' if not title: title = self.get_name() log_msg = ' '.join(msg) while len(msg) < 3: msg.append('') if is_error: self.log_error(log_msg) else: self.log_notice(log_msg) xbmcgui.Dialog().ok(title, msg[0], msg[1], msg[2]) def show_error_dialog(self, msg): ''' Convenience method to show an XBMC dialog box with a single OK button and also write the message to the log file at the ERROR log level. The title of the dialog will be the addon's name with the prefix 'Error: '. .. warning:: Don't forget that `msg` must be a list of strings and not just a string even if you only want to display a single line! Args: msg (list of strings): The message to be displayed in the dialog. Only the first 3 list items will be displayed. ''' self.show_ok_dialog(msg, 'Error: %s' % self.get_name(), True) def show_small_popup(self, title='', msg='', delay=5000, image=''): ''' Displays a small popup box in the lower right corner. The default delay is 5 seconds. Code inspired by anarchintosh and daledude's Icefilms addon. Example:: import os logo = os.path.join(addon.get_path(), 'art','logo.jpg') addon.show_small_popup('MyAddonName','Is now loaded enjoy', 5000, logo) Kwargs: title (str): title to be displayed at the top of the box msg (str): Main message body delay (int): delay in milliseconds until it disapears image (str): Path to the image you want to display ''' xbmc.executebuiltin('XBMC.Notification("%s","%s",%d,"%s")' % (title, msg, delay, image)) def show_countdown(self, time_to_wait, title='', text=''): ''' Show a countdown dialog with a progress bar for XBMC while delaying execution. Necessary for some filehosters eg. megaupload The original version of this code came from Anarchintosh. Args: time_to_wait (int): number of seconds to pause for. Kwargs: title (str): Displayed in the title of the countdown dialog. Default is blank. text (str): A line of text to be displayed in the dialog. Default is blank. Returns: ``True`` if countdown is allowed to complete, ``False`` if the user cancelled the countdown. 
''' dialog = xbmcgui.DialogProgress() ret = dialog.create(title) self.log_notice('waiting %d secs' % time_to_wait) secs = 0 increment = 100 / time_to_wait cancelled = False while secs <= time_to_wait: if (dialog.iscanceled()): cancelled = True break if secs != 0: xbmc.sleep(1000) secs_left = time_to_wait - secs if secs_left == 0: percent = 100 else: percent = increment * secs remaining_display = ('Wait %d seconds for the ' + 'video stream to activate...') % secs_left dialog.update(percent, text, remaining_display) secs += 1 if cancelled == True: self.log_notice('countdown cancelled') return False else: self.log_debug('countdown finished waiting') return True def show_settings(self): '''Shows the settings dialog for this addon.''' self.addon.openSettings() def resolve_url(self, stream_url): ''' Tell XBMC that you have resolved a URL (or not!). This method should be called as follows: #. The user selects a list item that has previously had ``isPlayable`` set (this is true for items added with :meth:`add_item`, :meth:`add_music_item` or :meth:`add_music_item`) #. Your code resolves the item requested by the user to a media URL #. Your addon calls this method with the resolved URL Args: stream_url (str or ``False``): If a string, tell XBMC that the media URL ha been successfully resolved to stream_url. If ``False`` or an empty string tell XBMC the resolving failed and pop up an error messsage. ''' if stream_url: self.log_debug('resolved to: %s' % stream_url) xbmcplugin.setResolvedUrl(self.handle, True, xbmcgui.ListItem(path=stream_url)) else: self.show_error_dialog(['sorry, failed to resolve URL :(']) xbmcplugin.setResolvedUrl(self.handle, False, xbmcgui.ListItem()) def get_playlist(self, pl_type, new=False): ''' Return a :class:`xbmc.Playlist` object of the specified type. The available playlist types are defined in the :mod:`xbmc` module and are currently as follows:: xbmc.PLAYLIST_MUSIC = 0 xbmc.PLAYLIST_VIDEO = 1 .. seealso:: :meth:`get_music_playlist`, :meth:`get_video_playlist` Args: pl_type (int): The type of playlist to get. new (bool): If ``False`` (default), get the current :class:`xbmc.Playlist` object of the type specified. If ``True`` then return a new blank :class:`xbmc.Playlist`. Returns: A :class:`xbmc.Playlist` object. ''' pl = xbmc.PlayList(pl_type) if new: pl.clear() return pl def get_music_playlist(self, new=False): ''' Convenience method to return a music :class:`xbmc.Playlist` object. .. seealso:: :meth:`get_playlist` Kwargs: new (bool): If ``False`` (default), get the current music :class:`xbmc.Playlist` object. If ``True`` then return a new blank music :class:`xbmc.Playlist`. Returns: A :class:`xbmc.Playlist` object. ''' self.get_playlist(xbmc.PLAYLIST_MUSIC, new) def get_video_playlist(self, new=False): ''' Convenience method to return a video :class:`xbmc.Playlist` object. .. seealso:: :meth:`get_playlist` Kwargs: new (bool): If ``False`` (default), get the current video :class:`xbmc.Playlist` object. If ``True`` then return a new blank video :class:`xbmc.Playlist`. Returns: A :class:`xbmc.Playlist` object. ''' self.get_playlist(xbmc.PLAYLIST_VIDEO, new) def add_item(self, queries, infolabels, contextmenu_items='', context_replace=False, img='', fanart='', resolved=False, total_items=0, playlist=False, item_type='video', is_folder=False): ''' Adds an item to the list of entries to be displayed in XBMC or to a playlist. Use this method when you want users to be able to select this item to start playback of a media file. 
``queries`` is a dict that will be sent back to the addon when this item is selected:: add_item({'host': 'youtube.com', 'media_id': 'ABC123XYZ'}, {'title': 'A youtube vid'}) will add a link to:: plugin://your.plugin.id/?host=youtube.com&media_id=ABC123XYZ .. seealso:: :meth:`add_music_item`, :meth:`add_video_item`, :meth:`add_directory` Args: queries (dict): A set of keys/values to be sent to the addon when the user selects this item. infolabels (dict): A dictionary of information about this media (see the `XBMC Wiki InfoLabels entry <http://wiki.xbmc.org/?title=InfoLabels>`_). Kwargs: contextmenu_items (list): A list of contextmenu items context_replace (bool): To replace the xbmc default contextmenu items img (str): A URL to an image file to be used as an icon for this entry. fanart (str): A URL to a fanart image for this entry. resolved (str): If not empty, ``queries`` will be ignored and instead the added item will be the exact contentes of ``resolved``. total_items (int): Total number of items to be added in this list. If supplied it enables XBMC to show a progress bar as the list of items is being built. playlist (playlist object): If ``False`` (default), the item will be added to the list of entries to be displayed in this directory. If a playlist object is passed (see :meth:`get_playlist`) then the item will be added to the playlist instead item_type (str): The type of item to add (eg. 'music', 'video' or 'pictures') ''' infolabels = self.unescape_dict(infolabels) if not resolved: if not is_folder: queries['play'] = 'True' play = self.build_plugin_url(queries) else: play = resolved listitem = xbmcgui.ListItem(infolabels['title'], iconImage=img, thumbnailImage=img) listitem.setInfo(item_type, infolabels) listitem.setProperty('IsPlayable', 'true') listitem.setProperty('fanart_image', fanart) if contextmenu_items: listitem.addContextMenuItems(contextmenu_items, replaceItems=context_replace) if playlist is not False: self.log_debug('adding item: %s - %s to playlist' % \ (infolabels['title'], play)) playlist.add(play, listitem) else: self.log_debug('adding item: %s - %s' % (infolabels['title'], play)) xbmcplugin.addDirectoryItem(self.handle, play, listitem, isFolder=is_folder, totalItems=total_items) def add_video_item(self, queries, infolabels, contextmenu_items='', context_replace=False, img='', fanart='', resolved=False, total_items=0, playlist=False): ''' Convenience method to add a video item to the directory list or a playlist. See :meth:`add_item` for full infomation ''' self.add_item(queries, infolabels, contextmenu_items, context_replace, img, fanart, resolved, total_items, playlist, item_type='video') def add_music_item(self, queries, infolabels, contextmenu_items='', context_replace=False, img='', fanart='', resolved=False, total_items=0, playlist=False): ''' Convenience method to add a music item to the directory list or a playlist. See :meth:`add_item` for full infomation ''' self.add_item(queries, infolabels, contextmenu_items, img, context_replace, fanart, resolved, total_items, playlist, item_type='music') def add_directory(self, queries, infolabels, contextmenu_items='', context_replace=False, img='', fanart='', total_items=0, is_folder=True): ''' Convenience method to add a directory to the display list or a playlist. 
See :meth:`add_item` for full infomation ''' self.add_item(queries, infolabels, contextmenu_items, context_replace, img, fanart, total_items=total_items, resolved=self.build_plugin_url(queries), is_folder=is_folder) def end_of_directory(self): '''Tell XBMC that we have finished adding items to this directory.''' xbmcplugin.endOfDirectory(self.handle) def _decode_callback(self, matches): '''Callback method used by :meth:`decode`.''' id = matches.group(1) try: return unichr(int(id)) except: return id def decode(self, data): ''' Regular expression to convert entities such as ``&#044`` to the correct characters. It is called by :meth:`unescape` and so it is not required to call it directly. This method was found `on the web <http://stackoverflow.com/questions/1208916/decoding-html-entities-with-python/1208931#1208931>`_ Args: data (str): String to be cleaned. Returns: Cleaned string. ''' return re.sub("&#(\d+)(;|(?=\s))", self._decode_callback, data).strip() def unescape(self, text): ''' Decodes HTML entities in a string. You can add more entities to the ``rep`` dictionary. Args: text (str): String to be cleaned. Returns: Cleaned string. ''' try: text = self.decode(text) rep = {'&lt;': '<', '&gt;': '>', '&quot': '"', '&rsquo;': '\'', '&acute;': '\'', } for s, r in rep.items(): text = text.replace(s, r) # this has to be last: text = text.replace("&amp;", "&") #we don't want to fiddle with non-string types except TypeError: pass return text def unescape_dict(self, d): ''' Calls :meth:`unescape` on all values in a dictionary. Args: d (dict): A dictionary containing string values Returns: A dictionary with HTML entities removed from the values. ''' out = {} for key, value in d.items(): out[key] = self.unescape(value) return out def save_data(self, filename, data): ''' Saves the data structure using pickle. If the addon data path does not exist it will be automatically created. This save function has the same restrictions as the pickle module. Args: filename (string): name of the file you want to save data to. This file will be saved in your addon's profile directory. data (data object/string): you want to save. Returns: True on success False on failure ''' profile_path = self.get_profile() try: os.makedirs(profile_path) except: pass save_path = os.path.join(profile_path, filename) try: pickle.dump(data, open(save_path, 'wb')) return True except pickle.PickleError: return False def load_data(self,filename): ''' Load the data that was saved with save_data() and returns the data structure. Args: filename (string): Name of the file you want to load data from. This file will be loaded from your addons profile directory. Returns: Data stucture on success False on failure ''' profile_path = self.get_profile() load_path = os.path.join(profile_path, filename) print profile_path if not os.path.isfile(load_path): self.log_debug('%s does not exist' % load_path) return False try: data = pickle.load(open(load_path)) except: return False return data
gpl-2.0
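The Addon class docstring above shows construction from sys.argv; the hedged sketch below fills in a typical plugin flow around it. The plugin id, the 'mode' dispatch and the query/stream values are invented placeholders, and the code is only meaningful inside XBMC/Kodi, where sys.argv carries the plugin URL and handle.

# Hedged usage sketch for t0mm0.common.addon.Addon inside an XBMC/Kodi plugin
# (plugin id, mode handling and all query values are invented for illustration).
import sys
from t0mm0.common.addon import Addon

addon = Addon('plugin.video.example', argv=sys.argv)
mode = addon.queries['mode']          # parse_query() defaults this to 'main'

if mode == 'main':
    addon.add_video_item({'mode': 'play', 'host': 'youtube.com',
                          'media_id': 'ABC123XYZ'},
                         {'title': 'A youtube vid'})
    addon.end_of_directory()
elif mode == 'play':
    # normally the stream URL would be resolved from the queries first
    addon.resolve_url('http://example.com/stream.mp4')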
sikmir/QGIS
tests/src/python/test_qgsdelimitedtextprovider.py
2
30990
# -*- coding: utf-8 -*- """QGIS Unit tests for QgsDelimitedTextProvider. .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = 'Chris Crook' __date__ = '20/04/2013' __copyright__ = 'Copyright 2013, The QGIS Project' # This module provides unit test for the delimited text provider. It uses data files in # the testdata/delimitedtext directory. # # New tests can be created (or existing ones updated), but incorporating a createTest # call into the test. This will load the file and generate a test that the features # loaded from it are correct. It assumes that the data is correct at the time the # test is created. The new test is written to the test output file, and can be edited into # this module to implement the test. # # To recreate all tests, set rebuildTests to true import qgis # NOQA import os import re import tempfile import inspect import time import test_qgsdelimitedtextprovider_wanted as want # NOQA import collections rebuildTests = 'REBUILD_DELIMITED_TEXT_TESTS' in os.environ from qgis.PyQt.QtCore import QCoreApplication, QUrl, QObject from qgis.core import ( QgsProviderRegistry, QgsVectorLayer, QgsFeatureRequest, QgsRectangle, QgsApplication, QgsFeature) from qgis.testing import start_app, unittest from utilities import unitTestDataPath, compareWkt from providertestbase import ProviderTestCase start_app() TEST_DATA_DIR = unitTestDataPath() geomkey = "#geometry" fidkey = "#fid" try: # Qt 5 from qgis.PyQt.QtCore import QUrlQuery class MyUrl: def __init__(self, url): self.url = url self.query = QUrlQuery() @classmethod def fromLocalFile(cls, filename): return cls(QUrl.fromLocalFile(filename)) def addQueryItem(self, k, v): self.query.addQueryItem(k, v) def toString(self): urlstr = self.url.toString() querystr = self.query.toString(QUrl.FullyDecoded) if querystr != '': urlstr += '?' urlstr += querystr return urlstr except: MyUrl = QUrl def normalize_query_items_order(s): split_url = s.split('?') urlstr = split_url[0] if len(split_url) == 2: items_list = split_url[1].split('&') items_map = {} for item in items_list: split_item = item.split('=') items_map[split_item[0]] = split_item[1] first_arg = True for k in sorted(items_map.keys()): if first_arg: urlstr += '?' first_arg = False else: urlstr += '&' urlstr += k + '=' + items_map[k] return urlstr # Thought we could connect to messageReceived signal but doesn't seem to be available # in python :-( Not sure why? 
class MessageLogger(QObject): def __init__(self, tag=None): QObject.__init__(self) self.log = [] self.tag = tag def __enter__(self): QgsApplication.messageLog().messageReceived.connect(self.logMessage) return self def __exit__(self, type, value, traceback): QgsApplication.messageLog().messageReceived.disconnect(self.logMessage) def logMessage(self, msg, tag, level): if tag == self.tag or not self.tag: self.log.append(str(msg)) def messages(self): return self.log class TestQgsDelimitedTextProviderXY(unittest.TestCase, ProviderTestCase): @classmethod def setUpClass(cls): """Run before all tests""" # Create test layer srcpath = os.path.join(TEST_DATA_DIR, 'provider') cls.basetestfile = os.path.join(srcpath, 'delimited_xy.csv') url = MyUrl.fromLocalFile(cls.basetestfile) url.addQueryItem("crs", "epsg:4326") url.addQueryItem("type", "csv") url.addQueryItem("xField", "X") url.addQueryItem("yField", "Y") url.addQueryItem("spatialIndex", "no") url.addQueryItem("subsetIndex", "no") url.addQueryItem("watchFile", "no") cls.vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext') assert cls.vl.isValid(), "{} is invalid".format(cls.basetestfile) cls.source = cls.vl.dataProvider() @classmethod def tearDownClass(cls): """Run after all tests""" class TestQgsDelimitedTextProviderWKT(unittest.TestCase, ProviderTestCase): @classmethod def setUpClass(cls): """Run before all tests""" # Create test layer srcpath = os.path.join(TEST_DATA_DIR, 'provider') cls.basetestfile = os.path.join(srcpath, 'delimited_wkt.csv') url = MyUrl.fromLocalFile(cls.basetestfile) url.addQueryItem("crs", "epsg:4326") url.addQueryItem("type", "csv") url.addQueryItem("wktField", "wkt") url.addQueryItem("spatialIndex", "no") url.addQueryItem("subsetIndex", "no") url.addQueryItem("watchFile", "no") cls.vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext') assert cls.vl.isValid(), "{} is invalid".format(cls.basetestfile) cls.source = cls.vl.dataProvider() cls.basetestpolyfile = os.path.join(srcpath, 'delimited_wkt_poly.csv') url = MyUrl.fromLocalFile(cls.basetestpolyfile) url.addQueryItem("crs", "epsg:4326") url.addQueryItem("type", "csv") url.addQueryItem("wktField", "wkt") url.addQueryItem("spatialIndex", "no") url.addQueryItem("subsetIndex", "no") url.addQueryItem("watchFile", "no") cls.vl_poly = QgsVectorLayer(url.toString(), 'test_polygon', 'delimitedtext') assert cls.vl_poly.isValid(), "{} is invalid".format(cls.basetestpolyfile) cls.poly_provider = cls.vl_poly.dataProvider() @classmethod def tearDownClass(cls): """Run after all tests""" class TestQgsDelimitedTextProviderOther(unittest.TestCase): @classmethod def setUpClass(cls): """Run before all tests""" # toggle full ctest output to debug flaky CI test print('CTEST_FULL_OUTPUT') def layerData(self, layer, request={}, offset=0): # Retrieve the data for a layer first = True data = {} fields = [] fieldTypes = [] fr = QgsFeatureRequest() if request: if 'exact' in request and request['exact']: fr.setFlags(QgsFeatureRequest.ExactIntersect) if 'nogeom' in request and request['nogeom']: fr.setFlags(QgsFeatureRequest.NoGeometry) if 'fid' in request: fr.setFilterFid(request['fid']) elif 'extents' in request: fr.setFilterRect(QgsRectangle(*request['extents'])) if 'attributes' in request: fr.setSubsetOfAttributes(request['attributes']) # IMPORTANT - we do not use `for f in layer.getFeatures(fr):` as we need # to verify that existing attributes and geometry are correctly cleared # from the feature when calling nextFeature() it = layer.getFeatures(fr) f = QgsFeature() while 
it.nextFeature(f): if first: first = False for field in f.fields(): fields.append(str(field.name())) fieldTypes.append(str(field.typeName())) fielddata = dict((name, str(f[name])) for name in fields) g = f.geometry() if not g.isNull(): fielddata[geomkey] = str(g.asWkt()) else: fielddata[geomkey] = "None" fielddata[fidkey] = f.id() id = fielddata[fields[0]] description = fielddata[fields[1]] fielddata['id'] = id fielddata['description'] = description data[f.id() + offset] = fielddata if 'id' not in fields: fields.insert(0, 'id') if 'description' not in fields: fields.insert(1, 'description') fields.append(fidkey) fields.append(geomkey) return fields, fieldTypes, data def delimitedTextData(self, testname, filename, requests, verbose, **params): # Retrieve the data for a delimited text url # Create a layer for the specified file and query parameters # and return the data for the layer (fields, data) filepath = os.path.join(unitTestDataPath("delimitedtext"), filename) url = MyUrl.fromLocalFile(filepath) if not requests: requests = [{}] for k in list(params.keys()): url.addQueryItem(k, params[k]) urlstr = url.toString() log = [] with MessageLogger('DelimitedText') as logger: if verbose: print(testname) layer = QgsVectorLayer(urlstr, 'test', 'delimitedtext') uri = layer.dataProvider().dataSourceUri() if verbose: print(uri) basename = os.path.basename(filepath) if not basename.startswith('test'): basename = 'file' uri = re.sub(r'^file\:\/\/[^\?]*', 'file://' + basename, uri) fields = [] fieldTypes = [] data = {} if layer.isValid(): for nr, r in enumerate(requests): if verbose: print(("Processing request", nr + 1, repr(r))) if isinstance(r, collections.Callable): r(layer) if verbose: print("Request function executed") if isinstance(r, collections.Callable): continue rfields, rtypes, rdata = self.layerData(layer, r, nr * 1000) if len(rfields) > len(fields): fields = rfields fieldTypes = rtypes data.update(rdata) if not rdata: log.append("Request " + str(nr) + " did not return any data") if verbose: print(("Request returned", len(list(rdata.keys())), "features")) for msg in logger.messages(): filelogname = 'temp_file' if 'tmp' in filename.lower() else filename msg = re.sub(r'file\s+.*' + re.escape(filename), 'file ' + filelogname, msg) msg = msg.replace(filepath, filelogname) log.append(msg) return dict(fields=fields, fieldTypes=fieldTypes, data=data, log=log, uri=uri, geometryType=layer.geometryType()) def printWanted(self, testname, result): # Routine to export the result as a function definition print() print(("def {0}():".format(testname))) data = result['data'] log = result['log'] fields = result['fields'] prefix = ' ' # Dump the data for a layer - used to construct unit tests print((prefix + "wanted={}")) print((prefix + "wanted['uri']=" + repr(result['uri']))) print((prefix + "wanted['fieldTypes']=" + repr(result['fieldTypes']))) print((prefix + "wanted['geometryType']=" + repr(result['geometryType']))) print((prefix + "wanted['data']={")) for k in sorted(data.keys()): row = data[k] print((prefix + " {0}: {{".format(repr(k)))) for f in fields: print((prefix + " " + repr(f) + ": " + repr(row[f]) + ",")) print((prefix + " },")) print((prefix + " }")) print((prefix + "wanted['log']=[")) for msg in log: print((prefix + ' ' + repr(msg) + ',')) print((prefix + ' ]')) print(' return wanted') print('', flush=True) def recordDifference(self, record1, record2): # Compare a record defined as a dictionary for k in list(record1.keys()): if k not in record2: return "Field {0} is missing".format(k) r1k = 
record1[k] r2k = record2[k] if k == geomkey: if not compareWkt(r1k, r2k): return "Geometry differs: {0:.50} versus {1:.50}".format(r1k, r2k) else: if record1[k] != record2[k]: return "Field {0} differs: {1:.50} versus {2:.50}".format(k, repr(r1k), repr(r2k)) for k in list(record2.keys()): if k not in record1: return "Output contains extra field {0}".format(k) return '' def runTest(self, file, requests, **params): testname = inspect.stack()[1][3] verbose = not rebuildTests if verbose: print(("Running test:", testname)) result = self.delimitedTextData(testname, file, requests, verbose, **params) if rebuildTests: self.printWanted(testname, result) assert False, "Test not run - being rebuilt" try: wanted = eval('want.{0}()'.format(testname)) except: self.printWanted(testname, result) assert False, "Test results not available for {0}".format(testname) data = result['data'] log = result['log'] failures = [] if normalize_query_items_order(result['uri']) != normalize_query_items_order(wanted['uri']): msg = "Layer Uri ({0}) doesn't match expected ({1})".format( normalize_query_items_order(result['uri']), normalize_query_items_order(wanted['uri'])) print((' ' + msg)) failures.append(msg) if result['fieldTypes'] != wanted['fieldTypes']: msg = "Layer field types ({0}) doesn't match expected ({1})".format( result['fieldTypes'], wanted['fieldTypes']) failures.append(msg) if result['geometryType'] != wanted['geometryType']: msg = "Layer geometry type ({0}) doesn't match expected ({1})".format( result['geometryType'], wanted['geometryType']) failures.append(msg) wanted_data = wanted['data'] for id in sorted(wanted_data.keys()): print('getting wanted data') wrec = wanted_data[id] print('getting received data') trec = data.get(id, {}) print('getting description') description = wrec['description'] print('getting difference') difference = self.recordDifference(wrec, trec) if not difference: print((' {0}: Passed'.format(description))) else: print((' {0}: {1}'.format(description, difference))) failures.append(description + ': ' + difference) for id in sorted(data.keys()): if id not in wanted_data: msg = "Layer contains unexpected extra data with id: \"{0}\"".format(id) print((' ' + msg)) failures.append(msg) common = [] log_wanted = wanted['log'] for l in log: if l in log_wanted: common.append(l) for l in log_wanted: if l not in common: msg = 'Missing log message: ' + l print((' ' + msg)) failures.append(msg) for l in log: if l not in common: msg = 'Extra log message: ' + l print((' ' + msg)) failures.append(msg) if len(log) == len(common) and len(log_wanted) == len(common): print(' Message log correct: Passed') if failures: self.printWanted(testname, result) assert len(failures) == 0, "\n".join(failures) def test_001_provider_defined(self): registry = QgsProviderRegistry.instance() metadata = registry.providerMetadata('delimitedtext') assert metadata is not None, "Delimited text provider is not installed" def test_002_load_csv_file(self): # CSV file parsing filename = 'test.csv' params = {'geomType': 'none', 'type': 'csv'} requests = None self.runTest(filename, requests, **params) def test_003_field_naming(self): # Management of missing/duplicate/invalid field names filename = 'testfields.csv' params = {'geomType': 'none', 'type': 'csv'} requests = None self.runTest(filename, requests, **params) def test_004_max_fields(self): # Limiting maximum number of fields filename = 'testfields.csv' params = {'geomType': 'none', 'maxFields': '7', 'type': 'csv'} requests = None self.runTest(filename, requests, **params) 
def test_005_load_whitespace(self): # Whitespace file parsing filename = 'test.space' params = {'geomType': 'none', 'type': 'whitespace'} requests = None self.runTest(filename, requests, **params) def test_006_quote_escape(self): # Quote and escape file parsing filename = 'test.pipe' params = {'geomType': 'none', 'quote': '"', 'delimiter': '|', 'escape': '\\'} requests = None self.runTest(filename, requests, **params) def test_007_multiple_quote(self): # Multiple quote and escape characters filename = 'test.quote' params = {'geomType': 'none', 'quote': '\'"', 'type': 'csv', 'escape': '"\''} requests = None self.runTest(filename, requests, **params) def test_008_badly_formed_quotes(self): # Badly formed quoted fields filename = 'test.badquote' params = {'geomType': 'none', 'quote': '"', 'type': 'csv', 'escape': '"'} requests = None self.runTest(filename, requests, **params) def test_009_skip_lines(self): # Skip lines filename = 'test2.csv' params = {'geomType': 'none', 'useHeader': 'no', 'type': 'csv', 'skipLines': '2'} requests = None self.runTest(filename, requests, **params) def test_010_read_coordinates(self): # Skip lines filename = 'testpt.csv' params = {'yField': 'geom_y', 'xField': 'geom_x', 'type': 'csv'} requests = None self.runTest(filename, requests, **params) def test_011_read_wkt(self): # Reading WKT geometry field filename = 'testwkt.csv' params = {'delimiter': '|', 'type': 'csv', 'wktField': 'geom_wkt'} requests = None self.runTest(filename, requests, **params) def test_012_read_wkt_point(self): # Read WKT points filename = 'testwkt.csv' params = {'geomType': 'point', 'delimiter': '|', 'type': 'csv', 'wktField': 'geom_wkt'} requests = None self.runTest(filename, requests, **params) def test_013_read_wkt_line(self): # Read WKT linestrings filename = 'testwkt.csv' params = {'geomType': 'line', 'delimiter': '|', 'type': 'csv', 'wktField': 'geom_wkt'} requests = None self.runTest(filename, requests, **params) def test_014_read_wkt_polygon(self): # Read WKT polygons filename = 'testwkt.csv' params = {'geomType': 'polygon', 'delimiter': '|', 'type': 'csv', 'wktField': 'geom_wkt'} requests = None self.runTest(filename, requests, **params) def test_015_read_dms_xy(self): # Reading degrees/minutes/seconds angles filename = 'testdms.csv' params = {'yField': 'lat', 'xField': 'lon', 'type': 'csv', 'xyDms': 'yes'} requests = None self.runTest(filename, requests, **params) def test_016_decimal_point(self): # Reading degrees/minutes/seconds angles filename = 'testdp.csv' params = {'yField': 'geom_y', 'xField': 'geom_x', 'type': 'csv', 'delimiter': ';', 'decimalPoint': ','} requests = None self.runTest(filename, requests, **params) def test_017_regular_expression_1(self): # Parsing regular expression delimiter filename = 'testre.txt' params = {'geomType': 'none', 'trimFields': 'Y', 'delimiter': 'RE(?:GEXP)?', 'type': 'regexp'} requests = None self.runTest(filename, requests, **params) def test_018_regular_expression_2(self): # Parsing regular expression delimiter with capture groups filename = 'testre.txt' params = {'geomType': 'none', 'trimFields': 'Y', 'delimiter': '(RE)(GEXP)?', 'type': 'regexp'} requests = None self.runTest(filename, requests, **params) def test_019_regular_expression_3(self): # Parsing anchored regular expression filename = 'testre2.txt' params = {'geomType': 'none', 'trimFields': 'Y', 'delimiter': '^(.{5})(.{30})(.{5,})', 'type': 'regexp'} requests = None self.runTest(filename, requests, **params) def test_020_regular_expression_4(self): # Parsing zero length re 
filename = 'testre3.txt' params = {'geomType': 'none', 'delimiter': 'x?', 'type': 'regexp'} requests = None self.runTest(filename, requests, **params) def test_021_regular_expression_5(self): # Parsing zero length re 2 filename = 'testre3.txt' params = {'geomType': 'none', 'delimiter': '\\b', 'type': 'regexp'} requests = None self.runTest(filename, requests, **params) def test_022_utf8_encoded_file(self): # UTF8 encoded file test filename = 'testutf8.csv' params = {'geomType': 'none', 'delimiter': '|', 'type': 'csv', 'encoding': 'utf-8'} requests = None self.runTest(filename, requests, **params) def test_023_latin1_encoded_file(self): # Latin1 encoded file test filename = 'testlatin1.csv' params = {'geomType': 'none', 'delimiter': '|', 'type': 'csv', 'encoding': 'latin1'} requests = None self.runTest(filename, requests, **params) def test_024_filter_rect_xy(self): # Filter extents on XY layer filename = 'testextpt.txt' params = {'yField': 'y', 'delimiter': '|', 'type': 'csv', 'xField': 'x'} requests = [ {'extents': [10, 30, 30, 50]}, {'extents': [10, 30, 30, 50], 'exact': 1}, {'extents': [110, 130, 130, 150]}] self.runTest(filename, requests, **params) def test_025_filter_rect_wkt(self): # Filter extents on WKT layer filename = 'testextw.txt' params = {'delimiter': '|', 'type': 'csv', 'wktField': 'wkt'} requests = [ {'extents': [10, 30, 30, 50]}, {'extents': [10, 30, 30, 50], 'exact': 1}, {'extents': [110, 130, 130, 150]}] self.runTest(filename, requests, **params) def test_026_filter_fid(self): # Filter on feature id filename = 'test.csv' params = {'geomType': 'none', 'type': 'csv'} requests = [ {'fid': 3}, {'fid': 9}, {'fid': 20}, {'fid': 3}] self.runTest(filename, requests, **params) def test_027_filter_attributes(self): # Filter on attributes filename = 'test.csv' params = {'geomType': 'none', 'type': 'csv'} requests = [ {'attributes': [1, 3]}, {'fid': 9}, {'attributes': [1, 3], 'fid': 9}, {'attributes': [3, 1], 'fid': 9}, {'attributes': [1, 3, 7], 'fid': 9}, {'attributes': [], 'fid': 9}] self.runTest(filename, requests, **params) def test_028_substring_test(self): # CSV file parsing filename = 'test.csv' params = {'geomType': 'none', 'subset': 'id % 2 = 1', 'type': 'csv'} requests = None self.runTest(filename, requests, **params) def test_029_file_watcher(self): # Testing file watcher (filehandle, filename) = tempfile.mkstemp() if os.name == "nt": filename = filename.replace("\\", "/") with os.fdopen(filehandle, "w") as f: f.write("id,name\n1,rabbit\n2,pooh\n") def appendfile(layer): with open(filename, 'a') as f: f.write('3,tiger\n') # print "Appended to file - sleeping" time.sleep(1) QCoreApplication.instance().processEvents() def rewritefile(layer): with open(filename, 'w') as f: f.write("name,size,id\ntoad,small,5\nmole,medium,6\nbadger,big,7\n") # print "Rewritten file - sleeping" time.sleep(1) QCoreApplication.instance().processEvents() def deletefile(layer): try: os.remove(filename) except: open(filename, "w").close() assert os.path.getsize(filename) == 0, "removal and truncation of {} failed".format(filename) # print "Deleted file - sleeping" time.sleep(1) QCoreApplication.instance().processEvents() params = {'geomType': 'none', 'type': 'csv', 'watchFile': 'yes'} requests = [ {'fid': 3}, {}, {'fid': 7}, appendfile, {'fid': 3}, {'fid': 4}, {}, {'fid': 7}, rewritefile, {'fid': 2}, {}, {'fid': 7}, deletefile, {'fid': 2}, {}, rewritefile, {'fid': 2}, ] self.runTest(filename, requests, **params) def test_030_filter_rect_xy_spatial_index(self): # Filter extents on XY layer with 
spatial index filename = 'testextpt.txt' params = {'yField': 'y', 'delimiter': '|', 'type': 'csv', 'xField': 'x', 'spatialIndex': 'Y'} requests = [ {'extents': [10, 30, 30, 50]}, {'extents': [10, 30, 30, 50], 'exact': 1}, {'extents': [110, 130, 130, 150]}, {}, {'extents': [-1000, -1000, 1000, 1000]} ] self.runTest(filename, requests, **params) def test_031_filter_rect_wkt_spatial_index(self): # Filter extents on WKT layer with spatial index filename = 'testextw.txt' params = {'delimiter': '|', 'type': 'csv', 'wktField': 'wkt', 'spatialIndex': 'Y'} requests = [ {'extents': [10, 30, 30, 50]}, {'extents': [10, 30, 30, 50], 'exact': 1}, {'extents': [110, 130, 130, 150]}, {}, {'extents': [-1000, -1000, 1000, 1000]} ] self.runTest(filename, requests, **params) def test_032_filter_rect_wkt_create_spatial_index(self): # Filter extents on WKT layer building spatial index filename = 'testextw.txt' params = {'delimiter': '|', 'type': 'csv', 'wktField': 'wkt'} requests = [ {'extents': [10, 30, 30, 50]}, {}, lambda layer: layer.dataProvider().createSpatialIndex(), {'extents': [10, 30, 30, 50]}, {'extents': [10, 30, 30, 50], 'exact': 1}, {'extents': [110, 130, 130, 150]}, {}, {'extents': [-1000, -1000, 1000, 1000]} ] self.runTest(filename, requests, **params) def test_033_reset_subset_string(self): # CSV file parsing filename = 'test.csv' params = {'geomType': 'none', 'type': 'csv'} requests = [ {}, lambda layer: layer.dataProvider().setSubsetString("id % 2 = 1", True), {}, lambda layer: layer.dataProvider().setSubsetString("id = 6", False), {}, lambda layer: layer.dataProvider().setSubsetString("id = 3", False), {}, lambda layer: layer.dataProvider().setSubsetString("id % 2 = 1", True), {}, lambda layer: layer.dataProvider().setSubsetString("id % 2 = 0", True), {}, ] self.runTest(filename, requests, **params) def test_034_csvt_file(self): # CSVT field types filename = 'testcsvt.csv' params = {'geomType': 'none', 'type': 'csv'} requests = None self.runTest(filename, requests, **params) def test_035_csvt_file2(self): # CSV field types 2 filename = 'testcsvt2.txt' params = {'geomType': 'none', 'type': 'csv', 'delimiter': '|'} requests = None self.runTest(filename, requests, **params) def test_036_csvt_file_invalid_types(self): # CSV field types invalid string format filename = 'testcsvt3.csv' params = {'geomType': 'none', 'type': 'csv'} requests = None self.runTest(filename, requests, **params) def test_037_csvt_file_invalid_file(self): # CSV field types invalid file filename = 'testcsvt4.csv' params = {'geomType': 'none', 'type': 'csv'} requests = None self.runTest(filename, requests, **params) def test_038_type_inference(self): # Skip lines filename = 'testtypes.csv' params = {'yField': 'lat', 'xField': 'lon', 'type': 'csv'} requests = None self.runTest(filename, requests, **params) def test_039_issue_13749(self): # First record contains missing geometry filename = 'test13749.csv' params = {'yField': 'geom_y', 'xField': 'geom_x', 'type': 'csv'} requests = None self.runTest(filename, requests, **params) def test_040_issue_14666(self): # x/y containing some null geometries filename = 'test14666.csv' params = {'yField': 'y', 'xField': 'x', 'type': 'csv', 'delimiter': '\\t'} requests = None self.runTest(filename, requests, **params) def test_041_no_detect_type(self): # CSV file parsing # Skip lines filename = 'testtypes.csv' params = {'yField': 'lat', 'xField': 'lon', 'type': 'csv', 'detectTypes': 'no'} requests = None self.runTest(filename, requests, **params) def test_042_no_detect_types_csvt(self): # 
CSVT field types filename = 'testcsvt.csv' params = {'geomType': 'none', 'type': 'csv', 'detectTypes': 'no'} requests = None self.runTest(filename, requests, **params) def test_043_decodeuri(self): # URI decoding filename = '/home/to/path/test.csv' uri = 'file://{}?geomType=none'.format(filename) registry = QgsProviderRegistry.instance() components = registry.decodeUri('delimitedtext', uri) self.assertEqual(components['path'], filename) if __name__ == '__main__': unittest.main()
gpl-2.0
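The tests above drive the delimited text provider through layer URIs assembled from query items (type, delimiter, xField/yField or wktField, spatialIndex, and so on). A minimal sketch of the same URI convention outside the test harness; the file path and column names are placeholders:

from qgis.core import QgsVectorLayer

uri = ('file:///tmp/points.csv?type=csv&xField=lon&yField=lat'
       '&crs=epsg:4326&spatialIndex=no&subsetIndex=no&watchFile=no')
layer = QgsVectorLayer(uri, 'points', 'delimitedtext')
assert layer.isValid(), 'layer failed to load'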
alisaifee/djlimiter
djlimiter/middleware.py
1
5065
import importlib import logging from django.conf import settings from django.core.urlresolvers import resolve from limits.storage import storage_from_string from limits.strategies import STRATEGIES from limits.errors import ConfigurationError from limits.util import parse_many import six from .decorators import DECORATED, EXEMPT from .util import get_ipaddr, LimitWrapper, BlackHoleHandler from .errors import RateLimitExceeded class C: ENABLED = "RATELIMIT_ENABLED" HEADERS_ENABLED = "RATELIMIT_HEADERS_ENABLED" STORAGE_URL = "RATELIMIT_STORAGE_URL" STRATEGY = "RATELIMIT_STRATEGY" GLOBAL_LIMITS = "RATELIMIT_GLOBAL" HEADER_LIMIT = "RATELIMIT_HEADER_LIMIT" HEADER_REMAINING = "RATELIMIT_HEADER_REMAINING" HEADER_RESET = "RATELIMIT_HEADER_RESET" DEFAULT_KEY_FUNCTION = "RATELIMIT_KEY_FUNCTION" CALLBACK = "RATELIMIT_CALLBACK" class HEADERS: RESET = 1 REMAINING = 2 LIMIT = 3 class Limiter(object): """ """ def __init__(self): conf_limits = getattr(settings, C.GLOBAL_LIMITS, "") callback = getattr(settings, C.CALLBACK, self.__raise_exceeded ) self.enabled = getattr(settings, C.ENABLED, True) self.headers_enabled = getattr(settings, C.HEADERS_ENABLED, False) self.strategy = getattr(settings, C.STRATEGY, 'fixed-window') if self.strategy not in STRATEGIES: raise ConfigurationError("Invalid rate limiting strategy %s" % self.strategy) self.storage = storage_from_string(getattr(settings, C.STORAGE_URL, "memory://")) self.limiter = STRATEGIES[self.strategy](self.storage) self.key_function = getattr(settings, C.DEFAULT_KEY_FUNCTION, get_ipaddr) self.global_limits = [] if conf_limits: self.global_limits = [ LimitWrapper( list(parse_many(conf_limits)), self.key_function, None, False ) ] self.header_mapping = { HEADERS.RESET : getattr(settings,C.HEADER_RESET, "X-RateLimit-Reset"), HEADERS.REMAINING : getattr(settings,C.HEADER_REMAINING, "X-RateLimit-Remaining"), HEADERS.LIMIT : getattr(settings,C.HEADER_LIMIT, "X-RateLimit-Limit"), } self.logger = logging.getLogger("djlimiter") self.logger.addHandler(BlackHoleHandler()) if isinstance(callback, six.string_types): mod, _, name = callback.rpartition(".") try: self.callback = getattr(importlib.import_module(mod), name) except AttributeError: self.logger.error( "Unable to load callback function %s. 
Rate limiting disabled", callback ) self.enabled = False else: self.callback = callback def __raise_exceeded(self, limit): return RateLimitExceeded(limit) def process_request(self, request): """ :param request: :return: """ func = resolve(request.path).func name = resolve(request.path).view_name if func else "" limits = self.global_limits if ( not self.enabled or func in EXEMPT or not name ): return if func in DECORATED: if func in DECORATED: limits = DECORATED[func] limit_for_header = None failed_limit = None for lim in limits: limit_scope = lim.get_scope(request) or name cur_limits = lim.get_limits(request) for cur_limit in cur_limits: if not limit_for_header or cur_limit < limit_for_header[0]: limit_for_header = (cur_limit, (lim.key_func or self.key_function)(request), limit_scope) if lim.per_method: limit_scope += ":%s" % request.method if not self.limiter.hit(cur_limit, (lim.key_func or self.key_function)(request), limit_scope): self.logger.info("Rate limit exceeded for %s (%s)", name, cur_limit) failed_limit = cur_limit limit_for_header = (cur_limit, (lim.key_func or self.key_function)(request), limit_scope) break if failed_limit: break request.view_rate_limit = limit_for_header if failed_limit: return self.callback(failed_limit) def process_response(self, request, response): """ :param request: :param response: :return: """ current_limit = getattr(request, "view_rate_limit", None) if self.headers_enabled and current_limit: window_stats = self.limiter.get_window_stats(*current_limit) response[self.header_mapping[HEADERS.LIMIT]] = str(current_limit[0].amount) response[self.header_mapping[HEADERS.REMAINING]] = window_stats[1] response[self.header_mapping[HEADERS.RESET]] = window_stats[0] return response
mit
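The Limiter middleware above is configured entirely through Django settings whose names are collected in the C class. A hedged settings sketch; the values are illustrative, the rate string is whatever limits.util.parse_many accepts, and the middleware path assumes the package layout shown above:

# settings.py
MIDDLEWARE_CLASSES += ('djlimiter.middleware.Limiter',)

RATELIMIT_ENABLED = True
RATELIMIT_GLOBAL = '100/minute'            # parsed with limits.util.parse_many
RATELIMIT_STRATEGY = 'fixed-window'        # must be a key in limits.strategies.STRATEGIES
RATELIMIT_STORAGE_URL = 'memory://'        # e.g. a redis:// URL in production
RATELIMIT_HEADERS_ENABLED = True           # adds X-RateLimit-Limit/-Remaining/-Reset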
vshtanko/scikit-learn
sklearn/utils/sparsefuncs.py
220
11424
# Authors: Manoj Kumar # Thomas Unterthiner # License: BSD 3 clause import scipy.sparse as sp import numpy as np from .fixes import sparse_min_max, bincount from .sparsefuncs_fast import csr_mean_variance_axis0 as _csr_mean_var_axis0 from .sparsefuncs_fast import csc_mean_variance_axis0 as _csc_mean_var_axis0 def _raise_typeerror(X): """Raises a TypeError if X is not a CSR or CSC matrix""" input_type = X.format if sp.issparse(X) else type(X) err = "Expected a CSR or CSC sparse matrix, got %s." % input_type raise TypeError(err) def inplace_csr_column_scale(X, scale): """Inplace column scaling of a CSR matrix. Scale each feature of the data matrix by multiplying with specific scale provided by the caller assuming a (n_samples, n_features) shape. Parameters ---------- X : CSR matrix with shape (n_samples, n_features) Matrix to normalize using the variance of the features. scale : float array with shape (n_features,) Array of precomputed feature-wise values to use for scaling. """ assert scale.shape[0] == X.shape[1] X.data *= scale.take(X.indices, mode='clip') def inplace_csr_row_scale(X, scale): """ Inplace row scaling of a CSR matrix. Scale each sample of the data matrix by multiplying with specific scale provided by the caller assuming a (n_samples, n_features) shape. Parameters ---------- X : CSR sparse matrix, shape (n_samples, n_features) Matrix to be scaled. scale : float array with shape (n_samples,) Array of precomputed sample-wise values to use for scaling. """ assert scale.shape[0] == X.shape[0] X.data *= np.repeat(scale, np.diff(X.indptr)) def mean_variance_axis(X, axis): """Compute mean and variance along axis 0 on a CSR or CSC matrix Parameters ---------- X: CSR or CSC sparse matrix, shape (n_samples, n_features) Input data. axis: int (either 0 or 1) Axis along which the axis should be computed. Returns ------- means: float array with shape (n_features,) Feature-wise means variances: float array with shape (n_features,) Feature-wise variances """ if axis not in (0, 1): raise ValueError( "Unknown axis value: %d. Use 0 for rows, or 1 for columns" % axis) if isinstance(X, sp.csr_matrix): if axis == 0: return _csr_mean_var_axis0(X) else: return _csc_mean_var_axis0(X.T) elif isinstance(X, sp.csc_matrix): if axis == 0: return _csc_mean_var_axis0(X) else: return _csr_mean_var_axis0(X.T) else: _raise_typeerror(X) def inplace_column_scale(X, scale): """Inplace column scaling of a CSC/CSR matrix. Scale each feature of the data matrix by multiplying with specific scale provided by the caller assuming a (n_samples, n_features) shape. Parameters ---------- X: CSC or CSR matrix with shape (n_samples, n_features) Matrix to normalize using the variance of the features. scale: float array with shape (n_features,) Array of precomputed feature-wise values to use for scaling. """ if isinstance(X, sp.csc_matrix): inplace_csr_row_scale(X.T, scale) elif isinstance(X, sp.csr_matrix): inplace_csr_column_scale(X, scale) else: _raise_typeerror(X) def inplace_row_scale(X, scale): """ Inplace row scaling of a CSR or CSC matrix. Scale each row of the data matrix by multiplying with specific scale provided by the caller assuming a (n_samples, n_features) shape. Parameters ---------- X : CSR or CSC sparse matrix, shape (n_samples, n_features) Matrix to be scaled. scale : float array with shape (n_features,) Array of precomputed sample-wise values to use for scaling. 
""" if isinstance(X, sp.csc_matrix): inplace_csr_column_scale(X.T, scale) elif isinstance(X, sp.csr_matrix): inplace_csr_row_scale(X, scale) else: _raise_typeerror(X) def inplace_swap_row_csc(X, m, n): """ Swaps two rows of a CSC matrix in-place. Parameters ---------- X: scipy.sparse.csc_matrix, shape=(n_samples, n_features) Matrix whose two rows are to be swapped. m: int Index of the row of X to be swapped. n: int Index of the row of X to be swapped. """ for t in [m, n]: if isinstance(t, np.ndarray): raise TypeError("m and n should be valid integers") if m < 0: m += X.shape[0] if n < 0: n += X.shape[0] m_mask = X.indices == m X.indices[X.indices == n] = m X.indices[m_mask] = n def inplace_swap_row_csr(X, m, n): """ Swaps two rows of a CSR matrix in-place. Parameters ---------- X: scipy.sparse.csr_matrix, shape=(n_samples, n_features) Matrix whose two rows are to be swapped. m: int Index of the row of X to be swapped. n: int Index of the row of X to be swapped. """ for t in [m, n]: if isinstance(t, np.ndarray): raise TypeError("m and n should be valid integers") if m < 0: m += X.shape[0] if n < 0: n += X.shape[0] # The following swapping makes life easier since m is assumed to be the # smaller integer below. if m > n: m, n = n, m indptr = X.indptr m_start = indptr[m] m_stop = indptr[m + 1] n_start = indptr[n] n_stop = indptr[n + 1] nz_m = m_stop - m_start nz_n = n_stop - n_start if nz_m != nz_n: # Modify indptr first X.indptr[m + 2:n] += nz_n - nz_m X.indptr[m + 1] = m_start + nz_n X.indptr[n] = n_stop - nz_m X.indices = np.concatenate([X.indices[:m_start], X.indices[n_start:n_stop], X.indices[m_stop:n_start], X.indices[m_start:m_stop], X.indices[n_stop:]]) X.data = np.concatenate([X.data[:m_start], X.data[n_start:n_stop], X.data[m_stop:n_start], X.data[m_start:m_stop], X.data[n_stop:]]) def inplace_swap_row(X, m, n): """ Swaps two rows of a CSC/CSR matrix in-place. Parameters ---------- X : CSR or CSC sparse matrix, shape=(n_samples, n_features) Matrix whose two rows are to be swapped. m: int Index of the row of X to be swapped. n: int Index of the row of X to be swapped. """ if isinstance(X, sp.csc_matrix): return inplace_swap_row_csc(X, m, n) elif isinstance(X, sp.csr_matrix): return inplace_swap_row_csr(X, m, n) else: _raise_typeerror(X) def inplace_swap_column(X, m, n): """ Swaps two columns of a CSC/CSR matrix in-place. Parameters ---------- X : CSR or CSC sparse matrix, shape=(n_samples, n_features) Matrix whose two columns are to be swapped. m: int Index of the column of X to be swapped. n : int Index of the column of X to be swapped. """ if m < 0: m += X.shape[1] if n < 0: n += X.shape[1] if isinstance(X, sp.csc_matrix): return inplace_swap_row_csr(X, m, n) elif isinstance(X, sp.csr_matrix): return inplace_swap_row_csc(X, m, n) else: _raise_typeerror(X) def min_max_axis(X, axis): """Compute minimum and maximum along an axis on a CSR or CSC matrix Parameters ---------- X : CSR or CSC sparse matrix, shape (n_samples, n_features) Input data. axis: int (either 0 or 1) Axis along which the axis should be computed. Returns ------- mins: float array with shape (n_features,) Feature-wise minima maxs: float array with shape (n_features,) Feature-wise maxima """ if isinstance(X, sp.csr_matrix) or isinstance(X, sp.csc_matrix): return sparse_min_max(X, axis=axis) else: _raise_typeerror(X) def count_nonzero(X, axis=None, sample_weight=None): """A variant of X.getnnz() with extension to weighting on axis 0 Useful in efficiently calculating multilabel metrics. 
Parameters ---------- X : CSR sparse matrix, shape = (n_samples, n_labels) Input data. axis : None, 0 or 1 The axis on which the data is aggregated. sample_weight : array, shape = (n_samples,), optional Weight for each row of X. """ if axis == -1: axis = 1 elif axis == -2: axis = 0 elif X.format != 'csr': raise TypeError('Expected CSR sparse format, got {0}'.format(X.format)) # We rely here on the fact that np.diff(Y.indptr) for a CSR # will return the number of nonzero entries in each row. # A bincount over Y.indices will return the number of nonzeros # in each column. See ``csr_matrix.getnnz`` in scipy >= 0.14. if axis is None: if sample_weight is None: return X.nnz else: return np.dot(np.diff(X.indptr), sample_weight) elif axis == 1: out = np.diff(X.indptr) if sample_weight is None: return out return out * sample_weight elif axis == 0: if sample_weight is None: return bincount(X.indices, minlength=X.shape[1]) else: weights = np.repeat(sample_weight, np.diff(X.indptr)) return bincount(X.indices, minlength=X.shape[1], weights=weights) else: raise ValueError('Unsupported axis: {0}'.format(axis)) def _get_median(data, n_zeros): """Compute the median of data with n_zeros additional zeros. This function is used to support sparse matrices; it modifies data in-place """ n_elems = len(data) + n_zeros if not n_elems: return np.nan n_negative = np.count_nonzero(data < 0) middle, is_odd = divmod(n_elems, 2) data.sort() if is_odd: return _get_elem_at_rank(middle, data, n_negative, n_zeros) return (_get_elem_at_rank(middle - 1, data, n_negative, n_zeros) + _get_elem_at_rank(middle, data, n_negative, n_zeros)) / 2. def _get_elem_at_rank(rank, data, n_negative, n_zeros): """Find the value in data augmented with n_zeros for the given rank""" if rank < n_negative: return data[rank] if rank - n_negative < n_zeros: return 0 return data[rank - n_zeros] def csc_median_axis_0(X): """Find the median across axis 0 of a CSC matrix. It is equivalent to doing np.median(X, axis=0). Parameters ---------- X : CSC sparse matrix, shape (n_samples, n_features) Input data. Returns ------- median : ndarray, shape (n_features,) Median. """ if not isinstance(X, sp.csc_matrix): raise TypeError("Expected matrix of CSC format, got %s" % X.format) indptr = X.indptr n_samples, n_features = X.shape median = np.zeros(n_features) for f_ind, (start, end) in enumerate(zip(indptr[:-1], indptr[1:])): # Prevent modifying X in place data = np.copy(X.data[start: end]) nz = n_samples - data.size median[f_ind] = _get_median(data, nz) return median
bsd-3-clause
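A short usage sketch for the helpers above, showing feature-wise statistics and in-place column scaling on a CSR matrix; the data values are arbitrary:

import numpy as np
import scipy.sparse as sp
from sklearn.utils.sparsefuncs import mean_variance_axis, inplace_column_scale

X = sp.csr_matrix(np.array([[1., 0., 2.],
                            [0., 3., 0.],
                            [4., 0., 5.]]))
means, variances = mean_variance_axis(X, axis=0)        # feature-wise mean and variance
inplace_column_scale(X, 1.0 / np.sqrt(variances))       # unit-variance scaling, done in place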
a40223217/2015cdb_g6team3
static/Brython3.1.1-20150328-091302/Lib/importlib/__init__.py
610
3472
"""A pure Python implementation of import.""" __all__ = ['__import__', 'import_module', 'invalidate_caches'] # Bootstrap help ##################################################### # Until bootstrapping is complete, DO NOT import any modules that attempt # to import importlib._bootstrap (directly or indirectly). Since this # partially initialised package would be present in sys.modules, those # modules would get an uninitialised copy of the source version, instead # of a fully initialised version (either the frozen one or the one # initialised below if the frozen one is not available). import _imp # Just the builtin component, NOT the full Python module import sys from . import machinery #fix me brython try: import _frozen_importlib as _bootstrap except ImportError: from . import _bootstrap _bootstrap._setup(sys, _imp) else: # importlib._bootstrap is the built-in import, ensure we don't create # a second copy of the module. _bootstrap.__name__ = 'importlib._bootstrap' _bootstrap.__package__ = 'importlib' _bootstrap.__file__ = __file__.replace('__init__.py', '_bootstrap.py') sys.modules['importlib._bootstrap'] = _bootstrap # To simplify imports in test code _w_long = _bootstrap._w_long _r_long = _bootstrap._r_long # Fully bootstrapped at this point, import whatever you like, circular # dependencies and startup overhead minimisation permitting :) # Public API ######################################################### from ._bootstrap import __import__ def invalidate_caches(): """Call the invalidate_caches() method on all meta path finders stored in sys.meta_path (where implemented).""" for finder in sys.meta_path: if hasattr(finder, 'invalidate_caches'): finder.invalidate_caches() def find_loader(name, path=None): """Find the loader for the specified module. First, sys.modules is checked to see if the module was already imported. If so, then sys.modules[name].__loader__ is returned. If that happens to be set to None, then ValueError is raised. If the module is not in sys.modules, then sys.meta_path is searched for a suitable loader with the value of 'path' given to the finders. None is returned if no loader could be found. Dotted names do not have their parent packages implicitly imported. You will most likely need to explicitly import all parent packages in the proper order for a submodule to get the correct loader. """ try: loader = sys.modules[name].__loader__ if loader is None: raise ValueError('{}.__loader__ is None'.format(name)) else: return loader except KeyError: pass return _bootstrap._find_module(name, path) def import_module(name, package=None): """Import a module. The 'package' argument is required when performing a relative import. It specifies the package to use as the anchor point from which to resolve the relative import to an absolute import. """ level = 0 if name.startswith('.'): if not package: raise TypeError("relative imports require the 'package' argument") for character in name: if character != '.': break level += 1 return _bootstrap._gcd_import(name[level:], package, level) #need at least one import hook for importlib stuff to work. import basehook sys.meta_path.append(basehook.BaseHook())
gpl-3.0
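The module above re-exports the bootstrap __import__ and adds the small helpers described in its docstrings. A brief usage sketch; the module names are illustrative:

import importlib

json_mod = importlib.import_module('json')                     # absolute import
decoder = importlib.import_module('.decoder', package='json')  # relative import needs an anchor
importlib.invalidate_caches()                                   # ask finders to drop stale caches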
PROGRAM-IX/pystroke
event_engine.py
1
1542
import pygame
from pygame.locals import *


class EventEngine:
    """
    Reads the event queue and passes events to other engines

    @author: James Heslin (PROGRAM_IX)
    """
    def __init__(self, i_e):
        """
        Takes an InputEngine and passes all relevant events to it

        @type i_e: InputEngine
        @param i_e: InputEngine to which input events should be passed
        @author: James Heslin (PROGRAM_IX)
        """
        self.input = i_e

    def update(self):
        """
        Pulls all relevant events from the event queue and passes them
        to the appropriate engines

        @author: James Heslin (PROGRAM_IX)
        """
        for e in pygame.event.get():
            if e.type == MOUSEMOTION:
                self.input.mouse_motion(e)
            elif e.type == MOUSEBUTTONDOWN:
                self.input.mouse_b_down(e)
            elif e.type == MOUSEBUTTONUP:
                self.input.mouse_b_up(e)
            elif e.type == KEYDOWN:
                self.input.key_down(e)
            elif e.type == KEYUP:
                self.input.key_up(e)

    def reset_input(self):
        """
        Resets the InputEngine's values

        @author: James Heslin (PROGRAM_IX)
        """
        print "RESETTING INPUT"
        self.input.reset()

    def print_input_states(self):
        """
        Prints the states of the InputEngine

        @author: James Heslin (PROGRAM_IX)
        """
        self.input.print_all_states()
mit
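EventEngine above only drains the pygame queue and forwards events, so it is wired together with whatever InputEngine implements the handler methods it calls (mouse_motion, mouse_b_down, mouse_b_up, key_down, key_up, reset, print_all_states). A rough sketch of that wiring; the InputEngine import path is an assumption, not taken from this repository:

import pygame
from event_engine import EventEngine
from input_engine import InputEngine    # hypothetical module providing the handlers above

pygame.init()
screen = pygame.display.set_mode((640, 480))
events = EventEngine(InputEngine())
clock = pygame.time.Clock()
for _ in range(600):                    # run the loop for ~10 seconds in this sketch
    events.update()                     # forwards queued events to the InputEngine
    pygame.display.flip()
    clock.tick(60)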
yamaneko1212/webpay-python
webpay/model/charge.py
1
1090
from webpay.model.card import Card
from .model import Model


class Charge(Model):
    def __init__(self, client, data):
        Model.__init__(self, client, data)

    def _instantiate_field(self, key, value):
        if key == 'card':
            return Card(self._client, value)
        else:
            return Model._instantiate_field(self, key, value)

    def refund(self, amount=None):
        """Refund this charge.

        Arguments:
        - `amount`: amount to refund. If `amount` is not given or `None`,
          refund all. If `amount` is less than this charge's amount,
          refund partially.
        """
        self._update_attributes(self._client.charges.refund(self.id, amount))

    def capture(self, amount=None):
        """Capture this charge. This charge should be uncaptured (created
        with capture=false) and not yet expired.

        Arguments:
        - `amount`: amount to capture. If `amount` is not given or `None`,
          use `this.amount`.
        """
        self._update_attributes(self._client.charges.capture(self.id, amount))
mit
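The refund and capture methods above both delegate to the charges API and refresh the model in place. A hedged sketch of how they are typically called, assuming `client` is a configured WebPay client, the charge ids are placeholders, and the retrieval calls follow the client's charges API:

charge = client.charges.retrieve('ch_xxxxxxxx')      # placeholder charge id
charge.refund(amount=400)                            # partial refund; omit amount to refund all

uncaptured = client.charges.retrieve('ch_yyyyyyyy')  # assumed to be created with capture=False
uncaptured.capture()                                 # captures the full authorized amount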
andyzsf/edx
cms/djangoapps/contentstore/views/tests/test_import_export.py
25
11737
""" Unit tests for course import and export """ import copy import json import logging import os import shutil import tarfile import tempfile from path import path from uuid import uuid4 from django.test.utils import override_settings from django.conf import settings from contentstore.utils import reverse_course_url from xmodule.modulestore.tests.factories import ItemFactory from contentstore.tests.utils import CourseTestCase from student import auth from student.roles import CourseInstructorRole, CourseStaffRole TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE) TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex log = logging.getLogger(__name__) @override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE) class ImportTestCase(CourseTestCase): """ Unit tests for importing a course """ def setUp(self): super(ImportTestCase, self).setUp() self.url = reverse_course_url('import_handler', self.course.id) self.content_dir = path(tempfile.mkdtemp()) def touch(name): """ Equivalent to shell's 'touch'""" with file(name, 'a'): os.utime(name, None) # Create tar test files ----------------------------------------------- # OK course: good_dir = tempfile.mkdtemp(dir=self.content_dir) # test course being deeper down than top of tar file embedded_dir = os.path.join(good_dir, "grandparent", "parent") os.makedirs(os.path.join(embedded_dir, "course")) with open(os.path.join(embedded_dir, "course.xml"), "w+") as f: f.write('<course url_name="2013_Spring" org="EDx" course="0.00x"/>') with open(os.path.join(embedded_dir, "course", "2013_Spring.xml"), "w+") as f: f.write('<course></course>') self.good_tar = os.path.join(self.content_dir, "good.tar.gz") with tarfile.open(self.good_tar, "w:gz") as gtar: gtar.add(good_dir) # Bad course (no 'course.xml' file): bad_dir = tempfile.mkdtemp(dir=self.content_dir) touch(os.path.join(bad_dir, "bad.xml")) self.bad_tar = os.path.join(self.content_dir, "bad.tar.gz") with tarfile.open(self.bad_tar, "w:gz") as btar: btar.add(bad_dir) self.unsafe_common_dir = path(tempfile.mkdtemp(dir=self.content_dir)) def tearDown(self): shutil.rmtree(self.content_dir) def test_no_coursexml(self): """ Check that the response for a tar.gz import without a course.xml is correct. """ with open(self.bad_tar) as btar: resp = self.client.post( self.url, { "name": self.bad_tar, "course-data": [btar] }) self.assertEquals(resp.status_code, 415) # Check that `import_status` returns the appropriate stage (i.e., the # stage at which import failed). resp_status = self.client.get( reverse_course_url( 'import_status_handler', self.course.id, kwargs={'filename': os.path.split(self.bad_tar)[1]} ) ) self.assertEquals(json.loads(resp_status.content)["ImportStatus"], -2) def test_with_coursexml(self): """ Check that the response for a tar.gz import with a course.xml is correct. 
""" with open(self.good_tar) as gtar: args = {"name": self.good_tar, "course-data": [gtar]} resp = self.client.post(self.url, args) self.assertEquals(resp.status_code, 200) def test_import_in_existing_course(self): """ Check that course is imported successfully in existing course and users have their access roles """ # Create a non_staff user and add it to course staff only __, nonstaff_user = self.create_non_staff_authed_user_client(authenticate=False) auth.add_users(self.user, CourseStaffRole(self.course.id), nonstaff_user) course = self.store.get_course(self.course.id) self.assertIsNotNone(course) display_name_before_import = course.display_name # Check that global staff user can import course with open(self.good_tar) as gtar: args = {"name": self.good_tar, "course-data": [gtar]} resp = self.client.post(self.url, args) self.assertEquals(resp.status_code, 200) course = self.store.get_course(self.course.id) self.assertIsNotNone(course) display_name_after_import = course.display_name # Check that course display name have changed after import self.assertNotEqual(display_name_before_import, display_name_after_import) # Now check that non_staff user has his same role self.assertFalse(CourseInstructorRole(self.course.id).has_user(nonstaff_user)) self.assertTrue(CourseStaffRole(self.course.id).has_user(nonstaff_user)) # Now course staff user can also successfully import course self.client.login(username=nonstaff_user.username, password='foo') with open(self.good_tar) as gtar: args = {"name": self.good_tar, "course-data": [gtar]} resp = self.client.post(self.url, args) self.assertEquals(resp.status_code, 200) # Now check that non_staff user has his same role self.assertFalse(CourseInstructorRole(self.course.id).has_user(nonstaff_user)) self.assertTrue(CourseStaffRole(self.course.id).has_user(nonstaff_user)) ## Unsafe tar methods ##################################################### # Each of these methods creates a tarfile with a single type of unsafe # content. def _fifo_tar(self): """ Tar file with FIFO """ fifop = self.unsafe_common_dir / "fifo.file" fifo_tar = self.unsafe_common_dir / "fifo.tar.gz" os.mkfifo(fifop) with tarfile.open(fifo_tar, "w:gz") as tar: tar.add(fifop) return fifo_tar def _symlink_tar(self): """ Tarfile with symlink to path outside directory. """ outsidep = self.unsafe_common_dir / "unsafe_file.txt" symlinkp = self.unsafe_common_dir / "symlink.txt" symlink_tar = self.unsafe_common_dir / "symlink.tar.gz" outsidep.symlink(symlinkp) with tarfile.open(symlink_tar, "w:gz") as tar: tar.add(symlinkp) return symlink_tar def _outside_tar(self): """ Tarfile with file that extracts to outside directory. Extracting this tarfile in directory <dir> will put its contents directly in <dir> (rather than <dir/tarname>). """ outside_tar = self.unsafe_common_dir / "unsafe_file.tar.gz" with tarfile.open(outside_tar, "w:gz") as tar: tar.addfile(tarfile.TarInfo(str(self.content_dir / "a_file"))) return outside_tar def _outside_tar2(self): """ Tarfile with file that extracts to outside directory. The path here matches the basename (`self.unsafe_common_dir`), but then "cd's out". E.g. "/usr/../etc" == "/etc", but the naive basename of the first (but not the second) is "/usr" Extracting this tarfile in directory <dir> will also put its contents directly in <dir> (rather than <dir/tarname>). 
""" outside_tar = self.unsafe_common_dir / "unsafe_file.tar.gz" with tarfile.open(outside_tar, "w:gz") as tar: tar.addfile(tarfile.TarInfo(str(self.unsafe_common_dir / "../a_file"))) return outside_tar def test_unsafe_tar(self): """ Check that safety measure work. This includes: 'tarbombs' which include files or symlinks with paths outside or directly in the working directory, 'special files' (character device, block device or FIFOs), all raise exceptions/400s. """ def try_tar(tarpath): """ Attempt to tar an unacceptable file """ with open(tarpath) as tar: args = {"name": tarpath, "course-data": [tar]} resp = self.client.post(self.url, args) self.assertEquals(resp.status_code, 400) self.assertTrue("SuspiciousFileOperation" in resp.content) try_tar(self._fifo_tar()) try_tar(self._symlink_tar()) try_tar(self._outside_tar()) try_tar(self._outside_tar2()) # Check that `import_status` returns the appropriate stage (i.e., # either 3, indicating all previous steps are completed, or 0, # indicating no upload in progress) resp_status = self.client.get( reverse_course_url( 'import_status_handler', self.course.id, kwargs={'filename': os.path.split(self.good_tar)[1]} ) ) import_status = json.loads(resp_status.content)["ImportStatus"] self.assertIn(import_status, (0, 3)) @override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE) class ExportTestCase(CourseTestCase): """ Tests for export_handler. """ def setUp(self): """ Sets up the test course. """ super(ExportTestCase, self).setUp() self.url = reverse_course_url('export_handler', self.course.id) def test_export_html(self): """ Get the HTML for the page. """ resp = self.client.get_html(self.url) self.assertEquals(resp.status_code, 200) self.assertContains(resp, "Export My Course Content") def test_export_json_unsupported(self): """ JSON is unsupported. """ resp = self.client.get(self.url, HTTP_ACCEPT='application/json') self.assertEquals(resp.status_code, 406) def test_export_targz(self): """ Get tar.gz file, using HTTP_ACCEPT. """ resp = self.client.get(self.url, HTTP_ACCEPT='application/x-tgz') self._verify_export_succeeded(resp) def test_export_targz_urlparam(self): """ Get tar.gz file, using URL parameter. """ resp = self.client.get(self.url + '?_accept=application/x-tgz') self._verify_export_succeeded(resp) def _verify_export_succeeded(self, resp): """ Export success helper method. """ self.assertEquals(resp.status_code, 200) self.assertTrue(resp.get('Content-Disposition').startswith('attachment')) def test_export_failure_top_level(self): """ Export failure. """ fake_xblock = ItemFactory.create(parent_location=self.course.location, category='aawefawef') self.store.publish(fake_xblock.location, self.user.id) self._verify_export_failure(u'/container/{}'.format(self.course.location)) def test_export_failure_subsection_level(self): """ Slightly different export failure. """ vertical = ItemFactory.create(parent_location=self.course.location, category='vertical', display_name='foo') ItemFactory.create( parent_location=vertical.location, category='aawefawef' ) self._verify_export_failure(u'/container/{}'.format(vertical.location)) def _verify_export_failure(self, expected_text): """ Export failure helper method. """ resp = self.client.get(self.url, HTTP_ACCEPT='application/x-tgz') self.assertEquals(resp.status_code, 200) self.assertIsNone(resp.get('Content-Disposition')) self.assertContains(resp, 'Unable to create xml for module') self.assertContains(resp, expected_text)
agpl-3.0
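The unsafe-tar tests above all target the same server-side rule: imported archives must not contain special files, links, or members that extract outside the working directory. The check itself lives in the import handler rather than in this test module; the sketch below only illustrates that rule and is not the edX implementation:

import os
import tarfile

def safe_members(tar, dest):
    """Yield only members that are regular entries extracting inside dest."""
    real_dest = os.path.realpath(dest) + os.sep
    for member in tar.getmembers():
        if member.issym() or member.islnk() or member.isdev() or member.isfifo():
            raise ValueError('special file or link refused: %s' % member.name)
        target = os.path.realpath(os.path.join(dest, member.name))
        if not (target + os.sep).startswith(real_dest):
            raise ValueError('member escapes extraction directory: %s' % member.name)
        yield member

# usage: tar.extractall(path=dest, members=safe_members(tar, dest))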
broesamle/servo
tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/__init__.py
552
8263
# Copyright 2011, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """WebSocket extension for Apache HTTP Server. mod_pywebsocket is a WebSocket extension for Apache HTTP Server intended for testing or experimental purposes. mod_python is required. Installation ============ 0. Prepare an Apache HTTP Server for which mod_python is enabled. 1. Specify the following Apache HTTP Server directives to suit your configuration. If mod_pywebsocket is not in the Python path, specify the following. <websock_lib> is the directory where mod_pywebsocket is installed. PythonPath "sys.path+['<websock_lib>']" Always specify the following. <websock_handlers> is the directory where user-written WebSocket handlers are placed. PythonOption mod_pywebsocket.handler_root <websock_handlers> PythonHeaderParserHandler mod_pywebsocket.headerparserhandler To limit the search for WebSocket handlers to a directory <scan_dir> under <websock_handlers>, configure as follows: PythonOption mod_pywebsocket.handler_scan <scan_dir> <scan_dir> is useful in saving scan time when <websock_handlers> contains many non-WebSocket handler files. If you want to allow handlers whose canonical path is not under the root directory (i.e. symbolic link is in root directory but its target is not), configure as follows: PythonOption mod_pywebsocket.allow_handlers_outside_root_dir On Example snippet of httpd.conf: (mod_pywebsocket is in /websock_lib, WebSocket handlers are in /websock_handlers, port is 80 for ws, 443 for wss.) <IfModule python_module> PythonPath "sys.path+['/websock_lib']" PythonOption mod_pywebsocket.handler_root /websock_handlers PythonHeaderParserHandler mod_pywebsocket.headerparserhandler </IfModule> 2. Tune Apache parameters for serving WebSocket. We'd like to note that at least TimeOut directive from core features and RequestReadTimeout directive from mod_reqtimeout should be modified not to kill connections in only a few seconds of idle time. 3. Verify installation. You can use example/console.html to poke the server. 
Writing WebSocket handlers ========================== When a WebSocket request comes in, the resource name specified in the handshake is considered as if it is a file path under <websock_handlers> and the handler defined in <websock_handlers>/<resource_name>_wsh.py is invoked. For example, if the resource name is /example/chat, the handler defined in <websock_handlers>/example/chat_wsh.py is invoked. A WebSocket handler is composed of the following three functions: web_socket_do_extra_handshake(request) web_socket_transfer_data(request) web_socket_passive_closing_handshake(request) where: request: mod_python request. web_socket_do_extra_handshake is called during the handshake after the headers are successfully parsed and WebSocket properties (ws_location, ws_origin, and ws_resource) are added to request. A handler can reject the request by raising an exception. A request object has the following properties that you can use during the extra handshake (web_socket_do_extra_handshake): - ws_resource - ws_origin - ws_version - ws_location (HyBi 00 only) - ws_extensions (HyBi 06 and later) - ws_deflate (HyBi 06 and later) - ws_protocol - ws_requested_protocols (HyBi 06 and later) The last two are a bit tricky. See the next subsection. Subprotocol Negotiation ----------------------- For HyBi 06 and later, ws_protocol is always set to None when web_socket_do_extra_handshake is called. If ws_requested_protocols is not None, you must choose one subprotocol from this list and set it to ws_protocol. For HyBi 00, when web_socket_do_extra_handshake is called, ws_protocol is set to the value given by the client in Sec-WebSocket-Protocol header or None if such header was not found in the opening handshake request. Finish extra handshake with ws_protocol untouched to accept the request subprotocol. Then, Sec-WebSocket-Protocol header will be sent to the client in response with the same value as requested. Raise an exception in web_socket_do_extra_handshake to reject the requested subprotocol. Data Transfer ------------- web_socket_transfer_data is called after the handshake completed successfully. A handler can receive/send messages from/to the client using request. mod_pywebsocket.msgutil module provides utilities for data transfer. You can receive a message by the following statement. message = request.ws_stream.receive_message() This call blocks until any complete text frame arrives, and the payload data of the incoming frame will be stored into message. When you're using IETF HyBi 00 or later protocol, receive_message() will return None on receiving client-initiated closing handshake. When any error occurs, receive_message() will raise some exception. You can send a message by the following statement. request.ws_stream.send_message(message) Closing Connection ------------------ Executing the following statement or just return-ing from web_socket_transfer_data cause connection close. request.ws_stream.close_connection() close_connection will wait for closing handshake acknowledgement coming from the client. When it couldn't receive a valid acknowledgement, raises an exception. web_socket_passive_closing_handshake is called after the server receives incoming closing frame from the client peer immediately. You can specify code and reason by return values. They are sent as a outgoing closing frame from the server. A request object has the following properties that you can use in web_socket_passive_closing_handshake. 
- ws_close_code
- ws_close_reason


Threading
---------

A WebSocket handler must be thread-safe if the server (Apache or
standalone.py) is configured to use threads.


Configuring WebSocket Extension Processors
------------------------------------------

See extensions.py for supported WebSocket extensions. Note that they
are unstable and their APIs are subject to change substantially.

A request object has these extension processing related attributes.

- ws_requested_extensions:

  A list of common.ExtensionParameter instances representing extension
  parameters received from the client in the client's opening
  handshake. You shouldn't modify it manually.

- ws_extensions:

  A list of common.ExtensionParameter instances representing extension
  parameters to send back to the client in the server's opening
  handshake. You shouldn't touch it directly. Instead, call methods on
  extension processors.

- ws_extension_processors:

  A list of loaded extension processors. Find the processor for the
  extension you want to configure from it, and call its methods.
"""


# vi:sts=4 sw=4 et tw=72
mpl-2.0
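To make the handler contract described in the docstring above concrete, here is a minimal sketch of an echo handler. The file name echo_wsh.py and the closing code 1000 are illustrative assumptions; the request.ws_stream calls and the None return on a client-initiated closing handshake follow the documentation above.

# echo_wsh.py -- minimal sketch of a mod_pywebsocket handler (hypothetical
# file placed under <websock_handlers>/echo_wsh.py, served at resource /echo).

def web_socket_do_extra_handshake(request):
    # Accept every request; raise an exception here to reject the handshake
    # (for example, based on request.ws_origin or request.ws_resource).
    pass


def web_socket_transfer_data(request):
    # Echo text messages back until the client starts the closing handshake.
    while True:
        message = request.ws_stream.receive_message()
        if message is None:
            return  # client-initiated closing handshake
        request.ws_stream.send_message(message)


def web_socket_passive_closing_handshake(request):
    # Code and reason for the outgoing closing frame; 1000 is the usual
    # normal-closure code (an assumption, not dictated by the docstring).
    return 1000, ''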
damdam-s/OpenUpgrade
addons/project_issue/migrations/8.0.1.0/pre-migration.py
14
1454
# -*- coding: utf-8 -*-
##############################################################################
#
#    Odoo, a suite of business apps
#    This module Copyright (C) 2014 Therp BV (<http://therp.nl>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.openupgrade import openupgrade

column_renames = {'project_issue': [('priority', None)]}

xmlid_renames = [
    ('project_issue.mt_issue_closed', 'project_issue.mt_issue_ready'),
    ('project_issue.mt_issue_started', 'project_issue.mt_issue_assigned'),
    ('project_issue.mt_project_issue_started',
     'project_issue.mt_project_issue_assigned'),
]


@openupgrade.migrate()
def migrate(cr, version):
    openupgrade.rename_columns(cr, column_renames)
    openupgrade.rename_xmlids(cr, xmlid_renames)
agpl-3.0
miguelpalacio/python-for-android
python-build/python-libs/gdata/build/lib/gdata/tlslite/integration/HTTPTLSConnection.py
271
6668
"""TLS Lite + httplib.""" import socket import httplib from gdata.tlslite.TLSConnection import TLSConnection from gdata.tlslite.integration.ClientHelper import ClientHelper class HTTPBaseTLSConnection(httplib.HTTPConnection): """This abstract class provides a framework for adding TLS support to httplib.""" default_port = 443 def __init__(self, host, port=None, strict=None): if strict == None: #Python 2.2 doesn't support strict httplib.HTTPConnection.__init__(self, host, port) else: httplib.HTTPConnection.__init__(self, host, port, strict) def connect(self): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if hasattr(sock, 'settimeout'): sock.settimeout(10) sock.connect((self.host, self.port)) #Use a TLSConnection to emulate a socket self.sock = TLSConnection(sock) #When httplib closes this, close the socket self.sock.closeSocket = True self._handshake(self.sock) def _handshake(self, tlsConnection): """Called to perform some sort of handshake. This method must be overridden in a subclass to do some type of handshake. This method will be called after the socket has been connected but before any data has been sent. If this method does not raise an exception, the TLS connection will be considered valid. This method may (or may not) be called every time an HTTP request is performed, depending on whether the underlying HTTP connection is persistent. @type tlsConnection: L{tlslite.TLSConnection.TLSConnection} @param tlsConnection: The connection to perform the handshake on. """ raise NotImplementedError() class HTTPTLSConnection(HTTPBaseTLSConnection, ClientHelper): """This class extends L{HTTPBaseTLSConnection} to support the common types of handshaking.""" def __init__(self, host, port=None, username=None, password=None, sharedKey=None, certChain=None, privateKey=None, cryptoID=None, protocol=None, x509Fingerprint=None, x509TrustList=None, x509CommonName=None, settings = None): """Create a new HTTPTLSConnection. For client authentication, use one of these argument combinations: - username, password (SRP) - username, sharedKey (shared-key) - certChain, privateKey (certificate) For server authentication, you can either rely on the implicit mutual authentication performed by SRP or shared-keys, or you can do certificate-based server authentication with one of these argument combinations: - cryptoID[, protocol] (requires cryptoIDlib) - x509Fingerprint - x509TrustList[, x509CommonName] (requires cryptlib_py) Certificate-based server authentication is compatible with SRP or certificate-based client authentication. It is not compatible with shared-keys. The constructor does not perform the TLS handshake itself, but simply stores these arguments for later. The handshake is performed only when this class needs to connect with the server. Thus you should be prepared to handle TLS-specific exceptions when calling methods inherited from L{httplib.HTTPConnection} such as request(), connect(), and send(). See the client handshake functions in L{tlslite.TLSConnection.TLSConnection} for details on which exceptions might be raised. @type host: str @param host: Server to connect to. @type port: int @param port: Port to connect to. @type username: str @param username: SRP or shared-key username. Requires the 'password' or 'sharedKey' argument. @type password: str @param password: SRP password for mutual authentication. Requires the 'username' argument. @type sharedKey: str @param sharedKey: Shared key for mutual authentication. Requires the 'username' argument. 
@type certChain: L{tlslite.X509CertChain.X509CertChain} or L{cryptoIDlib.CertChain.CertChain} @param certChain: Certificate chain for client authentication. Requires the 'privateKey' argument. Excludes the SRP or shared-key related arguments. @type privateKey: L{tlslite.utils.RSAKey.RSAKey} @param privateKey: Private key for client authentication. Requires the 'certChain' argument. Excludes the SRP or shared-key related arguments. @type cryptoID: str @param cryptoID: cryptoID for server authentication. Mutually exclusive with the 'x509...' arguments. @type protocol: str @param protocol: cryptoID protocol URI for server authentication. Requires the 'cryptoID' argument. @type x509Fingerprint: str @param x509Fingerprint: Hex-encoded X.509 fingerprint for server authentication. Mutually exclusive with the 'cryptoID' and 'x509TrustList' arguments. @type x509TrustList: list of L{tlslite.X509.X509} @param x509TrustList: A list of trusted root certificates. The other party must present a certificate chain which extends to one of these root certificates. The cryptlib_py module must be installed to use this parameter. Mutually exclusive with the 'cryptoID' and 'x509Fingerprint' arguments. @type x509CommonName: str @param x509CommonName: The end-entity certificate's 'CN' field must match this value. For a web server, this is typically a server name such as 'www.amazon.com'. Mutually exclusive with the 'cryptoID' and 'x509Fingerprint' arguments. Requires the 'x509TrustList' argument. @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} @param settings: Various settings which can be used to control the ciphersuites, certificate types, and SSL/TLS versions offered by the client. """ HTTPBaseTLSConnection.__init__(self, host, port) ClientHelper.__init__(self, username, password, sharedKey, certChain, privateKey, cryptoID, protocol, x509Fingerprint, x509TrustList, x509CommonName, settings) def _handshake(self, tlsConnection): ClientHelper._handshake(self, tlsConnection)
apache-2.0
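As a usage sketch of the class above: because HTTPTLSConnection inherits from httplib.HTTPConnection, a request looks like plain httplib once the authentication arguments are supplied. The host and fingerprint below are placeholders, and TLS failures surface as tlslite exceptions when the handshake runs on connect.

from gdata.tlslite.integration.HTTPTLSConnection import HTTPTLSConnection

# Placeholder host and hex-encoded X.509 fingerprint for server authentication.
conn = HTTPTLSConnection('www.example.com', 443,
                         x509Fingerprint='0123456789abcdef0123456789abcdef01234567')
conn.request('GET', '/')          # triggers connect() and the TLS handshake
response = conn.getresponse()
print response.status
conn.close()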
FuzzyBearBTC/Peershares2
contrib/pyminer/pyminer.py
1257
6438
#!/usr/bin/python # # Copyright (c) 2011 The Bitcoin developers # Distributed under the MIT/X11 software license, see the accompanying # file license.txt or http://www.opensource.org/licenses/mit-license.php. # import time import json import pprint import hashlib import struct import re import base64 import httplib import sys from multiprocessing import Process ERR_SLEEP = 15 MAX_NONCE = 1000000L settings = {} pp = pprint.PrettyPrinter(indent=4) class BitcoinRPC: OBJID = 1 def __init__(self, host, port, username, password): authpair = "%s:%s" % (username, password) self.authhdr = "Basic %s" % (base64.b64encode(authpair)) self.conn = httplib.HTTPConnection(host, port, False, 30) def rpc(self, method, params=None): self.OBJID += 1 obj = { 'version' : '1.1', 'method' : method, 'id' : self.OBJID } if params is None: obj['params'] = [] else: obj['params'] = params self.conn.request('POST', '/', json.dumps(obj), { 'Authorization' : self.authhdr, 'Content-type' : 'application/json' }) resp = self.conn.getresponse() if resp is None: print "JSON-RPC: no response" return None body = resp.read() resp_obj = json.loads(body) if resp_obj is None: print "JSON-RPC: cannot JSON-decode body" return None if 'error' in resp_obj and resp_obj['error'] != None: return resp_obj['error'] if 'result' not in resp_obj: print "JSON-RPC: no result in object" return None return resp_obj['result'] def getblockcount(self): return self.rpc('getblockcount') def getwork(self, data=None): return self.rpc('getwork', data) def uint32(x): return x & 0xffffffffL def bytereverse(x): return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) | (((x) >> 8) & 0x0000ff00) | ((x) >> 24) )) def bufreverse(in_buf): out_words = [] for i in range(0, len(in_buf), 4): word = struct.unpack('@I', in_buf[i:i+4])[0] out_words.append(struct.pack('@I', bytereverse(word))) return ''.join(out_words) def wordreverse(in_buf): out_words = [] for i in range(0, len(in_buf), 4): out_words.append(in_buf[i:i+4]) out_words.reverse() return ''.join(out_words) class Miner: def __init__(self, id): self.id = id self.max_nonce = MAX_NONCE def work(self, datastr, targetstr): # decode work data hex string to binary static_data = datastr.decode('hex') static_data = bufreverse(static_data) # the first 76b of 80b do not change blk_hdr = static_data[:76] # decode 256-bit target value targetbin = targetstr.decode('hex') targetbin = targetbin[::-1] # byte-swap and dword-swap targetbin_str = targetbin.encode('hex') target = long(targetbin_str, 16) # pre-hash first 76b of block header static_hash = hashlib.sha256() static_hash.update(blk_hdr) for nonce in xrange(self.max_nonce): # encode 32-bit nonce value nonce_bin = struct.pack("<I", nonce) # hash final 4b, the nonce value hash1_o = static_hash.copy() hash1_o.update(nonce_bin) hash1 = hash1_o.digest() # sha256 hash of sha256 hash hash_o = hashlib.sha256() hash_o.update(hash1) hash = hash_o.digest() # quick test for winning solution: high 32 bits zero? 
if hash[-4:] != '\0\0\0\0': continue # convert binary hash to 256-bit Python long hash = bufreverse(hash) hash = wordreverse(hash) hash_str = hash.encode('hex') l = long(hash_str, 16) # proof-of-work test: hash < target if l < target: print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,) return (nonce + 1, nonce_bin) else: print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,) # return (nonce + 1, nonce_bin) return (nonce + 1, None) def submit_work(self, rpc, original_data, nonce_bin): nonce_bin = bufreverse(nonce_bin) nonce = nonce_bin.encode('hex') solution = original_data[:152] + nonce + original_data[160:256] param_arr = [ solution ] result = rpc.getwork(param_arr) print time.asctime(), "--> Upstream RPC result:", result def iterate(self, rpc): work = rpc.getwork() if work is None: time.sleep(ERR_SLEEP) return if 'data' not in work or 'target' not in work: time.sleep(ERR_SLEEP) return time_start = time.time() (hashes_done, nonce_bin) = self.work(work['data'], work['target']) time_end = time.time() time_diff = time_end - time_start self.max_nonce = long( (hashes_done * settings['scantime']) / time_diff) if self.max_nonce > 0xfffffffaL: self.max_nonce = 0xfffffffaL if settings['hashmeter']: print "HashMeter(%d): %d hashes, %.2f Khash/sec" % ( self.id, hashes_done, (hashes_done / 1000.0) / time_diff) if nonce_bin is not None: self.submit_work(rpc, work['data'], nonce_bin) def loop(self): rpc = BitcoinRPC(settings['host'], settings['port'], settings['rpcuser'], settings['rpcpass']) if rpc is None: return while True: self.iterate(rpc) def miner_thread(id): miner = Miner(id) miner.loop() if __name__ == '__main__': if len(sys.argv) != 2: print "Usage: pyminer.py CONFIG-FILE" sys.exit(1) f = open(sys.argv[1]) for line in f: # skip comment lines m = re.search('^\s*#', line) if m: continue # parse key=value lines m = re.search('^(\w+)\s*=\s*(\S.*)$', line) if m is None: continue settings[m.group(1)] = m.group(2) f.close() if 'host' not in settings: settings['host'] = '127.0.0.1' if 'port' not in settings: settings['port'] = 8332 if 'threads' not in settings: settings['threads'] = 1 if 'hashmeter' not in settings: settings['hashmeter'] = 0 if 'scantime' not in settings: settings['scantime'] = 30L if 'rpcuser' not in settings or 'rpcpass' not in settings: print "Missing username and/or password in cfg file" sys.exit(1) settings['port'] = int(settings['port']) settings['threads'] = int(settings['threads']) settings['hashmeter'] = int(settings['hashmeter']) settings['scantime'] = long(settings['scantime']) thr_list = [] for thr_id in range(settings['threads']): p = Process(target=miner_thread, args=(thr_id,)) p.start() thr_list.append(p) time.sleep(1) # stagger threads print settings['threads'], "mining threads started" print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port']) try: for thr_proc in thr_list: thr_proc.join() except KeyboardInterrupt: pass print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
mit
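The miner above is driven by a key=value configuration file parsed in its __main__ block. Based on that parser and the defaults it applies, a minimal CONFIG-FILE passed to pyminer.py might look like the following; the credentials are placeholders.

# sample pyminer configuration -- lines starting with '#' are skipped
host=127.0.0.1
port=8332
rpcuser=rpcusername
rpcpass=rpcpassword
threads=2
hashmeter=1
scantime=30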
benwolfe/esp8266-Arduino
esp8266com/esp8266/tools/macosx/xtensa-lx106-elf/xtensa-lx106-elf/sysroot/lib/libstdc++.a-gdb.py
6
2433
# -*- python -*-
# Copyright (C) 2009-2013 Free Software Foundation, Inc.

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import sys
import gdb
import os
import os.path

pythondir = '/Users/igrokhotkov/e/ESPTools/crosstool-NG/builds/xtensa-lx106-elf/share/gcc-4.8.2/python'
libdir = '/Users/igrokhotkov/e/ESPTools/crosstool-NG/builds/xtensa-lx106-elf/xtensa-lx106-elf/lib'

# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
    # Update module path. We want to find the relative path from libdir
    # to pythondir, and then we want to apply that relative path to the
    # directory holding the objfile with which this file is associated.
    # This preserves relocatability of the gcc tree.

    # Do a simple normalization that removes duplicate separators.
    pythondir = os.path.normpath (pythondir)
    libdir = os.path.normpath (libdir)
    prefix = os.path.commonprefix ([libdir, pythondir])
    # In some bizarre configuration we might have found a match in the
    # middle of a directory name.
    if prefix[-1] != '/':
        prefix = os.path.dirname (prefix) + '/'

    # Strip off the prefix.
    pythondir = pythondir[len (prefix):]
    libdir = libdir[len (prefix):]

    # Compute the ".."s needed to get from libdir to the prefix.
    dotdots = ('..' + os.sep) * len (libdir.split (os.sep))

    objfile = gdb.current_objfile ().filename
    dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)

    if not dir_ in sys.path:
        sys.path.insert(0, dir_)

# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
lgpl-2.1
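The auto-load script above notes that when there is no current objfile it relies on the user having set sys.path beforehand. A hedged sketch of doing that by hand from a .gdbinit, reusing the pythondir hard-coded in the script (substitute whatever path your toolchain actually uses), could look like this; passing None registers the printers globally rather than per-objfile.

# Hypothetical ~/.gdbinit fragment for loading the libstdc++ printers manually.
python
import sys
sys.path.insert(0, '/Users/igrokhotkov/e/ESPTools/crosstool-NG/builds/xtensa-lx106-elf/share/gcc-4.8.2/python')
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers(None)
end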
jenalgit/django
tests/urlpatterns_reverse/tests.py
154
50058
# -*- coding: utf-8 -*- """ Unit tests for reverse URL lookups. """ from __future__ import unicode_literals import sys import unittest from admin_scripts.tests import AdminScriptTestCase from django.conf import settings from django.conf.urls import include, url from django.contrib.auth.models import User from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist from django.core.urlresolvers import ( NoReverseMatch, RegexURLPattern, RegexURLResolver, Resolver404, ResolverMatch, get_callable, get_resolver, resolve, reverse, reverse_lazy, ) from django.http import ( HttpRequest, HttpResponsePermanentRedirect, HttpResponseRedirect, ) from django.shortcuts import redirect from django.test import ( SimpleTestCase, TestCase, ignore_warnings, override_settings, ) from django.test.utils import override_script_prefix from django.utils import six from django.utils.deprecation import ( RemovedInDjango20Warning, RemovedInDjango110Warning, ) from . import middleware, urlconf_outer, views from .views import empty_view resolve_test_data = ( # These entries are in the format: (path, url_name, app_name, namespace, view_name, func, args, kwargs) # Simple case ('/normal/42/37/', 'normal-view', '', '', 'normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}), ('/view_class/42/37/', 'view-class', '', '', 'view-class', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}), ('/included/normal/42/37/', 'inc-normal-view', '', '', 'inc-normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}), ('/included/view_class/42/37/', 'inc-view-class', '', '', 'inc-view-class', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}), # Unnamed args are dropped if you have *any* kwargs in a pattern ('/mixed_args/42/37/', 'mixed-args', '', '', 'mixed-args', views.empty_view, tuple(), {'arg2': '37'}), ('/included/mixed_args/42/37/', 'inc-mixed-args', '', '', 'inc-mixed-args', views.empty_view, tuple(), {'arg2': '37'}), ('/included/12/mixed_args/42/37/', 'inc-mixed-args', '', '', 'inc-mixed-args', views.empty_view, tuple(), {'arg2': '37'}), # Unnamed views should have None as the url_name. Regression data for #21157. ('/unnamed/normal/42/37/', None, '', '', 'urlpatterns_reverse.views.empty_view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}), ('/unnamed/view_class/42/37/', None, '', '', 'urlpatterns_reverse.views.ViewClass', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}), # If you have no kwargs, you get an args list. 
('/no_kwargs/42/37/', 'no-kwargs', '', '', 'no-kwargs', views.empty_view, ('42', '37'), {}), ('/included/no_kwargs/42/37/', 'inc-no-kwargs', '', '', 'inc-no-kwargs', views.empty_view, ('42', '37'), {}), ('/included/12/no_kwargs/42/37/', 'inc-no-kwargs', '', '', 'inc-no-kwargs', views.empty_view, ('12', '42', '37'), {}), # Namespaces ('/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'test-ns1:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}), ('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}), ('/ns-included1/normal/42/37/', 'inc-normal-view', '', 'inc-ns1', 'inc-ns1:inc-normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}), ('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}), ('/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'testapp:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}), ('/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'other-ns2:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}), ('/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'other-ns1:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}), # Nested namespaces ('/ns-included1/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:test-ns3', 'inc-ns1:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}), ('/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:inc-ns4:inc-ns2:test-ns3', 'inc-ns1:inc-ns4:inc-ns2:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}), ('/app-included/test3/inner/42/37/', 'urlobject-view', 'inc-app:testapp', 'inc-app:test-ns3', 'inc-app:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}), ('/app-included/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view', 'inc-app:testapp', 'inc-app:inc-ns4:inc-ns2:test-ns3', 'inc-app:inc-ns4:inc-ns2:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}), # Namespaces capturing variables ('/inc70/', 'inner-nothing', '', 'inc-ns5', 'inc-ns5:inner-nothing', views.empty_view, tuple(), {'outer': '70'}), ('/inc78/extra/foobar/', 'inner-extra', '', 'inc-ns5', 'inc-ns5:inner-extra', views.empty_view, tuple(), {'outer': '78', 'extra': 'foobar'}), ) test_data = ( ('places', '/places/3/', [3], {}), ('places', '/places/3/', ['3'], {}), ('places', NoReverseMatch, ['a'], {}), ('places', NoReverseMatch, [], {}), ('places?', '/place/', [], {}), ('places+', '/places/', [], {}), ('places*', '/place/', [], {}), ('places2?', '/', [], {}), ('places2+', '/places/', [], {}), ('places2*', '/', [], {}), ('places3', '/places/4/', [4], {}), ('places3', '/places/harlem/', ['harlem'], {}), ('places3', NoReverseMatch, ['harlem64'], {}), ('places4', '/places/3/', [], {'id': 3}), ('people', NoReverseMatch, [], {}), ('people', '/people/adrian/', ['adrian'], {}), ('people', '/people/adrian/', [], {'name': 'adrian'}), ('people', NoReverseMatch, ['name with spaces'], {}), ('people', NoReverseMatch, [], {'name': 'name with spaces'}), ('people2', '/people/name/', [], {}), ('people2a', '/people/name/fred/', ['fred'], {}), ('people_backref', '/people/nate-nate/', ['nate'], {}), ('people_backref', '/people/nate-nate/', [], {'name': 'nate'}), 
('optional', '/optional/fred/', [], {'name': 'fred'}), ('optional', '/optional/fred/', ['fred'], {}), ('named_optional', '/optional/1/', [1], {}), ('named_optional', '/optional/1/', [], {'arg1': 1}), ('named_optional', '/optional/1/2/', [1, 2], {}), ('named_optional', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}), ('named_optional_terminated', '/optional/1/2/', [1, 2], {}), ('named_optional_terminated', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}), ('hardcoded', '/hardcoded/', [], {}), ('hardcoded2', '/hardcoded/doc.pdf', [], {}), ('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}), ('people3', NoReverseMatch, [], {'state': 'il'}), ('people3', NoReverseMatch, [], {'name': 'adrian'}), ('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}), ('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}), ('people6', '/people//adrian/', ['adrian'], {}), ('range', '/character_set/a/', [], {}), ('range2', '/character_set/x/', [], {}), ('price', '/price/$10/', ['10'], {}), ('price2', '/price/$10/', ['10'], {}), ('price3', '/price/$10/', ['10'], {}), ('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}), ('headlines', '/headlines/2007.5.21/', [], dict(year=2007, month=5, day=21)), ('windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [], dict(drive_name='C', path=r'Documents and Settings\spam')), ('special', r'/special_chars/~@+%5C$*%7C/', [r'~@+\$*|'], {}), ('special', r'/special_chars/some%20resource/', [r'some resource'], {}), ('special', r'/special_chars/10%25%20complete/', [r'10% complete'], {}), ('special', r'/special_chars/some%20resource/', [], {'chars': r'some resource'}), ('special', r'/special_chars/10%25%20complete/', [], {'chars': r'10% complete'}), ('special', NoReverseMatch, [''], {}), ('mixed', '/john/0/', [], {'name': 'john'}), ('repeats', '/repeats/a/', [], {}), ('repeats2', '/repeats/aa/', [], {}), ('repeats3', '/repeats/aa/', [], {}), ('insensitive', '/CaseInsensitive/fred', ['fred'], {}), ('test', '/test/1', [], {}), ('test2', '/test/2', [], {}), ('inner-nothing', '/outer/42/', [], {'outer': '42'}), ('inner-nothing', '/outer/42/', ['42'], {}), ('inner-nothing', NoReverseMatch, ['foo'], {}), ('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}), ('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}), ('inner-extra', NoReverseMatch, ['fred', 'inner'], {}), ('inner-no-kwargs', '/outer-no-kwargs/42/inner-no-kwargs/1/', ['42', '1'], {}), ('disjunction', NoReverseMatch, ['foo'], {}), ('inner-disjunction', NoReverseMatch, ['10', '11'], {}), ('extra-places', '/e-places/10/', ['10'], {}), ('extra-people', '/e-people/fred/', ['fred'], {}), ('extra-people', '/e-people/fred/', [], {'name': 'fred'}), ('part', '/part/one/', [], {'value': 'one'}), ('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}), ('part2', '/part2/one/', [], {'value': 'one'}), ('part2', '/part2/', [], {}), ('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}), ('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}), # Tests for nested groups. Nested capturing groups will only work if you # *only* supply the correct outer group. 
('nested-noncapture', '/nested/noncapture/opt', [], {'p': 'opt'}), ('nested-capture', '/nested/capture/opt/', ['opt/'], {}), ('nested-capture', NoReverseMatch, [], {'p': 'opt'}), ('nested-mixedcapture', '/nested/capture/mixed/opt', ['opt'], {}), ('nested-mixedcapture', NoReverseMatch, [], {'p': 'opt'}), ('nested-namedcapture', '/nested/capture/named/opt/', [], {'outer': 'opt/'}), ('nested-namedcapture', NoReverseMatch, [], {'outer': 'opt/', 'inner': 'opt'}), ('nested-namedcapture', NoReverseMatch, [], {'inner': 'opt'}), # Regression for #9038 # These views are resolved by method name. Each method is deployed twice - # once with an explicit argument, and once using the default value on # the method. This is potentially ambiguous, as you have to pick the # correct view for the arguments provided. ('urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/', [], {}), ('urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/10/', [], {'arg1': 10}), ('non_path_include', '/includes/non_path_include/', [], {}), # Tests for #13154 ('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}), ('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}), ('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}), ('defaults', NoReverseMatch, [], {'arg2': 1}), # Security tests ('security', '/%2Fexample.com/security/', ['/example.com'], {}), ) class URLObject(object): urlpatterns = [ url(r'^inner/$', views.empty_view, name='urlobject-view'), url(r'^inner/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='urlobject-view'), url(r'^inner/\+\\\$\*/$', views.empty_view, name='urlobject-special-view'), ] def __init__(self, app_name, namespace=None): self.app_name = app_name self.namespace = namespace @property def urls(self): return self.urlpatterns, self.app_name, self.namespace @property def app_urls(self): return self.urlpatterns, self.app_name @override_settings(ROOT_URLCONF='urlpatterns_reverse.no_urls') class NoURLPatternsTests(SimpleTestCase): def test_no_urls_exception(self): """ RegexURLResolver should raise an exception when no urlpatterns exist. """ resolver = RegexURLResolver(r'^$', settings.ROOT_URLCONF) self.assertRaisesMessage( ImproperlyConfigured, "The included urlconf 'urlpatterns_reverse.no_urls' does not " "appear to have any patterns in it. If you see valid patterns in " "the file then the issue is probably caused by a circular import.", getattr, resolver, 'url_patterns' ) @override_settings(ROOT_URLCONF='urlpatterns_reverse.urls') class URLPatternReverse(SimpleTestCase): @ignore_warnings(category=RemovedInDjango110Warning) def test_urlpattern_reverse(self): for name, expected, args, kwargs in test_data: try: got = reverse(name, args=args, kwargs=kwargs) except NoReverseMatch: self.assertEqual(expected, NoReverseMatch) else: self.assertEqual(got, expected) def test_reverse_none(self): # Reversing None should raise an error, not return the last un-named view. 
self.assertRaises(NoReverseMatch, reverse, None) @override_script_prefix('/{{invalid}}/') def test_prefix_braces(self): self.assertEqual( '/%7B%7Binvalid%7D%7D/includes/non_path_include/', reverse('non_path_include') ) def test_prefix_parenthesis(self): # Parentheses are allowed and should not cause errors or be escaped with override_script_prefix('/bogus)/'): self.assertEqual( '/bogus)/includes/non_path_include/', reverse('non_path_include') ) with override_script_prefix('/(bogus)/'): self.assertEqual( '/(bogus)/includes/non_path_include/', reverse('non_path_include') ) @override_script_prefix('/bump%20map/') def test_prefix_format_char(self): self.assertEqual( '/bump%2520map/includes/non_path_include/', reverse('non_path_include') ) @override_script_prefix('/%7Eme/') def test_non_urlsafe_prefix_with_args(self): # Regression for #20022, adjusted for #24013 because ~ is an unreserved # character. Tests whether % is escaped. self.assertEqual('/%257Eme/places/1/', reverse('places', args=[1])) def test_patterns_reported(self): # Regression for #17076 try: # this url exists, but requires an argument reverse("people", args=[]) except NoReverseMatch as e: pattern_description = r"1 pattern(s) tried: ['people/(?P<name>\\w+)/$']" self.assertIn(pattern_description, str(e)) else: # we can't use .assertRaises, since we want to inspect the # exception self.fail("Expected a NoReverseMatch, but none occurred.") @override_script_prefix('/script:name/') def test_script_name_escaping(self): self.assertEqual( reverse('optional', args=['foo:bar']), '/script:name/optional/foo:bar/' ) def test_reverse_returns_unicode(self): name, expected, args, kwargs = test_data[0] self.assertIsInstance( reverse(name, args=args, kwargs=kwargs), six.text_type ) class ResolverTests(unittest.TestCase): @ignore_warnings(category=RemovedInDjango20Warning) def test_resolver_repr(self): """ Test repr of RegexURLResolver, especially when urlconf_name is a list (#17892). """ # Pick a resolver from a namespaced urlconf resolver = get_resolver('urlpatterns_reverse.namespace_urls') sub_resolver = resolver.namespace_dict['test-ns1'][1] self.assertIn('<RegexURLPattern list>', repr(sub_resolver)) def test_reverse_lazy_object_coercion_by_resolve(self): """ Verifies lazy object returned by reverse_lazy is coerced to text by resolve(). Previous to #21043, this would raise a TypeError. """ urls = 'urlpatterns_reverse.named_urls' proxy_url = reverse_lazy('named-url1', urlconf=urls) resolver = get_resolver(urls) try: resolver.resolve(proxy_url) except TypeError: self.fail('Failed to coerce lazy object to text') def test_non_regex(self): """ Verifies that we raise a Resolver404 if what we are resolving doesn't meet the basic requirements of a path to match - i.e., at the very least, it matches the root pattern '^/'. We must never return None from resolve, or we will get a TypeError further down the line. Regression for #10834. """ self.assertRaises(Resolver404, resolve, '') self.assertRaises(Resolver404, resolve, 'a') self.assertRaises(Resolver404, resolve, '\\') self.assertRaises(Resolver404, resolve, '.') def test_404_tried_urls_have_names(self): """ Verifies that the list of URLs that come back from a Resolver404 exception contains a list in the right format for printing out in the DEBUG 404 page with both the patterns and URL names, if available. 
""" urls = 'urlpatterns_reverse.named_urls' # this list matches the expected URL types and names returned when # you try to resolve a non-existent URL in the first level of included # URLs in named_urls.py (e.g., '/included/non-existent-url') url_types_names = [ [{'type': RegexURLPattern, 'name': 'named-url1'}], [{'type': RegexURLPattern, 'name': 'named-url2'}], [{'type': RegexURLPattern, 'name': None}], [{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url3'}], [{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url4'}], [{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': None}], [{'type': RegexURLResolver}, {'type': RegexURLResolver}], ] try: resolve('/included/non-existent-url', urlconf=urls) self.fail('resolve did not raise a 404') except Resolver404 as e: # make sure we at least matched the root ('/') url resolver: self.assertIn('tried', e.args[0]) tried = e.args[0]['tried'] self.assertEqual(len(e.args[0]['tried']), len(url_types_names), 'Wrong number of tried URLs returned. Expected %s, got %s.' % (len(url_types_names), len(e.args[0]['tried']))) for tried, expected in zip(e.args[0]['tried'], url_types_names): for t, e in zip(tried, expected): self.assertIsInstance(t, e['type']), str('%s is not an instance of %s') % (t, e['type']) if 'name' in e: if not e['name']: self.assertIsNone(t.name, 'Expected no URL name but found %s.' % t.name) else: self.assertEqual(t.name, e['name'], 'Wrong URL name. Expected "%s", got "%s".' % (e['name'], t.name)) @override_settings(ROOT_URLCONF='urlpatterns_reverse.reverse_lazy_urls') class ReverseLazyTest(TestCase): def test_redirect_with_lazy_reverse(self): response = self.client.get('/redirect/') self.assertRedirects(response, "/redirected_to/", status_code=302) def test_user_permission_with_lazy_reverse(self): User.objects.create_user('alfred', '[email protected]', password='testpw') response = self.client.get('/login_required_view/') self.assertRedirects(response, "/login/?next=/login_required_view/", status_code=302) self.client.login(username='alfred', password='testpw') response = self.client.get('/login_required_view/') self.assertEqual(response.status_code, 200) def test_inserting_reverse_lazy_into_string(self): self.assertEqual( 'Some URL: %s' % reverse_lazy('some-login-page'), 'Some URL: /login/' ) if six.PY2: self.assertEqual( b'Some URL: %s' % reverse_lazy('some-login-page'), 'Some URL: /login/' ) class ReverseLazySettingsTest(AdminScriptTestCase): """ Test that reverse_lazy can be used in settings without causing a circular import error. 
""" def setUp(self): self.write_settings('settings.py', extra=""" from django.core.urlresolvers import reverse_lazy LOGIN_URL = reverse_lazy('login')""") def tearDown(self): self.remove_settings('settings.py') def test_lazy_in_settings(self): out, err = self.run_manage(['check']) self.assertNoOutput(err) @override_settings(ROOT_URLCONF='urlpatterns_reverse.urls') class ReverseShortcutTests(SimpleTestCase): def test_redirect_to_object(self): # We don't really need a model; just something with a get_absolute_url class FakeObj(object): def get_absolute_url(self): return "/hi-there/" res = redirect(FakeObj()) self.assertIsInstance(res, HttpResponseRedirect) self.assertEqual(res.url, '/hi-there/') res = redirect(FakeObj(), permanent=True) self.assertIsInstance(res, HttpResponsePermanentRedirect) self.assertEqual(res.url, '/hi-there/') def test_redirect_to_view_name(self): res = redirect('hardcoded2') self.assertEqual(res.url, '/hardcoded/doc.pdf') res = redirect('places', 1) self.assertEqual(res.url, '/places/1/') res = redirect('headlines', year='2008', month='02', day='17') self.assertEqual(res.url, '/headlines/2008.02.17/') self.assertRaises(NoReverseMatch, redirect, 'not-a-view') def test_redirect_to_url(self): res = redirect('/foo/') self.assertEqual(res.url, '/foo/') res = redirect('http://example.com/') self.assertEqual(res.url, 'http://example.com/') # Assert that we can redirect using UTF-8 strings res = redirect('/æøå/abc/') self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5/abc/') # Assert that no imports are attempted when dealing with a relative path # (previously, the below would resolve in a UnicodeEncodeError from __import__ ) res = redirect('/æøå.abc/') self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5.abc/') res = redirect('os.path') self.assertEqual(res.url, 'os.path') def test_no_illegal_imports(self): # modules that are not listed in urlpatterns should not be importable redirect("urlpatterns_reverse.nonimported_module.view") self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules) @ignore_warnings(category=RemovedInDjango110Warning) def test_reverse_by_path_nested(self): # Views that are added to urlpatterns using include() should be # reversible by dotted path. 
self.assertEqual(reverse('urlpatterns_reverse.views.nested_view'), '/includes/nested_path/') def test_redirect_view_object(self): from .views import absolute_kwargs_view res = redirect(absolute_kwargs_view) self.assertEqual(res.url, '/absolute_arg_view/') self.assertRaises(NoReverseMatch, redirect, absolute_kwargs_view, wrong_argument=None) @override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls') @ignore_warnings(category=RemovedInDjango20Warning) class NamespaceTests(SimpleTestCase): def test_ambiguous_object(self): "Names deployed via dynamic URL objects that require namespaces can't be resolved" self.assertRaises(NoReverseMatch, reverse, 'urlobject-view') self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', args=[37, 42]) self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', kwargs={'arg1': 42, 'arg2': 37}) def test_ambiguous_urlpattern(self): "Names deployed via dynamic URL objects that require namespaces can't be resolved" self.assertRaises(NoReverseMatch, reverse, 'inner-nothing') self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', args=[37, 42]) self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', kwargs={'arg1': 42, 'arg2': 37}) def test_non_existent_namespace(self): "Non-existent namespaces raise errors" self.assertRaises(NoReverseMatch, reverse, 'blahblah:urlobject-view') self.assertRaises(NoReverseMatch, reverse, 'test-ns1:blahblah:urlobject-view') def test_normal_name(self): "Normal lookups work as expected" self.assertEqual('/normal/', reverse('normal-view')) self.assertEqual('/normal/37/42/', reverse('normal-view', args=[37, 42])) self.assertEqual('/normal/42/37/', reverse('normal-view', kwargs={'arg1': 42, 'arg2': 37})) self.assertEqual('/+%5C$*/', reverse('special-view')) def test_simple_included_name(self): "Normal lookups work on names included from other patterns" self.assertEqual('/included/normal/', reverse('inc-normal-view')) self.assertEqual('/included/normal/37/42/', reverse('inc-normal-view', args=[37, 42])) self.assertEqual('/included/normal/42/37/', reverse('inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})) self.assertEqual('/included/+%5C$*/', reverse('inc-special-view')) def test_namespace_object(self): "Dynamic URL objects can be found using a namespace" self.assertEqual('/test1/inner/', reverse('test-ns1:urlobject-view')) self.assertEqual('/test1/inner/37/42/', reverse('test-ns1:urlobject-view', args=[37, 42])) self.assertEqual('/test1/inner/42/37/', reverse('test-ns1:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})) self.assertEqual('/test1/inner/+%5C$*/', reverse('test-ns1:urlobject-special-view')) def test_app_object(self): "Dynamic URL objects can return a (pattern, app_name) 2-tuple, and include() can set the namespace" self.assertEqual('/newapp1/inner/', reverse('new-ns1:urlobject-view')) self.assertEqual('/newapp1/inner/37/42/', reverse('new-ns1:urlobject-view', args=[37, 42])) self.assertEqual('/newapp1/inner/42/37/', reverse('new-ns1:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})) self.assertEqual('/newapp1/inner/+%5C$*/', reverse('new-ns1:urlobject-special-view')) def test_app_object_default_namespace(self): "Namespace defaults to app_name when including a (pattern, app_name) 2-tuple" self.assertEqual('/new-default/inner/', reverse('newapp:urlobject-view')) self.assertEqual('/new-default/inner/37/42/', reverse('newapp:urlobject-view', args=[37, 42])) self.assertEqual('/new-default/inner/42/37/', reverse('newapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})) 
self.assertEqual('/new-default/inner/+%5C$*/', reverse('newapp:urlobject-special-view')) def test_embedded_namespace_object(self): "Namespaces can be installed anywhere in the URL pattern tree" self.assertEqual('/included/test3/inner/', reverse('test-ns3:urlobject-view')) self.assertEqual('/included/test3/inner/37/42/', reverse('test-ns3:urlobject-view', args=[37, 42])) self.assertEqual('/included/test3/inner/42/37/', reverse('test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})) self.assertEqual('/included/test3/inner/+%5C$*/', reverse('test-ns3:urlobject-special-view')) def test_namespace_pattern(self): "Namespaces can be applied to include()'d urlpatterns" self.assertEqual('/ns-included1/normal/', reverse('inc-ns1:inc-normal-view')) self.assertEqual('/ns-included1/normal/37/42/', reverse('inc-ns1:inc-normal-view', args=[37, 42])) self.assertEqual('/ns-included1/normal/42/37/', reverse('inc-ns1:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})) self.assertEqual('/ns-included1/+%5C$*/', reverse('inc-ns1:inc-special-view')) def test_app_name_pattern(self): "Namespaces can be applied to include()'d urlpatterns that set an app_name attribute" self.assertEqual('/app-included1/normal/', reverse('app-ns1:inc-normal-view')) self.assertEqual('/app-included1/normal/37/42/', reverse('app-ns1:inc-normal-view', args=[37, 42])) self.assertEqual('/app-included1/normal/42/37/', reverse('app-ns1:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})) self.assertEqual('/app-included1/+%5C$*/', reverse('app-ns1:inc-special-view')) def test_namespace_pattern_with_variable_prefix(self): "When using an include with namespaces when there is a regex variable in front of it" self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', kwargs={'outer': 42})) self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', args=[42])) self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', kwargs={'outer': 42, 'arg1': 37, 'arg2': 4})) self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', args=[42, 37, 4])) self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', kwargs={'outer': 42})) self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', args=[42])) def test_multiple_namespace_pattern(self): "Namespaces can be embedded" self.assertEqual('/ns-included1/test3/inner/', reverse('inc-ns1:test-ns3:urlobject-view')) self.assertEqual('/ns-included1/test3/inner/37/42/', reverse('inc-ns1:test-ns3:urlobject-view', args=[37, 42])) self.assertEqual('/ns-included1/test3/inner/42/37/', reverse('inc-ns1:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})) self.assertEqual('/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:test-ns3:urlobject-special-view')) def test_nested_namespace_pattern(self): "Namespaces can be nested" self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view')) self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/37/42/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', args=[37, 42])) self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/42/37/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})) self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view')) def test_app_lookup_object(self): "A default application namespace 
can be used for lookup" self.assertEqual('/default/inner/', reverse('testapp:urlobject-view')) self.assertEqual('/default/inner/37/42/', reverse('testapp:urlobject-view', args=[37, 42])) self.assertEqual('/default/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})) self.assertEqual('/default/inner/+%5C$*/', reverse('testapp:urlobject-special-view')) def test_app_lookup_object_with_default(self): "A default application namespace is sensitive to the 'current' app can be used for lookup" self.assertEqual('/included/test3/inner/', reverse('testapp:urlobject-view', current_app='test-ns3')) self.assertEqual('/included/test3/inner/37/42/', reverse('testapp:urlobject-view', args=[37, 42], current_app='test-ns3')) self.assertEqual('/included/test3/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='test-ns3')) self.assertEqual('/included/test3/inner/+%5C$*/', reverse('testapp:urlobject-special-view', current_app='test-ns3')) def test_app_lookup_object_without_default(self): "An application namespace without a default is sensitive to the 'current' app can be used for lookup" self.assertEqual('/other2/inner/', reverse('nodefault:urlobject-view')) self.assertEqual('/other2/inner/37/42/', reverse('nodefault:urlobject-view', args=[37, 42])) self.assertEqual('/other2/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})) self.assertEqual('/other2/inner/+%5C$*/', reverse('nodefault:urlobject-special-view')) self.assertEqual('/other1/inner/', reverse('nodefault:urlobject-view', current_app='other-ns1')) self.assertEqual('/other1/inner/37/42/', reverse('nodefault:urlobject-view', args=[37, 42], current_app='other-ns1')) self.assertEqual('/other1/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='other-ns1')) self.assertEqual('/other1/inner/+%5C$*/', reverse('nodefault:urlobject-special-view', current_app='other-ns1')) def test_special_chars_namespace(self): self.assertEqual('/+%5C$*/included/normal/', reverse('special:inc-normal-view')) self.assertEqual('/+%5C$*/included/normal/37/42/', reverse('special:inc-normal-view', args=[37, 42])) self.assertEqual('/+%5C$*/included/normal/42/37/', reverse('special:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})) self.assertEqual('/+%5C$*/included/+%5C$*/', reverse('special:inc-special-view')) def test_namespaces_with_variables(self): "Namespace prefixes can capture variables: see #15900" self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', kwargs={'outer': '70'})) self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', kwargs={'outer': '78', 'extra': 'foobar'})) self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', args=['70'])) self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', args=['78', 'foobar'])) def test_nested_app_lookup(self): "A nested current_app should be split in individual namespaces (#24904)" self.assertEqual('/ns-included1/test4/inner/', reverse('inc-ns1:testapp:urlobject-view')) self.assertEqual('/ns-included1/test4/inner/37/42/', reverse('inc-ns1:testapp:urlobject-view', args=[37, 42])) self.assertEqual( '/ns-included1/test4/inner/42/37/', reverse('inc-ns1:testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}) ) self.assertEqual('/ns-included1/test4/inner/+%5C$*/', reverse('inc-ns1:testapp:urlobject-special-view')) self.assertEqual( '/ns-included1/test3/inner/', reverse('inc-ns1:testapp:urlobject-view', current_app='inc-ns1:test-ns3') ) 
self.assertEqual( '/ns-included1/test3/inner/37/42/', reverse('inc-ns1:testapp:urlobject-view', args=[37, 42], current_app='inc-ns1:test-ns3') ) self.assertEqual( '/ns-included1/test3/inner/42/37/', reverse('inc-ns1:testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='inc-ns1:test-ns3') ) self.assertEqual( '/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:testapp:urlobject-special-view', current_app='inc-ns1:test-ns3') ) def test_current_app_no_partial_match(self): "current_app should either match the whole path or shouldn't be used" self.assertEqual( '/ns-included1/test4/inner/', reverse('inc-ns1:testapp:urlobject-view', current_app='non-existant:test-ns3') ) self.assertEqual( '/ns-included1/test4/inner/37/42/', reverse('inc-ns1:testapp:urlobject-view', args=[37, 42], current_app='non-existant:test-ns3') ) self.assertEqual( '/ns-included1/test4/inner/42/37/', reverse('inc-ns1:testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='non-existant:test-ns3') ) self.assertEqual( '/ns-included1/test4/inner/+%5C$*/', reverse('inc-ns1:testapp:urlobject-special-view', current_app='non-existant:test-ns3') ) @override_settings(ROOT_URLCONF=urlconf_outer.__name__) class RequestURLconfTests(SimpleTestCase): def test_urlconf(self): response = self.client.get('/test/me/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/') response = self.client.get('/inner_urlconf/second_test/') self.assertEqual(response.status_code, 200) response = self.client.get('/second_test/') self.assertEqual(response.status_code, 404) @override_settings( MIDDLEWARE_CLASSES=[ '%s.ChangeURLconfMiddleware' % middleware.__name__, ] ) def test_urlconf_overridden(self): response = self.client.get('/test/me/') self.assertEqual(response.status_code, 404) response = self.client.get('/inner_urlconf/second_test/') self.assertEqual(response.status_code, 404) response = self.client.get('/second_test/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'outer:,inner:/second_test/') @override_settings( MIDDLEWARE_CLASSES=[ '%s.NullChangeURLconfMiddleware' % middleware.__name__, ] ) def test_urlconf_overridden_with_null(self): """ Overriding request.urlconf with None will fall back to the default URLconf. """ response = self.client.get('/test/me/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/') response = self.client.get('/inner_urlconf/second_test/') self.assertEqual(response.status_code, 200) response = self.client.get('/second_test/') self.assertEqual(response.status_code, 404) @override_settings( MIDDLEWARE_CLASSES=[ '%s.ChangeURLconfMiddleware' % middleware.__name__, '%s.ReverseInnerInResponseMiddleware' % middleware.__name__, ] ) def test_reverse_inner_in_response_middleware(self): """ Test reversing an URL from the *overridden* URLconf from inside a response middleware. """ response = self.client.get('/second_test/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'/second_test/') @override_settings( MIDDLEWARE_CLASSES=[ '%s.ChangeURLconfMiddleware' % middleware.__name__, '%s.ReverseOuterInResponseMiddleware' % middleware.__name__, ] ) def test_reverse_outer_in_response_middleware(self): """ Test reversing an URL from the *default* URLconf from inside a response middleware. """ message = "Reverse for 'outer' with arguments '()' and keyword arguments '{}' not found." 
with self.assertRaisesMessage(NoReverseMatch, message): self.client.get('/second_test/') @override_settings( MIDDLEWARE_CLASSES=[ '%s.ChangeURLconfMiddleware' % middleware.__name__, '%s.ReverseInnerInStreaming' % middleware.__name__, ] ) def test_reverse_inner_in_streaming(self): """ Test reversing an URL from the *overridden* URLconf from inside a streaming response. """ response = self.client.get('/second_test/') self.assertEqual(response.status_code, 200) self.assertEqual(b''.join(response), b'/second_test/') @override_settings( MIDDLEWARE_CLASSES=[ '%s.ChangeURLconfMiddleware' % middleware.__name__, '%s.ReverseOuterInStreaming' % middleware.__name__, ] ) def test_reverse_outer_in_streaming(self): """ Test reversing an URL from the *default* URLconf from inside a streaming response. """ message = "Reverse for 'outer' with arguments '()' and keyword arguments '{}' not found." with self.assertRaisesMessage(NoReverseMatch, message): self.client.get('/second_test/') b''.join(self.client.get('/second_test/')) class ErrorHandlerResolutionTests(SimpleTestCase): """Tests for handler400, handler404 and handler500""" def setUp(self): urlconf = 'urlpatterns_reverse.urls_error_handlers' urlconf_callables = 'urlpatterns_reverse.urls_error_handlers_callables' self.resolver = RegexURLResolver(r'^$', urlconf) self.callable_resolver = RegexURLResolver(r'^$', urlconf_callables) def test_named_handlers(self): handler = (empty_view, {}) self.assertEqual(self.resolver.resolve_error_handler(400), handler) self.assertEqual(self.resolver.resolve_error_handler(404), handler) self.assertEqual(self.resolver.resolve_error_handler(500), handler) def test_callable_handers(self): handler = (empty_view, {}) self.assertEqual(self.callable_resolver.resolve_error_handler(400), handler) self.assertEqual(self.callable_resolver.resolve_error_handler(404), handler) self.assertEqual(self.callable_resolver.resolve_error_handler(500), handler) @override_settings(ROOT_URLCONF='urlpatterns_reverse.urls_without_full_import') class DefaultErrorHandlerTests(SimpleTestCase): def test_default_handler(self): "If the urls.py doesn't specify handlers, the defaults are used" try: response = self.client.get('/test/') self.assertEqual(response.status_code, 404) except AttributeError: self.fail("Shouldn't get an AttributeError due to undefined 404 handler") try: self.assertRaises(ValueError, self.client.get, '/bad_view/') except AttributeError: self.fail("Shouldn't get an AttributeError due to undefined 500 handler") @override_settings(ROOT_URLCONF=None) class NoRootUrlConfTests(SimpleTestCase): """Tests for handler404 and handler500 if urlconf is None""" def test_no_handler_exception(self): self.assertRaises(ImproperlyConfigured, self.client.get, '/test/me/') @override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls') class ResolverMatchTests(SimpleTestCase): @ignore_warnings(category=RemovedInDjango20Warning) def test_urlpattern_resolve(self): for path, url_name, app_name, namespace, view_name, func, args, kwargs in resolve_test_data: # Test legacy support for extracting "function, args, kwargs" match_func, match_args, match_kwargs = resolve(path) self.assertEqual(match_func, func) self.assertEqual(match_args, args) self.assertEqual(match_kwargs, kwargs) # Test ResolverMatch capabilities. 
match = resolve(path) self.assertEqual(match.__class__, ResolverMatch) self.assertEqual(match.url_name, url_name) self.assertEqual(match.app_name, app_name) self.assertEqual(match.namespace, namespace) self.assertEqual(match.view_name, view_name) self.assertEqual(match.func, func) self.assertEqual(match.args, args) self.assertEqual(match.kwargs, kwargs) # ... and for legacy purposes: self.assertEqual(match[0], func) self.assertEqual(match[1], args) self.assertEqual(match[2], kwargs) @ignore_warnings(category=RemovedInDjango20Warning) def test_resolver_match_on_request(self): response = self.client.get('/resolver_match/') resolver_match = response.resolver_match self.assertEqual(resolver_match.url_name, 'test-resolver-match') def test_resolver_match_on_request_before_resolution(self): request = HttpRequest() self.assertIsNone(request.resolver_match) @override_settings(ROOT_URLCONF='urlpatterns_reverse.erroneous_urls') class ErroneousViewTests(SimpleTestCase): def test_erroneous_resolve(self): self.assertRaises(ImportError, self.client.get, '/erroneous_inner/') self.assertRaises(ImportError, self.client.get, '/erroneous_outer/') self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_inner/') self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_outer/') self.assertRaises(ViewDoesNotExist, self.client.get, '/uncallable-dotted/') self.assertRaises(ViewDoesNotExist, self.client.get, '/uncallable-object/') # Regression test for #21157 self.assertRaises(ImportError, self.client.get, '/erroneous_unqualified/') def test_erroneous_reverse(self): """ Ensure that a useful exception is raised when a regex is invalid in the URLConf (#6170). """ # The regex error will be hit before NoReverseMatch can be raised self.assertRaises(ImproperlyConfigured, reverse, 'whatever blah blah') class ViewLoadingTests(SimpleTestCase): def test_view_loading(self): self.assertEqual(get_callable('urlpatterns_reverse.views.empty_view'), empty_view) # passing a callable should return the callable self.assertEqual(get_callable(empty_view), empty_view) def test_exceptions(self): # A missing view (identified by an AttributeError) should raise # ViewDoesNotExist, ... with six.assertRaisesRegex(self, ViewDoesNotExist, ".*View does not exist in.*"): get_callable('urlpatterns_reverse.views.i_should_not_exist') # ... but if the AttributeError is caused by something else don't # swallow it. with self.assertRaises(AttributeError): get_callable('urlpatterns_reverse.views_broken.i_am_broken') class IncludeTests(SimpleTestCase): url_patterns = [ url(r'^inner/$', views.empty_view, name='urlobject-view'), url(r'^inner/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='urlobject-view'), url(r'^inner/\+\\\$\*/$', views.empty_view, name='urlobject-special-view'), ] app_urls = URLObject('inc-app') def test_include_app_name_but_no_namespace(self): msg = "Must specify a namespace if specifying app_name." 
with self.assertRaisesMessage(ValueError, msg): include(self.url_patterns, app_name='bar') def test_include_urls(self): self.assertEqual(include(self.url_patterns), (self.url_patterns, None, None)) @ignore_warnings(category=RemovedInDjango20Warning) def test_include_namespace(self): # no app_name -> deprecated self.assertEqual(include(self.url_patterns, 'namespace'), (self.url_patterns, None, 'namespace')) @ignore_warnings(category=RemovedInDjango20Warning) def test_include_namespace_app_name(self): # app_name argument to include -> deprecated self.assertEqual( include(self.url_patterns, 'namespace', 'app_name'), (self.url_patterns, 'app_name', 'namespace') ) @ignore_warnings(category=RemovedInDjango20Warning) def test_include_3_tuple(self): # 3-tuple -> deprecated self.assertEqual( include((self.url_patterns, 'app_name', 'namespace')), (self.url_patterns, 'app_name', 'namespace') ) def test_include_2_tuple(self): self.assertEqual( include((self.url_patterns, 'app_name')), (self.url_patterns, 'app_name', 'app_name') ) def test_include_2_tuple_namespace(self): self.assertEqual( include((self.url_patterns, 'app_name'), namespace='namespace'), (self.url_patterns, 'app_name', 'namespace') ) def test_include_app_name(self): self.assertEqual( include(self.app_urls), (self.app_urls, 'inc-app', 'inc-app') ) def test_include_app_name_namespace(self): self.assertEqual( include(self.app_urls, 'namespace'), (self.app_urls, 'inc-app', 'namespace') ) @override_settings(ROOT_URLCONF='urlpatterns_reverse.urls') class LookaheadTests(SimpleTestCase): def test_valid_resolve(self): test_urls = [ '/lookahead-/a-city/', '/lookbehind-/a-city/', '/lookahead+/a-city/', '/lookbehind+/a-city/', ] for test_url in test_urls: match = resolve(test_url) self.assertEqual(match.kwargs, {'city': 'a-city'}) def test_invalid_resolve(self): test_urls = [ '/lookahead-/not-a-city/', '/lookbehind-/not-a-city/', '/lookahead+/other-city/', '/lookbehind+/other-city/', ] for test_url in test_urls: with self.assertRaises(Resolver404): resolve(test_url) def test_valid_reverse(self): url = reverse('lookahead-positive', kwargs={'city': 'a-city'}) self.assertEqual(url, '/lookahead+/a-city/') url = reverse('lookahead-negative', kwargs={'city': 'a-city'}) self.assertEqual(url, '/lookahead-/a-city/') url = reverse('lookbehind-positive', kwargs={'city': 'a-city'}) self.assertEqual(url, '/lookbehind+/a-city/') url = reverse('lookbehind-negative', kwargs={'city': 'a-city'}) self.assertEqual(url, '/lookbehind-/a-city/') def test_invalid_reverse(self): with self.assertRaises(NoReverseMatch): reverse('lookahead-positive', kwargs={'city': 'other-city'}) with self.assertRaises(NoReverseMatch): reverse('lookahead-negative', kwargs={'city': 'not-a-city'}) with self.assertRaises(NoReverseMatch): reverse('lookbehind-positive', kwargs={'city': 'other-city'}) with self.assertRaises(NoReverseMatch): reverse('lookbehind-negative', kwargs={'city': 'not-a-city'})
bsd-3-clause
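The record above exercises Django's resolve()/reverse() machinery and the ResolverMatch object its tests assert on. As a minimal sketch of that API (assuming a configured Django 1.8/1.9-era project; the pattern name and path are hypothetical, not taken from the record):

from django.core.urlresolvers import resolve, reverse, Resolver404

path = reverse('article-detail', kwargs={'pk': 42})    # hypothetical named pattern
match = resolve(path)                                  # raises Resolver404 if nothing matches
print(match.view_name, match.namespace, match.kwargs)
view_func, args, kwargs = match                        # legacy 3-tuple unpacking still works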
nagisa/Feeds
gdist/gschemas.py
1
2161
import glob import os from distutils.dep_util import newer from distutils.core import Command from distutils.spawn import find_executable from distutils.util import change_root class build_gschemas(Command): """build GSettings schema files Copy the .gschema.xml files into the build tree so they can later be installed and compiled for dconf. """ description = "build gschemas used for dconf" user_options = [] build_base = None def initialize_options(self): pass def finalize_options(self): self.gschemas_directory = self.distribution.gschemas self.set_undefined_options('build', ('build_base', 'build_base')) def run(self): if find_executable("glib-compile-schemas") is None: raise SystemExit("Error: 'glib-compile-schemas' not found.") basepath = os.path.join(self.build_base, 'share', 'glib-2.0', 'schemas') self.copy_tree(self.gschemas_directory, basepath) class install_gschemas(Command): """install GSettings schema files Copy the schema files into $prefix/share/glib-2.0/schemas and run glib-compile-schemas on the installation directory. """ description = "install gschemas used for dconf" user_options = [] skip_build = None build_base = None install_base = None root = None def initialize_options(self): pass def finalize_options(self): self.set_undefined_options('build', ('build_base', 'build_base')) self.set_undefined_options( 'install', ('root', 'root'), ('install_base', 'install_base'), ('skip_build', 'skip_build')) def run(self): if not self.skip_build: self.run_command('build_gschemas') src = os.path.join(self.build_base, 'share', 'glib-2.0', 'schemas') dest = os.path.join(self.install_base, 'share', 'glib-2.0', 'schemas') if self.root is not None: dest = change_root(self.root, dest) self.copy_tree(src, dest) self.spawn(['glib-compile-schemas', dest]) __all__ = ["build_gschemas", "install_gschemas"]
gpl-2.0
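The gschemas commands above read a `gschemas` attribute from the distribution object. A minimal, hypothetical setup.py wiring (the GDistribution subclass, project name and paths are assumptions for illustration, not part of the record):

from distutils.core import setup
from distutils.dist import Distribution
from gdist.gschemas import build_gschemas, install_gschemas

class GDistribution(Distribution):
    # pre-declare the attribute so setup(gschemas=...) is accepted without a warning
    def __init__(self, attrs=None):
        self.gschemas = None
        Distribution.__init__(self, attrs)

setup(
    distclass=GDistribution,
    name="feeds", version="0.1",
    gschemas="data/gschemas",          # directory holding *.gschema.xml files
    cmdclass={"build_gschemas": build_gschemas,
              "install_gschemas": install_gschemas},
)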
Distrotech/samba
source4/scripting/python/samba/tests/dcerpc/registry.py
20
1923
#!/usr/bin/env python # Unix SMB/CIFS implementation. # Copyright (C) Jelmer Vernooij <[email protected]> 2008 # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # """Tests for samba.dcerpc.registry.""" from samba.dcerpc import winreg from samba.tests import RpcInterfaceTestCase class WinregTests(RpcInterfaceTestCase): def setUp(self): super(WinregTests, self).setUp() self.conn = winreg.winreg("ncalrpc:", self.get_loadparm(), self.get_credentials()) def get_hklm(self): return self.conn.OpenHKLM(None, winreg.KEY_QUERY_VALUE | winreg.KEY_ENUMERATE_SUB_KEYS) def test_hklm(self): handle = self.conn.OpenHKLM(None, winreg.KEY_QUERY_VALUE | winreg.KEY_ENUMERATE_SUB_KEYS) self.conn.CloseKey(handle) def test_getversion(self): handle = self.get_hklm() version = self.conn.GetVersion(handle) self.assertEquals(int, version.__class__) self.conn.CloseKey(handle) def test_getkeyinfo(self): handle = self.conn.OpenHKLM(None, winreg.KEY_QUERY_VALUE | winreg.KEY_ENUMERATE_SUB_KEYS) x = self.conn.QueryInfoKey(handle, winreg.String()) self.assertEquals(9, len(x)) # should return a 9-tuple self.conn.CloseKey(handle)
gpl-3.0
geekboxzone/lollipop_external_chromium_org_third_party_WebKit
Tools/Scripts/webkitpy/style/main_unittest.py
53
3459
# Copyright (C) 2010 Chris Jerdonek ([email protected]) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest from main import change_directory from webkitpy.common.system.filesystem_mock import MockFileSystem from webkitpy.common.system.logtesting import LogTesting class ChangeDirectoryTest(unittest.TestCase): _original_directory = "/original" _checkout_root = "/WebKit" def setUp(self): self._log = LogTesting.setUp(self) self.filesystem = MockFileSystem(dirs=[self._original_directory, self._checkout_root], cwd=self._original_directory) def tearDown(self): self._log.tearDown() def _change_directory(self, paths, checkout_root): return change_directory(self.filesystem, paths=paths, checkout_root=checkout_root) def _assert_result(self, actual_return_value, expected_return_value, expected_log_messages, expected_current_directory): self.assertEqual(actual_return_value, expected_return_value) self._log.assertMessages(expected_log_messages) self.assertEqual(self.filesystem.getcwd(), expected_current_directory) def test_paths_none(self): paths = self._change_directory(checkout_root=self._checkout_root, paths=None) self._assert_result(paths, None, [], self._checkout_root) def test_paths_convertible(self): paths = ["/WebKit/foo1.txt", "/WebKit/foo2.txt"] paths = self._change_directory(checkout_root=self._checkout_root, paths=paths) self._assert_result(paths, ["foo1.txt", "foo2.txt"], [], self._checkout_root) def test_with_scm_paths_unconvertible(self): paths = ["/WebKit/foo1.txt", "/outside/foo2.txt"] paths = self._change_directory(checkout_root=self._checkout_root, paths=paths) log_messages = [ """WARNING: Path-dependent style checks may not work correctly: One of the given paths is outside the WebKit checkout of the current working directory: Path: /outside/foo2.txt Checkout root: /WebKit Pass only files below the checkout root to ensure correct results. See the help documentation for more info. """] self._assert_result(paths, paths, log_messages, self._original_directory)
bsd-3-clause
dfalt974/SickRage
lib/sqlalchemy/testing/config.py
76
2116
# testing/config.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import collections requirements = None db = None db_url = None db_opts = None file_config = None _current = None class Config(object): def __init__(self, db, db_opts, options, file_config): self.db = db self.db_opts = db_opts self.options = options self.file_config = file_config _stack = collections.deque() _configs = {} @classmethod def register(cls, db, db_opts, options, file_config, namespace): """add a config as one of the global configs. If there are no configs set up yet, this config also gets set as the "_current". """ cfg = Config(db, db_opts, options, file_config) global _current if not _current: cls.set_as_current(cfg, namespace) cls._configs[cfg.db.name] = cfg cls._configs[(cfg.db.name, cfg.db.dialect)] = cfg cls._configs[cfg.db] = cfg @classmethod def set_as_current(cls, config, namespace): global db, _current, db_url _current = config db_url = config.db.url namespace.db = db = config.db @classmethod def push_engine(cls, db, namespace): assert _current, "Can't push without a default Config set up" cls.push( Config(db, _current.db_opts, _current.options, _current.file_config), namespace ) @classmethod def push(cls, config, namespace): cls._stack.append(_current) cls.set_as_current(config, namespace) @classmethod def reset(cls, namespace): if cls._stack: cls.set_as_current(cls._stack[0], namespace) cls._stack.clear() @classmethod def all_configs(cls): for cfg in set(cls._configs.values()): yield cfg @classmethod def all_dbs(cls): for cfg in cls.all_configs(): yield cfg.db
gpl-3.0
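A rough sketch of how the Config registry above behaves, using stand-in objects instead of real SQLAlchemy engines (the FakeEngine class and the namespace module are invented for illustration, and the import path assumes the module shown above is installed as sqlalchemy.testing.config):

import types
from sqlalchemy.testing.config import Config

class FakeEngine(object):
    def __init__(self, name, url):
        self.name, self.dialect, self.url = name, object(), url

ns = types.ModuleType("ns")                     # anything whose .db can be assigned
Config.register(FakeEngine("sqlite", "sqlite://"), {}, {}, None, ns)
print(ns.db.name)                               # 'sqlite' - first config becomes current
Config.push_engine(FakeEngine("postgresql", "postgresql://host/db"), ns)
print(ns.db.name)                               # 'postgresql'
Config.reset(ns)                                # pop back to the first registered config
print(ns.db.name)                               # 'sqlite'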
tobspr/LUI
Demos/B_BlockText.py
1
2782
from DemoFramework import DemoFramework from LUILabel import LUILabel from LUIBlockText import LUIBlockText from LUIScrollableRegion import LUIScrollableRegion import random f = DemoFramework() f.prepare_demo("LUIBlockText") # Constructor f.add_constructor_parameter("text", "u'Label'") f.add_constructor_parameter("shadow", "True") f.add_constructor_parameter("font_size", "14") f.add_constructor_parameter("font", "'label'") # Functions f.add_public_function("clear", []) f.add_public_function("set_text", [("text", "string")]) f.add_public_function("set_wrap", [("wrap", "boolean")]) f.add_public_function("set_width", [("width", "integer")]) f.add_property("labels", "list") # Events f.construct_sourcecode("LUIBlockText") text_container = LUIScrollableRegion( parent=f.get_widget_node(), width=340, height=190, padding=0, ) #TODO: Support newline through charcode 10 #TODO: If space causes next line, dont print it # Create a new label label = LUIBlockText(parent=text_container, width=310) # Paragraph with no line breaks label.add( text='''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed malesuada sit amet erat non gravida. Pellentesque sit amet cursus risus Sed egestas, nulla in tempor cursus, ante felis cursus magna, nec vehicula nisi nulla eu nulla.''', color=(0.9,0.9,.9), wordwrap=True, padding=5, ) # Paragraph with some linebreaks label.add( text='''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed malesuada sit amet erat non gravida. Pellentesque sit amet cursus risus Sed egestas, nulla in tempor cursus, ante felis cursus magna, nec vehicula nisi nulla eu nulla. Nulla sed pellentesque erat. Morbi facilisis at erat id auctor. Phasellus euismod facilisis sem, at molestie velit condimentum sit amet. Nulla posuere rhoncus aliquam.''', color=(0.9,0.9,.9), wordwrap=True, padding=5, ) # Paragraph with no spaces or linebreaks label.add( text='''Loremipsumolorsitamet,consecteturadipiscingelit.Sedmalesuadasitameteratnongravida.PellentesquesitametcursusrisusSedegestas,nullaintemporcursus,antefeliscursusmagna,necvehiculanisinullaeunulla.''', color=(0.9,0.9,.9), wordwrap=True, padding=5, ) def setWidth(width): label.set_width(width) text_container.on_element_added() def setWrap(wrap): label.set_wrap(wrap) text_container.on_element_added() f.set_actions({ "Set Random Text": lambda: label.set_text(unicode(random.randint(100, 10000))), "Set Random Color": lambda: label.set_color((random.random(), random.random(), random.random(), 1)), "Clear": lambda: label.clear(), "Smaller": lambda: setWidth(200), "Larger": lambda: setWidth(310), "Wrapping on": lambda: setWrap(True), "Wrapping off": lambda: setWrap(False), }) base.run()
mit
jmetzen/scikit-learn
sklearn/base.py
22
18131
"""Base classes for all estimators.""" # Author: Gael Varoquaux <[email protected]> # License: BSD 3 clause import copy import warnings import numpy as np from scipy import sparse from .externals import six from .utils.fixes import signature from .utils.deprecation import deprecated from .exceptions import ChangedBehaviorWarning as ChangedBehaviorWarning_ class ChangedBehaviorWarning(ChangedBehaviorWarning_): pass ChangedBehaviorWarning = deprecated("ChangedBehaviorWarning has been moved " "into the sklearn.exceptions module. " "It will not be available here from " "version 0.19")(ChangedBehaviorWarning) ############################################################################## def clone(estimator, safe=True): """Constructs a new estimator with the same parameters. Clone does a deep copy of the model in an estimator without actually copying attached data. It yields a new estimator with the same parameters that has not been fit on any data. Parameters ---------- estimator: estimator object, or list, tuple or set of objects The estimator or group of estimators to be cloned safe: boolean, optional If safe is false, clone will fall back to a deepcopy on objects that are not estimators. """ estimator_type = type(estimator) # XXX: not handling dictionaries if estimator_type in (list, tuple, set, frozenset): return estimator_type([clone(e, safe=safe) for e in estimator]) elif not hasattr(estimator, 'get_params'): if not safe: return copy.deepcopy(estimator) else: raise TypeError("Cannot clone object '%s' (type %s): " "it does not seem to be a scikit-learn estimator " "as it does not implement a 'get_params' methods." % (repr(estimator), type(estimator))) klass = estimator.__class__ new_object_params = estimator.get_params(deep=False) for name, param in six.iteritems(new_object_params): new_object_params[name] = clone(param, safe=False) new_object = klass(**new_object_params) params_set = new_object.get_params(deep=False) # quick sanity check of the parameters of the clone for name in new_object_params: param1 = new_object_params[name] param2 = params_set[name] if isinstance(param1, np.ndarray): # For most ndarrays, we do not test for complete equality if not isinstance(param2, type(param1)): equality_test = False elif (param1.ndim > 0 and param1.shape[0] > 0 and isinstance(param2, np.ndarray) and param2.ndim > 0 and param2.shape[0] > 0): equality_test = ( param1.shape == param2.shape and param1.dtype == param2.dtype # We have to use '.flat' for 2D arrays and param1.flat[0] == param2.flat[0] and param1.flat[-1] == param2.flat[-1] ) else: equality_test = np.all(param1 == param2) elif sparse.issparse(param1): # For sparse matrices equality doesn't work if not sparse.issparse(param2): equality_test = False elif param1.size == 0 or param2.size == 0: equality_test = ( param1.__class__ == param2.__class__ and param1.size == 0 and param2.size == 0 ) else: equality_test = ( param1.__class__ == param2.__class__ and param1.data[0] == param2.data[0] and param1.data[-1] == param2.data[-1] and param1.nnz == param2.nnz and param1.shape == param2.shape ) else: new_obj_val = new_object_params[name] params_set_val = params_set[name] # The following construct is required to check equality on special # singletons such as np.nan that are not equal to them-selves: equality_test = (new_obj_val == params_set_val or new_obj_val is params_set_val) if not equality_test: raise RuntimeError('Cannot clone object %s, as the constructor ' 'does not seem to set parameter %s' % (estimator, name)) return new_object 
############################################################################### def _pprint(params, offset=0, printer=repr): """Pretty print the dictionary 'params' Parameters ---------- params: dict The dictionary to pretty print offset: int The offset in characters to add at the begin of each line. printer: The function to convert entries to strings, typically the builtin str or repr """ # Do a multi-line justified repr: options = np.get_printoptions() np.set_printoptions(precision=5, threshold=64, edgeitems=2) params_list = list() this_line_length = offset line_sep = ',\n' + (1 + offset // 2) * ' ' for i, (k, v) in enumerate(sorted(six.iteritems(params))): if type(v) is float: # use str for representing floating point numbers # this way we get consistent representation across # architectures and versions. this_repr = '%s=%s' % (k, str(v)) else: # use repr of the rest this_repr = '%s=%s' % (k, printer(v)) if len(this_repr) > 500: this_repr = this_repr[:300] + '...' + this_repr[-100:] if i > 0: if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr): params_list.append(line_sep) this_line_length = len(line_sep) else: params_list.append(', ') this_line_length += 2 params_list.append(this_repr) this_line_length += len(this_repr) np.set_printoptions(**options) lines = ''.join(params_list) # Strip trailing space to avoid nightmare in doctests lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n')) return lines ############################################################################### class BaseEstimator(object): """Base class for all estimators in scikit-learn Notes ----- All estimators should specify all the parameters that can be set at the class level in their ``__init__`` as explicit keyword arguments (no ``*args`` or ``**kwargs``). """ @classmethod def _get_param_names(cls): """Get parameter names for the estimator""" # fetch the constructor or the original constructor before # deprecation wrapping if any init = getattr(cls.__init__, 'deprecated_original', cls.__init__) if init is object.__init__: # No explicit constructor to introspect return [] # introspect the constructor arguments to find the model parameters # to represent init_signature = signature(init) # Consider the constructor parameters excluding 'self' parameters = [p for p in init_signature.parameters.values() if p.name != 'self' and p.kind != p.VAR_KEYWORD] for p in parameters: if p.kind == p.VAR_POSITIONAL: raise RuntimeError("scikit-learn estimators should always " "specify their parameters in the signature" " of their __init__ (no varargs)." " %s with constructor %s doesn't " " follow this convention." % (cls, init_signature)) # Extract and sort argument names excluding 'self' return sorted([p.name for p in parameters]) def get_params(self, deep=True): """Get parameters for this estimator. Parameters ---------- deep: boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values. """ out = dict() for key in self._get_param_names(): # We need deprecation warnings to always be on in order to # catch deprecated param values. # This is set in utils/__init__.py but it gets overwritten # when running under python3 somehow. 
warnings.simplefilter("always", DeprecationWarning) try: with warnings.catch_warnings(record=True) as w: value = getattr(self, key, None) if len(w) and w[0].category == DeprecationWarning: # if the parameter is deprecated, don't show it continue finally: warnings.filters.pop(0) # XXX: should we rather test if instance of estimator? if deep and hasattr(value, 'get_params'): deep_items = value.get_params().items() out.update((key + '__' + k, val) for k, val in deep_items) out[key] = value return out def set_params(self, **params): """Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The former have parameters of the form ``<component>__<parameter>`` so that it's possible to update each component of a nested object. Returns ------- self """ if not params: # Simple optimisation to gain speed (inspect is slow) return self valid_params = self.get_params(deep=True) for key, value in six.iteritems(params): split = key.split('__', 1) if len(split) > 1: # nested objects case name, sub_name = split if name not in valid_params: raise ValueError('Invalid parameter %s for estimator %s. ' 'Check the list of available parameters ' 'with `estimator.get_params().keys()`.' % (name, self)) sub_object = valid_params[name] sub_object.set_params(**{sub_name: value}) else: # simple objects case if key not in valid_params: raise ValueError('Invalid parameter %s for estimator %s. ' 'Check the list of available parameters ' 'with `estimator.get_params().keys()`.' % (key, self.__class__.__name__)) setattr(self, key, value) return self def __repr__(self): class_name = self.__class__.__name__ return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False), offset=len(class_name),),) ############################################################################### class ClassifierMixin(object): """Mixin class for all classifiers in scikit-learn.""" _estimator_type = "classifier" def score(self, X, y, sample_weight=None): """Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters ---------- X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns ------- score : float Mean accuracy of self.predict(X) wrt. y. """ from .metrics import accuracy_score return accuracy_score(y, self.predict(X), sample_weight=sample_weight) ############################################################################### class RegressorMixin(object): """Mixin class for all regression estimators in scikit-learn.""" _estimator_type = "regressor" def score(self, X, y, sample_weight=None): """Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the regression sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual sum of squares ((y_true - y_true.mean()) ** 2).sum(). Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters ---------- X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. 
sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns ------- score : float R^2 of self.predict(X) wrt. y. """ from .metrics import r2_score return r2_score(y, self.predict(X), sample_weight=sample_weight, multioutput='variance_weighted') ############################################################################### class ClusterMixin(object): """Mixin class for all cluster estimators in scikit-learn.""" _estimator_type = "clusterer" def fit_predict(self, X, y=None): """Performs clustering on X and returns cluster labels. Parameters ---------- X : ndarray, shape (n_samples, n_features) Input data. Returns ------- y : ndarray, shape (n_samples,) cluster labels """ # non-optimized default implementation; override when a better # method is possible for a given clustering algorithm self.fit(X) return self.labels_ class BiclusterMixin(object): """Mixin class for all bicluster estimators in scikit-learn""" @property def biclusters_(self): """Convenient way to get row and column indicators together. Returns the ``rows_`` and ``columns_`` members. """ return self.rows_, self.columns_ def get_indices(self, i): """Row and column indices of the i'th bicluster. Only works if ``rows_`` and ``columns_`` attributes exist. Returns ------- row_ind : np.array, dtype=np.intp Indices of rows in the dataset that belong to the bicluster. col_ind : np.array, dtype=np.intp Indices of columns in the dataset that belong to the bicluster. """ rows = self.rows_[i] columns = self.columns_[i] return np.nonzero(rows)[0], np.nonzero(columns)[0] def get_shape(self, i): """Shape of the i'th bicluster. Returns ------- shape : (int, int) Number of rows and columns (resp.) in the bicluster. """ indices = self.get_indices(i) return tuple(len(i) for i in indices) def get_submatrix(self, i, data): """Returns the submatrix corresponding to bicluster `i`. Works with sparse matrices. Only works if ``rows_`` and ``columns_`` attributes exist. """ from .utils.validation import check_array data = check_array(data, accept_sparse='csr') row_ind, col_ind = self.get_indices(i) return data[row_ind[:, np.newaxis], col_ind] ############################################################################### class TransformerMixin(object): """Mixin class for all transformers in scikit-learn.""" def fit_transform(self, X, y=None, **fit_params): """Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters ---------- X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns ------- X_new : numpy array of shape [n_samples, n_features_new] Transformed array. 
""" # non-optimized default implementation; override when a better # method is possible for a given clustering algorithm if y is None: # fit method of arity 1 (unsupervised transformation) return self.fit(X, **fit_params).transform(X) else: # fit method of arity 2 (supervised transformation) return self.fit(X, y, **fit_params).transform(X) ############################################################################### class MetaEstimatorMixin(object): """Mixin class for all meta estimators in scikit-learn.""" # this is just a tag for the moment ############################################################################### def is_classifier(estimator): """Returns True if the given estimator is (probably) a classifier.""" return getattr(estimator, "_estimator_type", None) == "classifier" def is_regressor(estimator): """Returns True if the given estimator is (probably) a regressor.""" return getattr(estimator, "_estimator_type", None) == "regressor"
bsd-3-clause
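The base classes above define scikit-learn's estimator parameter protocol (get_params, set_params, clone). A short illustration with a stock estimator; any estimator built on BaseEstimator behaves the same way:

from sklearn.base import clone
from sklearn.linear_model import LogisticRegression

est = LogisticRegression(C=10.0)
print(est.get_params()['C'])        # 10.0
est.set_params(C=0.5)               # nested form '<component>__<param>' works inside pipelines
fresh = clone(est)                  # same constructor params, no fitted state carried over
print(fresh.get_params()['C'])      # 0.5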
bioinformatics-ua/montra
emif/notifications/models.py
2
1769
# -*- coding: utf-8 -*- # Copyright (C) 2014 Universidade de Aveiro, DETI/IEETA, Bioinformatics Group - http://bioinformatics.ua.pt/ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from django.db import models from django.contrib.auth.models import User # Model for a new notification class Notification(models.Model): MESSAGE = 0 SYSTEM = 1 NOTIFICATION_TYPES = ( (MESSAGE, 'Private Message'), (SYSTEM, 'System Notification'), ) destiny = models.ForeignKey(User, related_name="destiny") # recipient user of the notification origin = models.ForeignKey(User, related_name="origin") # all notifications have an origin (this also allows reusing the model as a messaging system later) type = models.IntegerField(choices=NOTIFICATION_TYPES, default=SYSTEM) href = models.TextField(null=True) # optional link to the page the notification refers to notification = models.TextField() created_date = models.DateTimeField(auto_now_add=True) read_date = models.DateTimeField(null=True) read = models.BooleanField(default=False) removed = models.BooleanField(default=False) def __str__(self): return str(self.notification)
gpl-3.0
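Typical ORM usage of the Notification model above, inside a configured Django project (the user names, message text and link are hypothetical):

from django.contrib.auth.models import User
from notifications.models import Notification

sender = User.objects.get(username='alice')
recipient = User.objects.get(username='bob')
Notification.objects.create(
    origin=sender, destiny=recipient,
    type=Notification.MESSAGE,
    notification="You were granted access to a questionnaire",
    href="/questionnaires/42/",
)
unread = Notification.objects.filter(destiny=recipient, read=False, removed=False)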
weblyzard/ewrt
src/eWRT/ws/yahoo/__init__.py
1
7712
#!/usr/bin/env python """ @package eWRT.ws.yahoo support for the yahoo! search @remarks this module is based on yahoo's boss search service """ from __future__ import print_function # (C)opyrights 2008-2010 by Albert Weichselbraun <[email protected]> # Heinz Peter Lang <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from future import standard_library standard_library.install_aliases() from builtins import map from builtins import object __version__ = "$Header$" from urllib.request import urlopen from urllib.parse import urlencode, quote from urllib.error import URLError from nose.plugins.attrib import attr from socket import setdefaulttimeout, timeout from eWRT.ws.TagInfoService import TagInfoService from eWRT.config import YAHOO_APP_ID, YAHOO_SEARCH_URL from eWRT.input.conv.html import HtmlToText from eWRT.access.http import Retrieve setdefaulttimeout(60) class Yahoo(TagInfoService): """ interfaces with yahoo's search service * Search: Yahoo! BOSS (see http://developer.yahoo.com/search/boss) """ __slots__ = ('r', ) def __init__(self): self.r = Retrieve( Yahoo.__name__, sleep_time=0 ) def query(self, terms, count=0, queryParams={} ): """ returns search results for the given terms @param[in] terms ... a list of search terms @param[in] count ... number of results to return (0 if we are interested on the search meta data only). @param[in] queryParams ... a dictionary of query parameters to add to the request @returns the search results """ assert ( isinstance(terms, tuple) or isinstance(terms, list) ) queryParams.update( {'appid': YAHOO_APP_ID, 'count': count, 'format': 'json' } ) params = urlencode( queryParams ) url = YAHOO_SEARCH_URL % "%2B".join(map( quote, terms) ) +"?"+ params print(url) try: result = eval( self.r.open(url).read().replace("\\/", "/" )) return result['ysearchresponse'] except (timeout, URLError): return "" @staticmethod def getSearchResults(query_result): """ returns a list of all search results returned by the given query result. @param[in] query_result Result of the query """ return [ YahooSearchResult(r) for r in query_result['resultset_web'] ] \ if 'resultset_web' in query_result else [] def getTagInfo(self, tag): """ @Override """ return int( self.query(tag)['totalhits'] ) class YahooSearchResult(object): """ Perfom manipulations on yahoo search results """ __slots__ = ('search_result') def __init__(self, search_result): """ @param[in] search_result ... 
search result to query """ self.search_result = search_result def getKeywords(self): """ @returns the keywords for the given search_result """ return self.search_result['keywords']['terms'] def getPageContent(self): """ @returns the content of the found web page """ return urlopen( self.search_result['url'] ).read() def getPageText(self): """ @returns the text of the found web page """ try: return HtmlToText.getText( self.getPageContent() ) except: return "" class TestYahoo(object): """ tests the yahoo search API """ SEARCH_QUERIES = { 'energy': ( ('energy', 'coal'), ('energy', 'sustainable') ), 'latex' : ( ('latex', 'bibtex'), ('latex', 'knutz') ) } def __init__(self): self.y = Yahoo() @attr("remote") def testSearchCounts(self): for query, refinedQueries in self.SEARCH_QUERIES.items(): qCount = int(self.y.query( (query, ) )['totalhits']) for q in refinedQueries: print(query, q, "**",qCount, int(self.y.query( q )['totalhits'])) assert qCount > int(self.y.query( q )['totalhits']) @attr("remote") def testTagInfo(self): """ tests the tag info service """ assert self.y.getTagInfo( ('weblyzard',)) > 10 assert self.y.getTagInfo( ('a_query_which_should_not_appear_at_all', )) == 0 @attr("remote") def testYahooSearchResult(self): """ tests the Yahoo Search Result objects """ for resultSite in Yahoo.getSearchResults(self.y.query( ("linux", "firefox", ), \ count=1, queryParams={'view':'keyterms', 'abstract': 'long'} )): print(resultSite.search_result['keyterms']['terms']) assert len( resultSite.getPageText() ) > len(resultSite.search_result['abstract']) assert 'http' in resultSite.search_result['url'] @attr("remote") def testBorderlineYahooSearchResult(self): """ tests borderline cases such as empty search results """ assert len( Yahoo.getSearchResults(self.y.query( ('ksdaf', 'sadfj93', 'kd9', ), count=10, queryParams={'view':'keyterms', 'abstract': 'long'}) ) ) == 0 @attr("remote") def testMultiProcessingRetrieve(self): """ tests the multi processing capabilities of this module """ from multiprocessing import Pool p = Pool(4) TEST_URLS = ['http://www.derstandard.at', 'http://www.dilbert.com', 'http://www.wetter.at', 'http://www.wu.ac.at', 'http://www.ai.wu.ac.at', 'http://www.tuwien.ac.at', 'http://www.boku.ac.at', 'http://www.univie.ac.at', ] # f=open("/tmp/aw", "w") for res in p.map( p_fetchWebPage, TEST_URLS ): # f.write(res) # f.write("\n-----\n\n\n") assert len(res) > 20 # f.close() @attr("remote") def testFailingUrls(self): """ tests the module with URLs known to fail(!) """ TEST_URLS = ['http://www.mfsa.com.mt/insguide/english/glossarysearch.jsp?letter=all', ] for url in TEST_URLS: assert len( p_fetchWebPage(url).strip() ) > 0 def p_fetchWebPage(url): """ fetches the web page specified in the given yahoo result object @param[in] url the url to fetch @remarks helper function for the testMultiProcessing test """ r = YahooSearchResult( {'url': url} ) return r.getPageText() if __name__ == '__main__': y = Yahoo() #print y.query( ("energy",) ) #print y.query( ("energy", "coal") ) #print y.query( ("d'alembert", "law") ) r = y.query( ("linux", "python", ), count=5, queryParams={'view': 'keyterms', 'abstract': 'long'} ) print("***", r) for entry in r['resultset_web']: print(list(entry.keys())) print(entry['keyterms']['terms']) print(entry['url']) print(entry['abstract'])
gpl-3.0
numba/numba
numba/tests/test_svml.py
3
17178
import math import numpy as np import subprocess import numbers import importlib import sys import re import traceback import multiprocessing as mp from itertools import chain, combinations import numba from numba.core import config, cpu from numba import prange, njit from numba.core.compiler import compile_isolated, Flags from numba.tests.support import TestCase, tag, override_env_config import unittest needs_svml = unittest.skipUnless(config.USING_SVML, "SVML tests need SVML to be present") # a map of float64 vector lenghs with corresponding CPU architecture vlen2cpu = {2: 'nehalem', 4: 'haswell', 8: 'skylake-avx512'} # force LLVM to use AVX512 registers for vectorization # https://reviews.llvm.org/D67259 vlen2cpu_features = {2: '', 4: '', 8: '-prefer-256-bit'} # K: SVML functions, V: python functions which are expected to be SIMD-vectorized # using SVML, explicit references to Python functions here are mostly for sake of # instant import checks. # TODO: [] and comments below mean unused/untested SVML function, it's to be # either enabled or to be replaced with the explanation why the function # cannot be used in Numba # TODO: this test does not support functions with more than 1 arguments yet # The test logic should be modified if there is an SVML function being used under # different name or module from Python svml_funcs = { "sin": [np.sin, math.sin], "cos": [np.cos, math.cos], "pow": [], # pow, math.pow], "exp": [np.exp, math.exp], "log": [np.log, math.log], "acos": [math.acos], "acosh": [math.acosh], "asin": [math.asin], "asinh": [math.asinh], "atan2": [], # math.atan2], "atan": [math.atan], "atanh": [math.atanh], "cbrt": [], # np.cbrt], "cdfnorm": [], "cdfnorminv": [], "ceil": [], # np.ceil, math.ceil], "cosd": [], "cosh": [np.cosh, math.cosh], "erf": [math.erf], # np.erf is available in Intel Distribution "erfc": [math.erfc], "erfcinv": [], "erfinv": [], "exp10": [], "exp2": [], # np.exp2], "expm1": [np.expm1, math.expm1], "floor": [], # np.floor, math.floor], "fmod": [], # np.fmod, math.fmod], "hypot": [], # np.hypot, math.hypot], "invsqrt": [], # available in Intel Distribution "log10": [np.log10, math.log10], "log1p": [np.log1p, math.log1p], "log2": [], # np.log2], "logb": [], "nearbyint": [], "rint": [], # np.rint], "round": [], # round], "sind": [], "sinh": [np.sinh, math.sinh], "sqrt": [np.sqrt, math.sqrt], "tan": [np.tan, math.tan], "tanh": [np.tanh, math.tanh], "trunc": [], # np.trunc, math.trunc], } # TODO: these functions are not vectorizable with complex types complex_funcs_exclude = ["sqrt", "tan", "log10", "expm1", "log1p", "tanh", "log"] # remove untested entries svml_funcs = {k: v for k, v in svml_funcs.items() if len(v) > 0} # lists for functions which belong to numpy and math modules correpondently numpy_funcs = [f for f, v in svml_funcs.items() if "<ufunc" in \ [str(p).split(' ')[0] for p in v]] other_funcs = [f for f, v in svml_funcs.items() if "<built-in" in \ [str(p).split(' ')[0] for p in v]] def func_patterns(func, args, res, dtype, mode, vlen, fastmath, pad=' '*8): """ For a given function and its usage modes, returns python code and assembly patterns it should and should not generate """ # generate a function call according to the usecase if mode == "scalar": arg_list = ','.join([a+'[0]' for a in args]) body = '%s%s[0] += math.%s(%s)\n' % (pad, res, func, arg_list) elif mode == "numpy": body = '%s%s += np.%s(%s)' % (pad, res, func, ','.join(args)) body += '.astype(np.%s)\n' % dtype if dtype.startswith('int') else '\n' else: assert mode == "range" or mode == 
"prange" arg_list = ','.join([a+'[i]' for a in args]) body = '{pad}for i in {mode}({res}.size):\n' \ '{pad}{pad}{res}[i] += math.{func}({arg_list})\n'. \ format(**locals()) # TODO: refactor so this for-loop goes into umbrella function, # 'mode' can be 'numpy', '0', 'i' instead # TODO: it will enable mixed usecases like prange + numpy # type specialization is_f32 = dtype == 'float32' or dtype == 'complex64' f = func+'f' if is_f32 else func v = vlen*2 if is_f32 else vlen # general expectations prec_suff = '' if fastmath else '_ha' scalar_func = '$_'+f if config.IS_OSX else '$'+f svml_func = '__svml_%s%d%s,' % (f, v, prec_suff) if mode == "scalar": contains = [scalar_func] avoids = ['__svml_', svml_func] else: # will vectorize contains = [svml_func] avoids = [] # [scalar_func] - TODO: if possible, force LLVM to prevent # generating the failsafe scalar paths if vlen != 8 and (is_f32 or dtype == 'int32'): # Issue #3016 avoids += ['%zmm', '__svml_%s%d%s,' % (f, v*2, prec_suff)] # special handling if func == 'sqrt': if mode == "scalar": contains = ['sqrts'] avoids = [scalar_func, svml_func] # LLVM uses CPU instruction elif vlen == 8: contains = ['vsqrtp'] avoids = [scalar_func, svml_func] # LLVM uses CPU instruction # else expect use of SVML for older architectures return body, contains, avoids def usecase_name(dtype, mode, vlen, name): """ Returns pretty name for given set of modes """ return f"{dtype}_{mode}{vlen}_{name}" def combo_svml_usecase(dtype, mode, vlen, fastmath, name): """ Combine multiple function calls under single umbrella usecase """ name = usecase_name(dtype, mode, vlen, name) body = """def {name}(n): x = np.empty(n*8, dtype=np.{dtype}) ret = np.empty_like(x)\n""".format(**locals()) funcs = set(numpy_funcs if mode == "numpy" else other_funcs) if dtype.startswith('complex'): funcs = funcs.difference(complex_funcs_exclude) contains = set() avoids = set() # fill body and expectation patterns for f in funcs: b, c, a = func_patterns(f, ['x'], 'ret', dtype, mode, vlen, fastmath) avoids.update(a) body += b contains.update(c) body += " "*8 + "return ret" # now compile and return it along with its body in __doc__ and patterns ldict = {} exec(body, globals(), ldict) ldict[name].__doc__ = body return ldict[name], contains, avoids @needs_svml class TestSVMLGeneration(TestCase): """ Tests all SVML-generating functions produce desired calls """ # env mutating, must not run in parallel _numba_parallel_test_ = False # RE for a generic symbol reference and for each particular SVML function asm_filter = re.compile('|'.join(['\$[a-z_]\w+,']+list(svml_funcs))) @classmethod def mp_runner(cls, testname, outqueue): method = getattr(cls, testname) try: ok, msg = method() except Exception: msg = traceback.format_exc() ok = False outqueue.put({'status': ok, 'msg': msg}) @classmethod def _inject_test(cls, dtype, mode, vlen, flags): # unsupported combinations if dtype.startswith('complex') and mode != 'numpy': return # TODO: address skipped tests below skipped = dtype.startswith('int') and vlen == 2 sig = (numba.int64,) # unit test body template @staticmethod def run_template(): fn, contains, avoids = combo_svml_usecase(dtype, mode, vlen, flags['fastmath'], flags['name']) # look for specific patters in the asm for a given target with override_env_config('NUMBA_CPU_NAME', vlen2cpu[vlen]), \ override_env_config('NUMBA_CPU_FEATURES', vlen2cpu_features[vlen]): # recompile for overridden CPU try: jitted_fn = njit(sig, fastmath=flags['fastmath'], error_model=flags['error_model'],)(fn) except: raise 
Exception("raised while compiling "+fn.__doc__) asm = jitted_fn.inspect_asm(sig) missed = [pattern for pattern in contains if not pattern in asm] found = [pattern for pattern in avoids if pattern in asm] ok = not missed and not found detail = '\n'.join( [line for line in asm.split('\n') if cls.asm_filter.search(line) and not '"' in line]) msg = ( f"While expecting {missed} and not {found},\n" f"it contains:\n{detail}\n" f"when compiling {fn.__doc__}" ) return ok, msg # inject it into the class postfix = usecase_name(dtype, mode, vlen, flags['name']) testname = f"run_{postfix}" setattr(cls, testname, run_template) @unittest.skipUnless(not skipped, "Not implemented") def test_runner(self): ctx = mp.get_context("spawn") q = ctx.Queue() p = ctx.Process(target=type(self).mp_runner, args=[testname, q]) p.start() # timeout to avoid hanging and long enough to avoid bailing too early p.join(timeout=10) self.assertEqual(p.exitcode, 0, msg="process ended unexpectedly") out = q.get() status = out['status'] msg = out['msg'] self.assertTrue(status, msg=msg) setattr(cls, f"test_{postfix}", test_runner) @classmethod def autogenerate(cls): flag_list = [{'fastmath':False, 'error_model':'numpy', 'name':'usecase'}, {'fastmath':True, 'error_model':'numpy', 'name':'fastmath_usecase'},] # main loop covering all the modes and use-cases for dtype in ('complex64', 'float64', 'float32', 'int32', ): for vlen in vlen2cpu: for flags in flag_list: for mode in "scalar", "range", "prange", "numpy": cls._inject_test(dtype, mode, vlen, dict(flags)) # mark important for n in ( "test_int32_range4_usecase", # issue #3016 ): setattr(cls, n, tag("important")(getattr(cls, n))) TestSVMLGeneration.autogenerate() def math_sin_scalar(x): return math.sin(x) def math_sin_loop(n): ret = np.empty(n, dtype=np.float64) for x in range(n): ret[x] = math.sin(np.float64(x)) return ret @needs_svml class TestSVML(TestCase): """ Tests SVML behaves as expected """ # env mutating, must not run in parallel _numba_parallel_test_ = False def __init__(self, *args): self.flags = Flags() self.flags.nrt = True # flags for njit(fastmath=True) self.fastflags = Flags() self.fastflags.nrt = True self.fastflags.fastmath = cpu.FastMathOptions(True) super(TestSVML, self).__init__(*args) def compile(self, func, *args, **kwargs): assert not kwargs sig = tuple([numba.typeof(x) for x in args]) std = compile_isolated(func, sig, flags=self.flags) fast = compile_isolated(func, sig, flags=self.fastflags) return std, fast def copy_args(self, *args): if not args: return tuple() new_args = [] for x in args: if isinstance(x, np.ndarray): new_args.append(x.copy('k')) elif isinstance(x, np.number): new_args.append(x.copy()) elif isinstance(x, numbers.Number): new_args.append(x) else: raise ValueError('Unsupported argument type encountered') return tuple(new_args) def check(self, pyfunc, *args, **kwargs): jitstd, jitfast = self.compile(pyfunc, *args) std_pattern = kwargs.pop('std_pattern', None) fast_pattern = kwargs.pop('fast_pattern', None) cpu_name = kwargs.pop('cpu_name', 'skylake-avx512') # force LLVM to use AVX512 registers for vectorization # https://reviews.llvm.org/D67259 cpu_features = kwargs.pop('cpu_features', '-prefer-256-bit') # python result py_expected = pyfunc(*self.copy_args(*args)) # jit result jitstd_result = jitstd.entry_point(*self.copy_args(*args)) # fastmath result jitfast_result = jitfast.entry_point(*self.copy_args(*args)) # assert numerical equality np.testing.assert_almost_equal(jitstd_result, py_expected, **kwargs) 
np.testing.assert_almost_equal(jitfast_result, py_expected, **kwargs) # look for specific patters in the asm for a given target with override_env_config('NUMBA_CPU_NAME', cpu_name), \ override_env_config('NUMBA_CPU_FEATURES', cpu_features): # recompile for overridden CPU jitstd, jitfast = self.compile(pyfunc, *args) if std_pattern: self.check_svml_presence(jitstd, std_pattern) if fast_pattern: self.check_svml_presence(jitfast, fast_pattern) def check_svml_presence(self, func, pattern): asm = func.library.get_asm_str() self.assertIn(pattern, asm) def test_scalar_context(self): # SVML will not be used. pat = '$_sin' if config.IS_OSX else '$sin' self.check(math_sin_scalar, 7., std_pattern=pat) self.check(math_sin_scalar, 7., fast_pattern=pat) def test_svml(self): # loops both with and without fastmath should use SVML. # The high accuracy routines are dropped if `fastmath` is set std = "__svml_sin8_ha," fast = "__svml_sin8," # No `_ha`! self.check(math_sin_loop, 10, std_pattern=std, fast_pattern=fast) def test_svml_disabled(self): code = """if 1: import os import numpy as np import math def math_sin_loop(n): ret = np.empty(n, dtype=np.float64) for x in range(n): ret[x] = math.sin(np.float64(x)) return ret def check_no_svml(): try: # ban the use of SVML os.environ['NUMBA_DISABLE_INTEL_SVML'] = '1' # delay numba imports to account for env change as # numba.__init__ picks up SVML and it is too late by # then to override using `numba.config` import numba from numba import config from numba.core import cpu from numba.tests.support import override_env_config from numba.core.compiler import compile_isolated, Flags # compile for overridden CPU, with and without fastmath with override_env_config('NUMBA_CPU_NAME', 'skylake-avx512'), \ override_env_config('NUMBA_CPU_FEATURES', ''): sig = (numba.int32,) f = Flags() f.nrt = True std = compile_isolated(math_sin_loop, sig, flags=f) f.fastmath = cpu.FastMathOptions(True) fast = compile_isolated(math_sin_loop, sig, flags=f) fns = std, fast # assert no SVML call is present in the asm for fn in fns: asm = fn.library.get_asm_str() assert '__svml_sin' not in asm finally: # not really needed as process is separate os.environ['NUMBA_DISABLE_INTEL_SVML'] = '0' config.reload_config() check_no_svml() """ popen = subprocess.Popen( [sys.executable, "-c", code], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = popen.communicate() if popen.returncode != 0: raise AssertionError( "process failed with code %s: stderr follows\n%s\n" % (popen.returncode, err.decode())) def test_svml_working_in_non_isolated_context(self): @njit(fastmath={'fast'}, error_model="numpy") def impl(n): x = np.empty(n * 8, dtype=np.float64) ret = np.empty_like(x) for i in range(ret.size): ret[i] += math.cosh(x[i]) return ret impl(1) self.assertTrue('intel_svmlcc' in impl.inspect_llvm(impl.signatures[0])) if __name__ == '__main__': unittest.main()
bsd-2-clause
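Outside the test harness above, the same SVML check boils down to compiling a loop and grepping the generated assembly. A minimal sketch, assuming a Numba build with Intel SVML available (function name and input are invented for illustration):

import math
import numpy as np
from numba import njit

@njit(fastmath=True, error_model="numpy")
def apply_sin(x):
    out = np.empty_like(x)
    for i in range(x.size):
        out[i] = math.sin(x[i])
    return out

apply_sin(np.linspace(0.0, 1.0, 64))                 # trigger compilation
asm = apply_sin.inspect_asm(apply_sin.signatures[0])
print("__svml_sin" in asm)                           # True when SVML was used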
loco-odoo/localizacion_co
openerp/sql_db.py
39
23723
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## """ The PostgreSQL connector is a connectivity layer between the OpenERP code and the database, *not* a database abstraction toolkit. Database abstraction is what the ORM does, in fact. """ from contextlib import contextmanager from functools import wraps import logging import urlparse import uuid import psycopg2.extras import psycopg2.extensions from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT, ISOLATION_LEVEL_READ_COMMITTED, ISOLATION_LEVEL_REPEATABLE_READ from psycopg2.pool import PoolError psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) _logger = logging.getLogger(__name__) types_mapping = { 'date': (1082,), 'time': (1083,), 'datetime': (1114,), } def unbuffer(symb, cr): if symb is None: return None return str(symb) def undecimalize(symb, cr): if symb is None: return None return float(symb) for name, typeoid in types_mapping.items(): psycopg2.extensions.register_type(psycopg2.extensions.new_type(typeoid, name, lambda x, cr: x)) psycopg2.extensions.register_type(psycopg2.extensions.new_type((700, 701, 1700,), 'float', undecimalize)) import tools from tools.func import frame_codeinfo from datetime import datetime as mdt from datetime import timedelta import threading from inspect import currentframe import re re_from = re.compile('.* from "?([a-zA-Z_0-9]+)"? .*$') re_into = re.compile('.* into "?([a-zA-Z_0-9]+)"? .*$') sql_counter = 0 class Cursor(object): """Represents an open transaction to the PostgreSQL DB backend, acting as a lightweight wrapper around psycopg2's ``cursor`` objects. ``Cursor`` is the object behind the ``cr`` variable used all over the OpenERP code. .. rubric:: Transaction Isolation One very important property of database transactions is the level of isolation between concurrent transactions. The SQL standard defines four levels of transaction isolation, ranging from the most strict *Serializable* level, to the least strict *Read Uncommitted* level. These levels are defined in terms of the phenomena that must not occur between concurrent transactions, such as *dirty read*, etc. In the context of a generic business data management software such as OpenERP, we need the best guarantees that no data corruption can ever be cause by simply running multiple transactions in parallel. Therefore, the preferred level would be the *serializable* level, which ensures that a set of transactions is guaranteed to produce the same effect as running them one at a time in some order. 
However, most database management systems implement a limited serializable isolation in the form of `snapshot isolation <http://en.wikipedia.org/wiki/Snapshot_isolation>`_, providing most of the same advantages as True Serializability, with a fraction of the performance cost. With PostgreSQL up to version 9.0, this snapshot isolation was the implementation of both the ``REPEATABLE READ`` and ``SERIALIZABLE`` levels of the SQL standard. As of PostgreSQL 9.1, the previous snapshot isolation implementation was kept for ``REPEATABLE READ``, while a new ``SERIALIZABLE`` level was introduced, providing some additional heuristics to detect a concurrent update by parallel transactions, and forcing one of them to rollback. OpenERP implements its own level of locking protection for transactions that are highly likely to provoke concurrent updates, such as stock reservations or document sequences updates. Therefore we mostly care about the properties of snapshot isolation, but we don't really need additional heuristics to trigger transaction rollbacks, as we are taking care of triggering instant rollbacks ourselves when it matters (and we can save the additional performance hit of these heuristics). As a result of the above, we have selected ``REPEATABLE READ`` as the default transaction isolation level for OpenERP cursors, as it will be mapped to the desired ``snapshot isolation`` level for all supported PostgreSQL version (8.3 - 9.x). Note: up to psycopg2 v.2.4.2, psycopg2 itself remapped the repeatable read level to serializable before sending it to the database, so it would actually select the new serializable mode on PostgreSQL 9.1. Make sure you use psycopg2 v2.4.2 or newer if you use PostgreSQL 9.1 and the performance hit is a concern for you. .. attribute:: cache Cache dictionary with a "request" (-ish) lifecycle, only lives as long as the cursor itself does and proactively cleared when the cursor is closed. This cache should *only* be used to store repeatable reads as it ignores rollbacks and savepoints, it should not be used to store *any* data which may be modified during the life of the cursor. """ IN_MAX = 1000 # decent limit on size of IN queries - guideline = Oracle limit def check(f): @wraps(f) def wrapper(self, *args, **kwargs): if self._closed: msg = 'Unable to use a closed cursor.' if self.__closer: msg += ' It was closed at %s, line %s' % self.__closer raise psycopg2.OperationalError(msg) return f(self, *args, **kwargs) return wrapper def __init__(self, pool, dbname, dsn, serialized=True): self.sql_from_log = {} self.sql_into_log = {} # default log level determined at cursor creation, could be # overridden later for debugging purposes self.sql_log = _logger.isEnabledFor(logging.DEBUG) self.sql_log_count = 0 # avoid the call of close() (by __del__) if an exception # is raised by any of the following initialisations self._closed = True self.__pool = pool self.dbname = dbname # Whether to enable snapshot isolation level for this cursor. # see also the docstring of Cursor. 
self._serialized = serialized self._cnx = pool.borrow(dsn) self._obj = self._cnx.cursor() if self.sql_log: self.__caller = frame_codeinfo(currentframe(), 2) else: self.__caller = False self._closed = False # real initialisation value self.autocommit(False) self.__closer = False self._default_log_exceptions = True self.cache = {} def __build_dict(self, row): return {d.name: row[i] for i, d in enumerate(self._obj.description)} def dictfetchone(self): row = self._obj.fetchone() return row and self.__build_dict(row) def dictfetchmany(self, size): return map(self.__build_dict, self._obj.fetchmany(size)) def dictfetchall(self): return map(self.__build_dict, self._obj.fetchall()) def __del__(self): if not self._closed and not self._cnx.closed: # Oops. 'self' has not been closed explicitly. # The cursor will be deleted by the garbage collector, # but the database connection is not put back into the connection # pool, preventing some operation on the database like dropping it. # This can also lead to a server overload. msg = "Cursor not closed explicitly\n" if self.__caller: msg += "Cursor was created at %s:%s" % self.__caller else: msg += "Please enable sql debugging to trace the caller." _logger.warning(msg) self._close(True) @check def execute(self, query, params=None, log_exceptions=None): if '%d' in query or '%f' in query: _logger.warning(query) _logger.warning("SQL queries cannot contain %d or %f anymore. Use only %s") if params and not isinstance(params, (tuple, list, dict)): _logger.error("SQL query parameters should be a tuple, list or dict; got %r", params) raise ValueError("SQL query parameters should be a tuple, list or dict; got %r" % (params,)) if self.sql_log: now = mdt.now() try: params = params or None res = self._obj.execute(query, params) except psycopg2.ProgrammingError, pe: if self._default_log_exceptions if log_exceptions is None else log_exceptions: _logger.error("Programming error: %s, in query %s", pe, query) raise except Exception: if self._default_log_exceptions if log_exceptions is None else log_exceptions: _logger.exception("bad query: %s", self._obj.query or query) raise # simple query count is always computed self.sql_log_count += 1 # advanced stats only if sql_log is enabled if self.sql_log: delay = mdt.now() - now delay = delay.seconds * 1E6 + delay.microseconds _logger.debug("query: %s", self._obj.query) res_from = re_from.match(query.lower()) if res_from: self.sql_from_log.setdefault(res_from.group(1), [0, 0]) self.sql_from_log[res_from.group(1)][0] += 1 self.sql_from_log[res_from.group(1)][1] += delay res_into = re_into.match(query.lower()) if res_into: self.sql_into_log.setdefault(res_into.group(1), [0, 0]) self.sql_into_log[res_into.group(1)][0] += 1 self.sql_into_log[res_into.group(1)][1] += delay return res def split_for_in_conditions(self, ids): """Split a list of identifiers into one or more smaller tuples safe for IN conditions, after uniquifying them.""" return tools.misc.split_every(self.IN_MAX, set(ids)) def print_log(self): global sql_counter if not self.sql_log: return def process(type): sqllogs = {'from': self.sql_from_log, 'into': self.sql_into_log} sum = 0 if sqllogs[type]: sqllogitems = sqllogs[type].items() sqllogitems.sort(key=lambda k: k[1][1]) _logger.debug("SQL LOG %s:", type) sqllogitems.sort(lambda x, y: cmp(x[1][0], y[1][0])) for r in sqllogitems: delay = timedelta(microseconds=r[1][1]) _logger.debug("table: %s: %s/%s", r[0], delay, r[1][0]) sum += r[1][1] sqllogs[type].clear() sum = timedelta(microseconds=sum) _logger.debug("SUM %s:%s/%d 
[%d]", type, sum, self.sql_log_count, sql_counter) sqllogs[type].clear() process('from') process('into') self.sql_log_count = 0 self.sql_log = False @check def close(self): return self._close(False) def _close(self, leak=False): global sql_counter if not self._obj: return del self.cache if self.sql_log: self.__closer = frame_codeinfo(currentframe(), 3) # simple query count is always computed sql_counter += self.sql_log_count # advanced stats only if sql_log is enabled self.print_log() self._obj.close() # This force the cursor to be freed, and thus, available again. It is # important because otherwise we can overload the server very easily # because of a cursor shortage (because cursors are not garbage # collected as fast as they should). The problem is probably due in # part because browse records keep a reference to the cursor. del self._obj self._closed = True # Clean the underlying connection. self._cnx.rollback() if leak: self._cnx.leaked = True else: chosen_template = tools.config['db_template'] templates_list = tuple(set(['template0', 'template1', 'postgres', chosen_template])) keep_in_pool = self.dbname not in templates_list self.__pool.give_back(self._cnx, keep_in_pool=keep_in_pool) @check def autocommit(self, on): if on: isolation_level = ISOLATION_LEVEL_AUTOCOMMIT else: # If a serializable cursor was requested, we # use the appropriate PotsgreSQL isolation level # that maps to snaphsot isolation. # For all supported PostgreSQL versions (8.3-9.x), # this is currently the ISOLATION_REPEATABLE_READ. # See also the docstring of this class. # NOTE: up to psycopg 2.4.2, repeatable read # is remapped to serializable before being # sent to the database, so it is in fact # unavailable for use with pg 9.1. isolation_level = \ ISOLATION_LEVEL_REPEATABLE_READ \ if self._serialized \ else ISOLATION_LEVEL_READ_COMMITTED self._cnx.set_isolation_level(isolation_level) @check def commit(self): """ Perform an SQL `COMMIT` """ return self._cnx.commit() @check def rollback(self): """ Perform an SQL `ROLLBACK` """ return self._cnx.rollback() def __enter__(self): """ Using the cursor as a contextmanager automatically commits and closes it:: with cr: cr.execute(...) # cr is committed if no failure occurred # cr is closed in any case """ return self def __exit__(self, exc_type, exc_value, traceback): if exc_type is None: self.commit() self.close() @contextmanager @check def savepoint(self): """context manager entering in a new savepoint""" name = uuid.uuid1().hex self.execute('SAVEPOINT "%s"' % name) try: yield self.execute('RELEASE SAVEPOINT "%s"' % name) except: self.execute('ROLLBACK TO SAVEPOINT "%s"' % name) raise @check def __getattr__(self, name): return getattr(self._obj, name) class TestCursor(Cursor): """ A cursor to be used for tests. It keeps the transaction open across several requests, and simulates committing, rolling back, and closing. 
""" def __init__(self, *args, **kwargs): super(TestCursor, self).__init__(*args, **kwargs) # in order to simulate commit and rollback, the cursor maintains a # savepoint at its last commit self.execute("SAVEPOINT test_cursor") # we use a lock to serialize concurrent requests self._lock = threading.RLock() def acquire(self): self._lock.acquire() def release(self): self._lock.release() def force_close(self): super(TestCursor, self).close() def close(self): if not self._closed: self.rollback() # for stuff that has not been committed self.release() def autocommit(self, on): _logger.debug("TestCursor.autocommit(%r) does nothing", on) def commit(self): self.execute("RELEASE SAVEPOINT test_cursor") self.execute("SAVEPOINT test_cursor") def rollback(self): self.execute("ROLLBACK TO SAVEPOINT test_cursor") self.execute("SAVEPOINT test_cursor") class PsycoConnection(psycopg2.extensions.connection): pass class ConnectionPool(object): """ The pool of connections to database(s) Keep a set of connections to pg databases open, and reuse them to open cursors for all transactions. The connections are *not* automatically closed. Only a close_db() can trigger that. """ def locked(fun): @wraps(fun) def _locked(self, *args, **kwargs): self._lock.acquire() try: return fun(self, *args, **kwargs) finally: self._lock.release() return _locked def __init__(self, maxconn=64): self._connections = [] self._maxconn = max(maxconn, 1) self._lock = threading.Lock() def __repr__(self): used = len([1 for c, u in self._connections[:] if u]) count = len(self._connections) return "ConnectionPool(used=%d/count=%d/max=%d)" % (used, count, self._maxconn) def _debug(self, msg, *args): _logger.debug(('%r ' + msg), self, *args) @locked def borrow(self, dsn): # free dead and leaked connections for i, (cnx, _) in tools.reverse_enumerate(self._connections): if cnx.closed: self._connections.pop(i) self._debug('Removing closed connection at index %d: %r', i, cnx.dsn) continue if getattr(cnx, 'leaked', False): delattr(cnx, 'leaked') self._connections.pop(i) self._connections.append((cnx, False)) _logger.warning('%r: Free leaked connection to %r', self, cnx.dsn) for i, (cnx, used) in enumerate(self._connections): if not used and cnx._original_dsn == dsn: try: cnx.reset() except psycopg2.OperationalError: self._debug('Cannot reset connection at index %d: %r', i, cnx.dsn) # psycopg2 2.4.4 and earlier do not allow closing a closed connection if not cnx.closed: cnx.close() continue self._connections.pop(i) self._connections.append((cnx, True)) self._debug('Borrow existing connection to %r at index %d', cnx.dsn, i) return cnx if len(self._connections) >= self._maxconn: # try to remove the oldest connection not used for i, (cnx, used) in enumerate(self._connections): if not used: self._connections.pop(i) if not cnx.closed: cnx.close() self._debug('Removing old connection at index %d: %r', i, cnx.dsn) break else: # note: this code is called only if the for loop has completed (no break) raise PoolError('The Connection Pool Is Full') try: result = psycopg2.connect(dsn=dsn, connection_factory=PsycoConnection) except psycopg2.Error: _logger.exception('Connection to the database failed') raise result._original_dsn = dsn self._connections.append((result, True)) self._debug('Create new connection') return result @locked def give_back(self, connection, keep_in_pool=True): self._debug('Give back connection to %r', connection.dsn) for i, (cnx, used) in enumerate(self._connections): if cnx is connection: self._connections.pop(i) if keep_in_pool: 
self._connections.append((cnx, False)) self._debug('Put connection to %r in pool', cnx.dsn) else: self._debug('Forgot connection to %r', cnx.dsn) cnx.close() break else: raise PoolError('This connection does not below to the pool') @locked def close_all(self, dsn=None): count = 0 last = None for i, (cnx, used) in tools.reverse_enumerate(self._connections): if dsn is None or cnx._original_dsn == dsn: cnx.close() last = self._connections.pop(i)[0] count += 1 _logger.info('%r: Closed %d connections %s', self, count, (dsn and last and 'to %r' % last.dsn) or '') class Connection(object): """ A lightweight instance of a connection to postgres """ def __init__(self, pool, dbname, dsn): self.dbname = dbname self.dsn = dsn self.__pool = pool def cursor(self, serialized=True): cursor_type = serialized and 'serialized ' or '' _logger.debug('create %scursor to %r', cursor_type, self.dsn) return Cursor(self.__pool, self.dbname, self.dsn, serialized=serialized) def test_cursor(self, serialized=True): cursor_type = serialized and 'serialized ' or '' _logger.debug('create test %scursor to %r', cursor_type, self.dsn) return TestCursor(self.__pool, self.dbname, self.dsn, serialized=serialized) # serialized_cursor is deprecated - cursors are serialized by default serialized_cursor = cursor def __nonzero__(self): """Check if connection is possible""" try: _logger.warning("__nonzero__() is deprecated. (It is too expensive to test a connection.)") cr = self.cursor() cr.close() return True except Exception: return False def dsn(db_or_uri): """parse the given `db_or_uri` and return a 2-tuple (dbname, uri)""" if db_or_uri.startswith(('postgresql://', 'postgres://')): # extract db from uri us = urlparse.urlsplit(db_or_uri) if len(us.path) > 1: db_name = us.path[1:] elif us.username: db_name = us.username else: db_name = us.hostname return db_name, db_or_uri _dsn = '' for p in ('host', 'port', 'user', 'password'): cfg = tools.config['db_' + p] if cfg: _dsn += '%s=%s ' % (p, cfg) return db_or_uri, '%sdbname=%s' % (_dsn, db_or_uri) _Pool = None def db_connect(to, allow_uri=False): global _Pool if _Pool is None: _Pool = ConnectionPool(int(tools.config['db_maxconn'])) db, uri = dsn(to) if not allow_uri and db != to: raise ValueError('URI connections not allowed') return Connection(_Pool, db, uri) def close_db(db_name): """ You might want to call openerp.modules.registry.RegistryManager.delete(db_name) along this function.""" global _Pool if _Pool: _Pool.close_all(dsn(db_name)[1]) def close_all(): global _Pool if _Pool: _Pool.close_all() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
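The Cursor docstring above describes the REPEATABLE READ default, the context-manager behaviour and the savepoint() helper. A minimal usage sketch follows, assuming an OpenERP server environment in which this file is importable as openerp.sql_db; the database name 'test_db', the res_users query and the 'admin' login are illustrative values, not anything defined by the module itself.

import openerp.sql_db as sql_db           # assumed import path for the module above

db = sql_db.db_connect('test_db')         # Connection backed by the shared ConnectionPool
cr = db.cursor()                          # opens a cursor at REPEATABLE READ isolation
try:
    with cr.savepoint():                  # partial-rollback unit inside the transaction
        cr.execute("SELECT id, login FROM res_users WHERE login = %s", ('admin',))
        rows = cr.dictfetchall()          # rows as dicts keyed by column name
    cr.commit()
finally:
    cr.close()                            # returns the underlying connection to the pool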
haiwen/pyes
tests/test_rivers.py
5
2907
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from pyes.tests import ESTestCase
from pyes.rivers import CouchDBRiver, RabbitMQRiver, TwitterRiver

class RiversTestCase(ESTestCase):
    def setUp(self):
        super(RiversTestCase, self).setUp()

    def testCreateCouchDBRiver(self):
        """ Testing creating a CouchDB river """
        test_river = CouchDBRiver(index_name='text_index', index_type='test_type')
        result = self.conn.create_river(test_river, river_name='test_index')
        self.assertResultContains(result, {'ok': True})

    def testDeleteCouchDBRiver(self):
        """ Testing deleting a CouchDB river """
        test_river = CouchDBRiver(index_name='text_index', index_type='test_type')
        result = self.conn.delete_river(test_river, river_name='test_index')
        self.assertResultContains(result, {'ok': True})

    def testCreateRabbitMQRiver(self):
        """ Testing creating a RabbitMQ river """
        test_river = RabbitMQRiver(index_name='text_index', index_type='test_type')
        result = self.conn.create_river(test_river, river_name='test_index')
        self.assertResultContains(result, {'ok': True})

    def testDeleteRabbitMQRiver(self):
        """ Delete RabbitMQ river """
        test_river = RabbitMQRiver(index_name='text_index', index_type='test_type')
        result = self.conn.create_river(test_river, river_name='test_index')
        result = self.conn.delete_river(test_river, river_name='test_index')
        self.assertResultContains(result, {'ok': True})

    def testCreateTwitterRiver(self):
        """ Create twitter river """
        test_river = TwitterRiver('test', 'test', index_name='text_index', index_type='status')
        result = self.conn.create_river(test_river, river_name='test_index')
        self.assertResultContains(result, {'ok': True})

    def testDeleteTwitterRiver(self):
        """ Delete Twitter river """
        test_river = TwitterRiver('test', 'test', index_name='text_index', index_type='status')
        result = self.conn.create_river(test_river, river_name='test_index')
        result = self.conn.delete_river(test_river, river_name='test_index')
        self.assertResultContains(result, {'ok': True})

    def testCreateTwitterRiverOAuth(self):
        test_river = TwitterRiver('test', 'test',
                                  index_name='text_index', index_type='test_type',
                                  consumer_key="aaa",
                                  consumer_secret="aaa",
                                  access_token="aaa",
                                  access_token_secret="aaa",
                                  )
        result = self.conn.create_river(test_river, river_name='test_index')
        self.assertResultContains(result, {'ok': True})

if __name__ == "__main__":
    unittest.main()
bsd-3-clause
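The tests above exercise create_river/delete_river through the ESTestCase fixture's self.conn. Outside the test harness the same calls can be made on an ES connection directly; a hedged sketch follows, where the Elasticsearch address and index names are invented and a CouchDB river plugin is assumed to be installed on the cluster.

from pyes import ES
from pyes.rivers import CouchDBRiver

conn = ES('127.0.0.1:9200')                       # address is an example value
river = CouchDBRiver(index_name='text_index', index_type='test_type')
result = conn.create_river(river, river_name='test_index')
# on success the response contains {'ok': True}, which is what the tests assert
conn.delete_river(river, river_name='test_index')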
htygithub/bokeh
bokeh/sampledata/gapminder.py
41
2655
from __future__ import absolute_import

import pandas as pd
from os.path import join
import sys
from . import _data_dir

'''
This module provides pandas DataFrame instances of four datasets from
gapminder.org.

These are read in from CSVs that have been downloaded from Bokeh's sample
data on S3. The original code that generated the CSVs from the raw gapminder
data is available at the bottom of this file.
'''

data_dir = _data_dir()

datasets = [
    'fertility',
    'life_expectancy',
    'population',
    'regions',
]

for dataset in datasets:
    filename = join(data_dir, 'gapminder_%s.csv' % dataset)
    try:
        setattr(
            sys.modules[__name__],
            dataset,
            pd.read_csv(filename, index_col='Country')
        )
    except (IOError, OSError):
        raise RuntimeError('Could not load gapminder data file "%s". Please execute bokeh.sampledata.download()' % filename)

__all__ = datasets

# ====================================================
# Original data is from Gapminder - www.gapminder.org.
# The google docs links are maintained by gapminder
# The following script was used to get the data from gapminder
# and process it into the csvs stored in bokeh's sampledata.

"""
population_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0XOoBL_n5tAQ&output=xls"
fertility_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0TAlJeCEzcGQ&output=xls"
life_expectancy_url = "http://spreadsheets.google.com/pub?key=tiAiXcrneZrUnnJ9dBU-PAw&output=xls"
regions_url = "https://docs.google.com/spreadsheets/d/1OxmGUNWeADbPJkQxVPupSOK5MbAECdqThnvyPrwG5Os/pub?gid=1&output=xls"

def _get_data(url):
    # Get the data from the url and return only 1964 - 2013
    df = pd.read_excel(url, index_col=0)
    df = df.unstack().unstack()
    df = df[(df.index >= 1964) & (df.index <= 2013)]
    df = df.unstack().unstack()
    return df

fertility_df = _get_data(fertility_url)
life_expectancy_df = _get_data(life_expectancy_url)
population_df = _get_data(population_url)
regions_df = pd.read_excel(regions_url, index_col=0)

# keep only the countries common to all of the datasets
fertility_df = fertility_df.drop(fertility_df.index.difference(life_expectancy_df.index))
population_df = population_df.drop(population_df.index.difference(life_expectancy_df.index))
regions_df = regions_df.drop(regions_df.index.difference(life_expectancy_df.index))

fertility_df.to_csv('gapminder_fertility.csv')
population_df.to_csv('gapminder_population.csv')
life_expectancy_df.to_csv('gapminder_life_expectancy.csv')
regions_df.to_csv('gapminder_regions.csv')
"""
# ======================================================
bsd-3-clause
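Once bokeh.sampledata.download() has fetched the CSVs, the module above exposes each dataset as a module-level DataFrame indexed by country. A small consumption sketch, with 'India' chosen purely as an example row label:

from bokeh.sampledata.gapminder import fertility, life_expectancy, population, regions

print(fertility.columns[:5])                 # year columns, e.g. 1964..1968
print(life_expectancy.loc['India'].head())   # one country's series across years
print(regions.head())                        # region metadata, also indexed by Country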
chirilo/remo
vendor-local/lib/python/unidecode/x0c2.py
253
4710
data = ( 'syon', # 0x00 'syonj', # 0x01 'syonh', # 0x02 'syod', # 0x03 'syol', # 0x04 'syolg', # 0x05 'syolm', # 0x06 'syolb', # 0x07 'syols', # 0x08 'syolt', # 0x09 'syolp', # 0x0a 'syolh', # 0x0b 'syom', # 0x0c 'syob', # 0x0d 'syobs', # 0x0e 'syos', # 0x0f 'syoss', # 0x10 'syong', # 0x11 'syoj', # 0x12 'syoc', # 0x13 'syok', # 0x14 'syot', # 0x15 'syop', # 0x16 'syoh', # 0x17 'su', # 0x18 'sug', # 0x19 'sugg', # 0x1a 'sugs', # 0x1b 'sun', # 0x1c 'sunj', # 0x1d 'sunh', # 0x1e 'sud', # 0x1f 'sul', # 0x20 'sulg', # 0x21 'sulm', # 0x22 'sulb', # 0x23 'suls', # 0x24 'sult', # 0x25 'sulp', # 0x26 'sulh', # 0x27 'sum', # 0x28 'sub', # 0x29 'subs', # 0x2a 'sus', # 0x2b 'suss', # 0x2c 'sung', # 0x2d 'suj', # 0x2e 'suc', # 0x2f 'suk', # 0x30 'sut', # 0x31 'sup', # 0x32 'suh', # 0x33 'sweo', # 0x34 'sweog', # 0x35 'sweogg', # 0x36 'sweogs', # 0x37 'sweon', # 0x38 'sweonj', # 0x39 'sweonh', # 0x3a 'sweod', # 0x3b 'sweol', # 0x3c 'sweolg', # 0x3d 'sweolm', # 0x3e 'sweolb', # 0x3f 'sweols', # 0x40 'sweolt', # 0x41 'sweolp', # 0x42 'sweolh', # 0x43 'sweom', # 0x44 'sweob', # 0x45 'sweobs', # 0x46 'sweos', # 0x47 'sweoss', # 0x48 'sweong', # 0x49 'sweoj', # 0x4a 'sweoc', # 0x4b 'sweok', # 0x4c 'sweot', # 0x4d 'sweop', # 0x4e 'sweoh', # 0x4f 'swe', # 0x50 'sweg', # 0x51 'swegg', # 0x52 'swegs', # 0x53 'swen', # 0x54 'swenj', # 0x55 'swenh', # 0x56 'swed', # 0x57 'swel', # 0x58 'swelg', # 0x59 'swelm', # 0x5a 'swelb', # 0x5b 'swels', # 0x5c 'swelt', # 0x5d 'swelp', # 0x5e 'swelh', # 0x5f 'swem', # 0x60 'sweb', # 0x61 'swebs', # 0x62 'swes', # 0x63 'swess', # 0x64 'sweng', # 0x65 'swej', # 0x66 'swec', # 0x67 'swek', # 0x68 'swet', # 0x69 'swep', # 0x6a 'sweh', # 0x6b 'swi', # 0x6c 'swig', # 0x6d 'swigg', # 0x6e 'swigs', # 0x6f 'swin', # 0x70 'swinj', # 0x71 'swinh', # 0x72 'swid', # 0x73 'swil', # 0x74 'swilg', # 0x75 'swilm', # 0x76 'swilb', # 0x77 'swils', # 0x78 'swilt', # 0x79 'swilp', # 0x7a 'swilh', # 0x7b 'swim', # 0x7c 'swib', # 0x7d 'swibs', # 0x7e 'swis', # 0x7f 'swiss', # 0x80 'swing', # 0x81 'swij', # 0x82 'swic', # 0x83 'swik', # 0x84 'swit', # 0x85 'swip', # 0x86 'swih', # 0x87 'syu', # 0x88 'syug', # 0x89 'syugg', # 0x8a 'syugs', # 0x8b 'syun', # 0x8c 'syunj', # 0x8d 'syunh', # 0x8e 'syud', # 0x8f 'syul', # 0x90 'syulg', # 0x91 'syulm', # 0x92 'syulb', # 0x93 'syuls', # 0x94 'syult', # 0x95 'syulp', # 0x96 'syulh', # 0x97 'syum', # 0x98 'syub', # 0x99 'syubs', # 0x9a 'syus', # 0x9b 'syuss', # 0x9c 'syung', # 0x9d 'syuj', # 0x9e 'syuc', # 0x9f 'syuk', # 0xa0 'syut', # 0xa1 'syup', # 0xa2 'syuh', # 0xa3 'seu', # 0xa4 'seug', # 0xa5 'seugg', # 0xa6 'seugs', # 0xa7 'seun', # 0xa8 'seunj', # 0xa9 'seunh', # 0xaa 'seud', # 0xab 'seul', # 0xac 'seulg', # 0xad 'seulm', # 0xae 'seulb', # 0xaf 'seuls', # 0xb0 'seult', # 0xb1 'seulp', # 0xb2 'seulh', # 0xb3 'seum', # 0xb4 'seub', # 0xb5 'seubs', # 0xb6 'seus', # 0xb7 'seuss', # 0xb8 'seung', # 0xb9 'seuj', # 0xba 'seuc', # 0xbb 'seuk', # 0xbc 'seut', # 0xbd 'seup', # 0xbe 'seuh', # 0xbf 'syi', # 0xc0 'syig', # 0xc1 'syigg', # 0xc2 'syigs', # 0xc3 'syin', # 0xc4 'syinj', # 0xc5 'syinh', # 0xc6 'syid', # 0xc7 'syil', # 0xc8 'syilg', # 0xc9 'syilm', # 0xca 'syilb', # 0xcb 'syils', # 0xcc 'syilt', # 0xcd 'syilp', # 0xce 'syilh', # 0xcf 'syim', # 0xd0 'syib', # 0xd1 'syibs', # 0xd2 'syis', # 0xd3 'syiss', # 0xd4 'sying', # 0xd5 'syij', # 0xd6 'syic', # 0xd7 'syik', # 0xd8 'syit', # 0xd9 'syip', # 0xda 'syih', # 0xdb 'si', # 0xdc 'sig', # 0xdd 'sigg', # 0xde 'sigs', # 0xdf 'sin', # 0xe0 'sinj', # 0xe1 'sinh', # 0xe2 'sid', # 0xe3 'sil', # 0xe4 'silg', # 
0xe5 'silm', # 0xe6 'silb', # 0xe7 'sils', # 0xe8 'silt', # 0xe9 'silp', # 0xea 'silh', # 0xeb 'sim', # 0xec 'sib', # 0xed 'sibs', # 0xee 'sis', # 0xef 'siss', # 0xf0 'sing', # 0xf1 'sij', # 0xf2 'sic', # 0xf3 'sik', # 0xf4 'sit', # 0xf5 'sip', # 0xf6 'sih', # 0xf7 'ssa', # 0xf8 'ssag', # 0xf9 'ssagg', # 0xfa 'ssags', # 0xfb 'ssan', # 0xfc 'ssanj', # 0xfd 'ssanh', # 0xfe 'ssad', # 0xff )
bsd-3-clause
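The tuple above is one page (U+C200-U+C2FF) of unidecode's transliteration tables: the low byte of a code point indexes into the tuple of its page module. A quick illustration through the library's public entry point; the sample character is arbitrary.

from unidecode import unidecode

# U+C218 (the Hangul syllable 'su') lives in page 0xC2 at offset 0x18, which maps to 'su' above
print(unidecode(u'\uc218'))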
Adenilson/servo
tests/wpt/web-platform-tests/html/tools/update_html5lib_tests.py
125
5358
import sys import os import hashlib import urllib import itertools import re import json import glob import shutil try: import genshi from genshi.template import MarkupTemplate from html5lib.tests import support except ImportError: print """This script requires the Genshi templating library and html5lib source It is recommended that these are installed in a virtualenv: virtualenv venv source venv/bin/activate pip install genshi cd venv git clone [email protected]:html5lib/html5lib-python.git html5lib cd html5lib git submodule init git submodule update pip install -e ./ Then run this script again, with the virtual environment still active. When you are done, type "deactivate" to deactivate the virtual environment. """ TESTS_PATH = "html/syntax/parsing/" def get_paths(): script_path = os.path.split(os.path.abspath(__file__))[0] repo_base = get_repo_base(script_path) tests_path = os.path.join(repo_base, TESTS_PATH) return script_path, tests_path def get_repo_base(path): while path: if os.path.exists(os.path.join(path, ".git")): return path else: path = os.path.split(path)[0] def get_expected(data): data = "#document\n" + data return data def get_hash(data, container=None): if container == None: container = "" return hashlib.sha1("#container%s#data%s"%(container.encode("utf8"), data.encode("utf8"))).hexdigest() def make_tests(script_dir, out_dir, input_file_name, test_data): tests = [] innerHTML_tests = [] ids_seen = {} print input_file_name for test in test_data: if "script-off" in test: continue is_innerHTML = "document-fragment" in test data = test["data"] container = test["document-fragment"] if is_innerHTML else None assert test["document"], test expected = get_expected(test["document"]) test_list = innerHTML_tests if is_innerHTML else tests test_id = get_hash(data, container) if test_id in ids_seen: print "WARNING: id %s seen multiple times in file %s this time for test (%s, %s) before for test %s, skipping"%(test_id, input_file_name, container, data, ids_seen[test_id]) continue ids_seen[test_id] = (container, data) test_list.append({'string_uri_encoded_input':"\"%s\""%urllib.quote(data.encode("utf8")), 'input':data, 'expected':expected, 'string_escaped_expected':json.dumps(urllib.quote(expected.encode("utf8"))), 'id':test_id, 'container':container }) path_normal = None if tests: path_normal = write_test_file(script_dir, out_dir, tests, "html5lib_%s"%input_file_name, "html5lib_test.xml") path_innerHTML = None if innerHTML_tests: path_innerHTML = write_test_file(script_dir, out_dir, innerHTML_tests, "html5lib_innerHTML_%s"%input_file_name, "html5lib_test_fragment.xml") return path_normal, path_innerHTML def write_test_file(script_dir, out_dir, tests, file_name, template_file_name): file_name = os.path.join(out_dir, file_name + ".html") short_name = os.path.split(file_name)[1] with open(os.path.join(script_dir, template_file_name)) as f: template = MarkupTemplate(f) stream = template.generate(file_name=short_name, tests=tests) with open(file_name, "w") as f: f.write(stream.render('html', doctype='html5', encoding="utf8")) return file_name def escape_js_string(in_data): return in_data.encode("utf8").encode("string-escape") def serialize_filenames(test_filenames): return "[" + ",\n".join("\"%s\""%item for item in test_filenames) + "]" def main(): script_dir, out_dir = get_paths() test_files = [] inner_html_files = [] if len(sys.argv) > 2: test_iterator = itertools.izip( itertools.repeat(False), sorted(os.path.abspath(item) for item in glob.glob(os.path.join(sys.argv[2], "*.dat")))) else: 
test_iterator = itertools.chain( itertools.izip(itertools.repeat(False), sorted(support.get_data_files("tree-construction"))), itertools.izip(itertools.repeat(True), sorted(support.get_data_files( os.path.join("tree-construction", "scripted"))))) for (scripted, test_file) in test_iterator: input_file_name = os.path.splitext(os.path.split(test_file)[1])[0] if scripted: input_file_name = "scripted_" + input_file_name test_data = support.TestData(test_file) test_filename, inner_html_file_name = make_tests(script_dir, out_dir, input_file_name, test_data) if test_filename is not None: test_files.append(test_filename) if inner_html_file_name is not None: inner_html_files.append(inner_html_file_name) if __name__ == "__main__": main()
mpl-2.0
crafty78/ansible
lib/ansible/modules/network/ios/ios_facts.py
28
13900
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'core', 'version': '1.0'} DOCUMENTATION = """ --- module: ios_facts version_added: "2.2" author: "Peter Sprygada (@privateip)" short_description: Collect facts from remote devices running IOS description: - Collects a base set of device facts from a remote device that is running IOS. This module prepends all of the base network fact keys with C(ansible_net_<fact>). The facts module will always collect a base set of facts from the device and can enable or disable collection of additional facts. extends_documentation_fragment: ios options: gather_subset: description: - When supplied, this argument will restrict the facts collected to a given subset. Possible values for this argument include all, hardware, config, and interfaces. Can specify a list of values to include a larger subset. Values can also be used with an initial C(M(!)) to specify that a specific subset should not be collected. required: false default: '!config' """ EXAMPLES = """ # Note: examples below use the following provider dict to handle # transport and authentication to the node. 
vars: cli: host: "{{ inventory_hostname }}" username: cisco password: cisco transport: cli # Collect all facts from the device - ios_facts: gather_subset: all provider: "{{ cli }}" # Collect only the config and default facts - ios_facts: gather_subset: - config provider: "{{ cli }}" # Do not collect hardware facts - ios_facts: gather_subset: - "!hardware" provider: "{{ cli }}" """ RETURN = """ ansible_net_gather_subset: description: The list of fact subsets collected from the device returned: always type: list # default ansible_net_model: description: The model name returned from the device returned: always type: str ansible_net_serialnum: description: The serial number of the remote device returned: always type: str ansible_net_version: description: The operating system version running on the remote device returned: always type: str ansible_net_hostname: description: The configured hostname of the device returned: always type: string ansible_net_image: description: The image file the device is running returned: always type: string # hardware ansible_net_filesystems: description: All file system names available on the device returned: when hardware is configured type: list ansible_net_memfree_mb: description: The available free memory on the remote device in Mb returned: when hardware is configured type: int ansible_net_memtotal_mb: description: The total memory on the remote device in Mb returned: when hardware is configured type: int # config ansible_net_config: description: The current active config from the device returned: when config is configured type: str # interfaces ansible_net_all_ipv4_addresses: description: All IPv4 addresses configured on the device returned: when interfaces is configured type: list ansible_net_all_ipv6_addresses: description: All IPv6 addresses configured on the device returned: when interfaces is configured type: list ansible_net_interfaces: description: A hash of all interfaces running on the system returned: when interfaces is configured type: dict ansible_net_neighbors: description: The list of LLDP neighbors from the remote device returned: when interfaces is configured type: dict """ import re import itertools import ansible.module_utils.ios from ansible.module_utils.network import NetworkModule from ansible.module_utils.six import iteritems from ansible.module_utils.six.moves import zip class FactsBase(object): def __init__(self, module): self.module = module self.facts = dict() self.failed_commands = list() def run(self, cmd): try: return self.module.cli(cmd)[0] except: self.failed_commands.append(cmd) class Default(FactsBase): def populate(self): data = self.run('show version') if data: self.facts['version'] = self.parse_version(data) self.facts['serialnum'] = self.parse_serialnum(data) self.facts['model'] = self.parse_model(data) self.facts['image'] = self.parse_image(data) self.facts['hostname'] = self.parse_hostname(data) def parse_version(self, data): match = re.search(r'Version (\S+),', data) if match: return match.group(1) def parse_hostname(self, data): match = re.search(r'^(.+) uptime', data, re.M) if match: return match.group(1) def parse_model(self, data): match = re.search(r'^Cisco (.+) \(revision', data, re.M) if match: return match.group(1) def parse_image(self, data): match = re.search(r'image file is "(.+)"', data) if match: return match.group(1) def parse_serialnum(self, data): match = re.search(r'board ID (\S+)', data) if match: return match.group(1) class Hardware(FactsBase): def populate(self): data = self.run('dir | include 
Directory') if data: self.facts['filesystems'] = self.parse_filesystems(data) data = self.run('show memory statistics | include Processor') if data: match = re.findall(r'\s(\d+)\s', data) if match: self.facts['memtotal_mb'] = int(match[0]) / 1024 self.facts['memfree_mb'] = int(match[1]) / 1024 def parse_filesystems(self, data): return re.findall(r'^Directory of (\S+)/', data, re.M) class Config(FactsBase): def populate(self): data = self.run('show running-config') if data: self.facts['config'] = data class Interfaces(FactsBase): def populate(self): self.facts['all_ipv4_addresses'] = list() self.facts['all_ipv6_addresses'] = list() data = self.run('show interfaces') if data: interfaces = self.parse_interfaces(data) self.facts['interfaces'] = self.populate_interfaces(interfaces) data = self.run('show ipv6 interface') if data: data = self.parse_interfaces(data) self.populate_ipv6_interfaces(data) data = self.run('show lldp') if 'LLDP is not enabled' not in data: neighbors = self.run('show lldp neighbors detail') if neighbors: self.facts['neighbors'] = self.parse_neighbors(neighbors) def populate_interfaces(self, interfaces): facts = dict() for key, value in iteritems(interfaces): intf = dict() intf['description'] = self.parse_description(value) intf['macaddress'] = self.parse_macaddress(value) ipv4 = self.parse_ipv4(value) intf['ipv4'] = self.parse_ipv4(value) if ipv4: self.add_ip_address(ipv4['address'], 'ipv4') intf['mtu'] = self.parse_mtu(value) intf['bandwidth'] = self.parse_bandwidth(value) intf['mediatype'] = self.parse_mediatype(value) intf['duplex'] = self.parse_duplex(value) intf['lineprotocol'] = self.parse_lineprotocol(value) intf['operstatus'] = self.parse_operstatus(value) intf['type'] = self.parse_type(value) facts[key] = intf return facts def populate_ipv6_interfaces(self, data): for key, value in iteritems(data): self.facts['interfaces'][key]['ipv6'] = list() addresses = re.findall(r'\s+(.+), subnet', value, re.M) subnets = re.findall(r', subnet is (.+)$', value, re.M) for addr, subnet in zip(addresses, subnets): ipv6 = dict(address=addr.strip(), subnet=subnet.strip()) self.add_ip_address(addr.strip(), 'ipv6') self.facts['interfaces'][key]['ipv6'].append(ipv6) def add_ip_address(self, address, family): if family == 'ipv4': self.facts['all_ipv4_addresses'].append(address) else: self.facts['all_ipv6_addresses'].append(address) def parse_neighbors(self, neighbors): facts = dict() for entry in neighbors.split('------------------------------------------------'): if entry == '': continue intf = self.parse_lldp_intf(entry) if intf not in facts: facts[intf] = list() fact = dict() fact['host'] = self.parse_lldp_host(entry) fact['port'] = self.parse_lldp_port(entry) facts[intf].append(fact) return facts def parse_interfaces(self, data): parsed = dict() key = '' for line in data.split('\n'): if len(line) == 0: continue elif line[0] == ' ': parsed[key] += '\n%s' % line else: match = re.match(r'^(\S+)', line) if match: key = match.group(1) parsed[key] = line return parsed def parse_description(self, data): match = re.search(r'Description: (.+)$', data, re.M) if match: return match.group(1) def parse_macaddress(self, data): match = re.search(r'address is (\S+)', data) if match: return match.group(1) def parse_ipv4(self, data): match = re.search(r'Internet address is (\S+)', data) if match: addr, masklen = match.group(1).split('/') return dict(address=addr, masklen=int(masklen)) def parse_mtu(self, data): match = re.search(r'MTU (\d+)', data) if match: return int(match.group(1)) def 
parse_bandwidth(self, data): match = re.search(r'BW (\d+)', data) if match: return int(match.group(1)) def parse_duplex(self, data): match = re.search(r'(\w+) Duplex', data, re.M) if match: return match.group(1) def parse_mediatype(self, data): match = re.search(r'media type is (.+)$', data, re.M) if match: return match.group(1) def parse_type(self, data): match = re.search(r'Hardware is (.+),', data, re.M) if match: return match.group(1) def parse_lineprotocol(self, data): match = re.search(r'line protocol is (.+)$', data, re.M) if match: return match.group(1) def parse_operstatus(self, data): match = re.search(r'^(?:.+) is (.+),', data, re.M) if match: return match.group(1) def parse_lldp_intf(self, data): match = re.search(r'^Local Intf: (.+)$', data, re.M) if match: return match.group(1) def parse_lldp_host(self, data): match = re.search(r'System Name: (.+)$', data, re.M) if match: return match.group(1) def parse_lldp_port(self, data): match = re.search(r'Port id: (.+)$', data, re.M) if match: return match.group(1) FACT_SUBSETS = dict( default=Default, hardware=Hardware, interfaces=Interfaces, config=Config, ) VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) def main(): spec = dict( gather_subset=dict(default=['!config'], type='list') ) module = NetworkModule(argument_spec=spec, supports_check_mode=True) gather_subset = module.params['gather_subset'] runable_subsets = set() exclude_subsets = set() for subset in gather_subset: if subset == 'all': runable_subsets.update(VALID_SUBSETS) continue if subset.startswith('!'): subset = subset[1:] if subset == 'all': exclude_subsets.update(VALID_SUBSETS) continue exclude = True else: exclude = False if subset not in VALID_SUBSETS: module.fail_json(msg='Bad subset') if exclude: exclude_subsets.add(subset) else: runable_subsets.add(subset) if not runable_subsets: runable_subsets.update(VALID_SUBSETS) runable_subsets.difference_update(exclude_subsets) runable_subsets.add('default') facts = dict() facts['gather_subset'] = list(runable_subsets) instances = list() for key in runable_subsets: instances.append(FACT_SUBSETS[key](module)) failed_commands = list() try: for inst in instances: inst.populate() failed_commands.extend(inst.failed_commands) facts.update(inst.facts) except Exception: exc = get_exception() module.fail_json(msg=str(exc)) ansible_facts = dict() for key, value in iteritems(facts): key = 'ansible_net_%s' % key ansible_facts[key] = value module.exit_json(ansible_facts=ansible_facts, failed_commands=failed_commands) if __name__ == '__main__': main()
gpl-3.0
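The gather_subset option documented above accepts 'all', individual subset names, and '!'-prefixed exclusions. The include/exclude resolution lives in main(); the standalone sketch below restates that logic (minus the argument validation and module plumbing) so the semantics can be checked without a device. VALID_SUBSETS mirrors the module's constant.

VALID_SUBSETS = frozenset(['default', 'hardware', 'interfaces', 'config'])

def resolve(gather_subset):
    runable, exclude = set(), set()
    for subset in gather_subset:
        if subset == 'all':
            runable.update(VALID_SUBSETS)
            continue
        if subset.startswith('!'):
            name = subset[1:]
            if name == 'all':
                exclude.update(VALID_SUBSETS)
            else:
                exclude.add(name)
        else:
            runable.add(subset)
    if not runable:
        runable.update(VALID_SUBSETS)
    runable.difference_update(exclude)
    runable.add('default')          # the default facts are always collected
    return runable

print(resolve(['!config']))         # -> default, hardware and interfaces only
print(resolve(['all']))             # -> every subset, including config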
Kazade/NeHe-Website
google_appengine/lib/django-1.4/tests/regressiontests/fixtures_regress/models.py
33
5387
from __future__ import absolute_import from django.contrib.auth.models import User from django.db import models class Animal(models.Model): name = models.CharField(max_length=150) latin_name = models.CharField(max_length=150) count = models.IntegerField() weight = models.FloatField() # use a non-default name for the default manager specimens = models.Manager() def __unicode__(self): return self.name class Plant(models.Model): name = models.CharField(max_length=150) class Meta: # For testing when upper case letter in app name; regression for #4057 db_table = "Fixtures_regress_plant" class Stuff(models.Model): name = models.CharField(max_length=20, null=True) owner = models.ForeignKey(User, null=True) def __unicode__(self): return unicode(self.name) + u' is owned by ' + unicode(self.owner) class Absolute(models.Model): name = models.CharField(max_length=40) load_count = 0 def __init__(self, *args, **kwargs): super(Absolute, self).__init__(*args, **kwargs) Absolute.load_count += 1 class Parent(models.Model): name = models.CharField(max_length=10) class Meta: ordering = ('id',) class Child(Parent): data = models.CharField(max_length=10) # Models to regression test #7572 class Channel(models.Model): name = models.CharField(max_length=255) class Article(models.Model): title = models.CharField(max_length=255) channels = models.ManyToManyField(Channel) class Meta: ordering = ('id',) # Models to regression test #11428 class Widget(models.Model): name = models.CharField(max_length=255) class Meta: ordering = ('name',) def __unicode__(self): return self.name class WidgetProxy(Widget): class Meta: proxy = True # Check for forward references in FKs and M2Ms with natural keys class TestManager(models.Manager): def get_by_natural_key(self, key): return self.get(name=key) class Store(models.Model): objects = TestManager() name = models.CharField(max_length=255) class Meta: ordering = ('name',) def __unicode__(self): return self.name def natural_key(self): return (self.name,) class Person(models.Model): objects = TestManager() name = models.CharField(max_length=255) class Meta: ordering = ('name',) def __unicode__(self): return self.name # Person doesn't actually have a dependency on store, but we need to define # one to test the behavior of the dependency resolution algorithm. 
def natural_key(self): return (self.name,) natural_key.dependencies = ['fixtures_regress.store'] class Book(models.Model): name = models.CharField(max_length=255) author = models.ForeignKey(Person) stores = models.ManyToManyField(Store) class Meta: ordering = ('name',) def __unicode__(self): return u'%s by %s (available at %s)' % ( self.name, self.author.name, ', '.join(s.name for s in self.stores.all()) ) class NKManager(models.Manager): def get_by_natural_key(self, data): return self.get(data=data) class NKChild(Parent): data = models.CharField(max_length=10, unique=True) objects = NKManager() def natural_key(self): return self.data def __unicode__(self): return u'NKChild %s:%s' % (self.name, self.data) class RefToNKChild(models.Model): text = models.CharField(max_length=10) nk_fk = models.ForeignKey(NKChild, related_name='ref_fks') nk_m2m = models.ManyToManyField(NKChild, related_name='ref_m2ms') def __unicode__(self): return u'%s: Reference to %s [%s]' % ( self.text, self.nk_fk, ', '.join(str(o) for o in self.nk_m2m.all()) ) # ome models with pathological circular dependencies class Circle1(models.Model): name = models.CharField(max_length=255) def natural_key(self): return self.name natural_key.dependencies = ['fixtures_regress.circle2'] class Circle2(models.Model): name = models.CharField(max_length=255) def natural_key(self): return self.name natural_key.dependencies = ['fixtures_regress.circle1'] class Circle3(models.Model): name = models.CharField(max_length=255) def natural_key(self): return self.name natural_key.dependencies = ['fixtures_regress.circle3'] class Circle4(models.Model): name = models.CharField(max_length=255) def natural_key(self): return self.name natural_key.dependencies = ['fixtures_regress.circle5'] class Circle5(models.Model): name = models.CharField(max_length=255) def natural_key(self): return self.name natural_key.dependencies = ['fixtures_regress.circle6'] class Circle6(models.Model): name = models.CharField(max_length=255) def natural_key(self): return self.name natural_key.dependencies = ['fixtures_regress.circle4'] class ExternalDependency(models.Model): name = models.CharField(max_length=255) def natural_key(self): return self.name natural_key.dependencies = ['fixtures_regress.book'] # Model for regression test of #11101 class Thingy(models.Model): name = models.CharField(max_length=255)
bsd-3-clause
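Several of the models above define natural_key() plus natural_key.dependencies so that fixture serialization can reference related rows by name and order the output correctly. A hedged illustration of what that enables, assuming a configured Django 1.4 project in which this test app is importable as fixtures_regress; the objects created are invented sample data.

from django.core import serializers
from fixtures_regress.models import Store, Person, Book

store = Store.objects.create(name='Amazon')
author = Person.objects.create(name='Neal Stephenson')
book = Book.objects.create(name='Snow Crash', author=author)
book.stores.add(store)

# With use_natural_keys the dump refers to Store and Person by name instead of pk,
# and natural_key.dependencies drives the ordering of the serialized objects.
print(serializers.serialize('json', Book.objects.all(), use_natural_keys=True))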
inveniosoftware/invenio-collections
invenio_collections/alembic/97faa437d867_create_collections_tables.py
3
2576
# # This file is part of Invenio. # Copyright (C) 2016 CERN. # # Invenio is free software; you can redistribute it # and/or modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the # Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307, USA. # # In applying this license, CERN does not # waive the privileges and immunities granted to it by virtue of its status # as an Intergovernmental Organization or submit itself to any jurisdiction. """Create collections tables.""" import sqlalchemy as sa from alembic import op # revision identifiers, used by Alembic. revision = '97faa437d867' down_revision = 'ce7adcbe1c6c' branch_labels = () depends_on = None def upgrade(): """Upgrade database.""" op.create_table( 'collection', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('dbquery', sa.Text(), nullable=True), sa.Column('rgt', sa.Integer(), nullable=False), sa.Column('lft', sa.Integer(), nullable=False), sa.Column('level', sa.Integer(), nullable=False), sa.Column('parent_id', sa.Integer(), nullable=True), sa.Column('tree_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint( ['parent_id'], ['collection.id'], ondelete='CASCADE' ), sa.PrimaryKeyConstraint('id') ) op.create_index( 'collection_level_idx', 'collection', ['level'], unique=False ) op.create_index('collection_lft_idx', 'collection', ['lft'], unique=False) op.create_index('collection_rgt_idx', 'collection', ['rgt'], unique=False) op.create_index( op.f('ix_collection_name'), 'collection', ['name'], unique=True ) def downgrade(): """Downgrade database.""" op.drop_index(op.f('ix_collection_name'), table_name='collection') op.drop_index('collection_rgt_idx', table_name='collection') op.drop_index('collection_lft_idx', table_name='collection') op.drop_index('collection_level_idx', table_name='collection') op.drop_table('collection')
gpl-2.0
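Revision scripts like the one above are not imported directly; Alembic (or Invenio's wrapper around it) calls upgrade()/downgrade() while walking the revision graph. A plain-Alembic sketch follows, assuming an alembic.ini pointing at the target database; an Invenio deployment would normally drive this through its own CLI instead.

from alembic import command
from alembic.config import Config

cfg = Config('alembic.ini')                  # assumed config file
command.upgrade(cfg, '97faa437d867')         # runs upgrade() above
command.downgrade(cfg, 'ce7adcbe1c6c')       # runs downgrade(), back to the parent revision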
eezee-it/project-service
service_desk_issue/__openerp__.py
8
1526
# -*- coding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2012-2013 Daniel Reis
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Service Desk for Issues',
    'summary': 'Use Project Issues for Service Desks and service teams',
    'version': '8.0.1.1.0',
    "category": "Project Management",
    'description': """\
This module extends the ``service_desk`` module to also work with Issues.
Please refer to that module's description.
""",
    'author': "Daniel Reis,Odoo Community Association (OCA)",
    'website': '',
    'license': 'AGPL-3',
    'depends': [
        'project_issue',
        'service_desk',
    ],
    'data': [
        'service_desk_view.xml',
    ],
    'installable': True,
    'auto_install': True,
}
agpl-3.0
sobomax/virtualbox_64bit_edd
src/libs/xpcom18a4/python/xpt.py
26
17606
# ***** BEGIN LICENSE BLOCK ***** # Version: MPL 1.1/GPL 2.0/LGPL 2.1 # # The contents of this file are subject to the Mozilla Public License Version # 1.1 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # http://www.mozilla.org/MPL/ # # Software distributed under the License is distributed on an "AS IS" basis, # WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License # for the specific language governing rights and limitations under the # License. # # The Original Code is the Python XPCOM language bindings. # # The Initial Developer of the Original Code is # ActiveState Tool Corp. # Portions created by the Initial Developer are Copyright (C) 2000, 2001 # the Initial Developer. All Rights Reserved. # # Contributor(s): # David Ascher <[email protected]> (original author) # Mark Hammond <[email protected]> # # Alternatively, the contents of this file may be used under the terms of # either the GNU General Public License Version 2 or later (the "GPL"), or # the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), # in which case the provisions of the GPL or the LGPL are applicable instead # of those above. If you wish to allow use of your version of this file only # under the terms of either the GPL or the LGPL, and not to allow others to # use your version of this file under the terms of the MPL, indicate your # decision by deleting the provisions above and replace them with the notice # and other provisions required by the GPL or the LGPL. If you do not delete # the provisions above, a recipient may use your version of this file under # the terms of any one of the MPL, the GPL or the LGPL. # # ***** END LICENSE BLOCK ***** """ Program: xpt.py Task: describe interfaces etc using XPCOM reflection. Subtasks: output (nearly) exactly the same stuff as xpt_dump, for verification output Python source code that can be used as a template for an interface Status: Works pretty well if you ask me :-) Author: David Ascher did an original version that parsed XPT files directly. Mark Hammond changed it to use the reflection interfaces, but kept most of the printing logic. Revision: 0.1: March 6, 2000 0.2: April 2000 - Mark removed lots of Davids lovely parsing code in favour of the new xpcom interfaces that provide this info. May 2000 - Moved into Perforce - track the log there! Early 2001 - Moved into the Mozilla CVS tree - track the log there! Todo: Fill out this todo list. """ import string, sys import xpcom import xpcom._xpcom from xpcom_consts import * class Interface: def __init__(self, iid): iim = xpcom._xpcom.XPTI_GetInterfaceInfoManager() if hasattr(iid, "upper"): # Is it a stringy thing. item = iim.GetInfoForName(iid) else: item = iim.GetInfoForIID(iid) self.interface_info = item self.namespace = "" # where does this come from? self.methods = Methods(item) self.constants = Constants(item) # delegate attributes to the real interface def __getattr__(self, attr): return getattr(self.interface_info, attr) def GetParent(self): try: raw_parent = self.interface_info.GetParent() if raw_parent is None: return None return Interface(raw_parent.GetIID()) except xpcom.Exception: # Parent interface is probably not scriptable - assume nsISupports. if xpcom.verbose: # The user may be confused as to why this is happening! 
print "The parent interface of IID '%s' can not be located - assuming nsISupports" return Interface(xpcom._xpcom.IID_nsISupports) def Describe_Python(self): method_reprs = [] methods = filter(lambda m: not m.IsNotXPCOM(), self.methods) for m in methods: method_reprs.append(m.Describe_Python()) method_joiner = "\n" methods_repr = method_joiner.join(method_reprs) return \ """class %s: _com_interfaces_ = xpcom.components.interfaces.%s # If this object needs to be registered, the following 2 are also needed. # _reg_clsid_ = "{a new clsid generated for this object}" # _reg_contractid_ = "The.Object.Name"\n%s""" % (self.GetName(), self.GetIID().name, methods_repr) def Describe(self): # Make the IID look like xtp_dump - "(" instead of "{" iid_use = "(" + str(self.GetIID())[1:-1] + ")" s = ' - '+self.namespace+'::'+ self.GetName() + ' ' + iid_use + ':\n' parent = self.GetParent() if parent is not None: s = s + ' Parent: ' + parent.namespace + '::' + parent.GetName() + '\n' s = s + ' Flags:\n' if self.IsScriptable(): word = 'TRUE' else: word = 'FALSE' s = s + ' Scriptable: ' + word + '\n' s = s + ' Methods:\n' methods = filter(lambda m: not m.IsNotXPCOM(), self.methods) if len(methods): for m in methods: s = s + ' ' + m.Describe() + '\n' else: s = s + ' No Methods\n' s = s + ' Constants:\n' if self.constants: for c in self.constants: s = s + ' ' + c.Describe() + '\n' else: s = s + ' No Constants\n' return s # A class that allows caching and iterating of methods. class Methods: def __init__(self, interface_info): self.interface_info = interface_info try: self.items = [None] * interface_info.GetMethodCount() except xpcom.Exception: if xpcom.verbose: print "** GetMethodCount failed?? - assuming no methods" self.items = [] def __len__(self): return len(self.items) def __getitem__(self, index): ret = self.items[index] if ret is None: mi = self.interface_info.GetMethodInfo(index) ret = self.items[index] = Method(mi, index, self.interface_info) return ret class Method: def __init__(self, method_info, method_index, interface_info = None): self.interface_info = interface_info self.method_index = method_index self.flags, self.name, param_descs, self.result_desc = method_info # Build the params. self.params = [] pi=0 for pd in param_descs: self.params.append( Parameter(pd, pi, method_index, interface_info) ) pi = pi + 1 # Run over the params setting the "sizeof" params to hidden. 
for p in self.params: td = p.type_desc tag = XPT_TDP_TAG(td[0]) if tag==T_ARRAY and p.IsIn(): self.params[td[1]].hidden_indicator = 2 elif tag in [T_PSTRING_SIZE_IS, T_PWSTRING_SIZE_IS] and p.IsIn(): self.params[td[1]].hidden_indicator = 1 def IsGetter(self): return (self.flags & XPT_MD_GETTER) def IsSetter(self): return (self.flags & XPT_MD_SETTER) def IsNotXPCOM(self): return (self.flags & XPT_MD_NOTXPCOM) def IsConstructor(self): return (self.flags & XPT_MD_CTOR) def IsHidden(self): return (self.flags & XPT_MD_HIDDEN) def Describe_Python(self): if self.method_index < 3: # Ignore QI etc return "" base_name = self.name if self.IsGetter(): name = "get_%s" % (base_name,) elif self.IsSetter(): name = "set_%s" % (base_name,) else: name = base_name param_decls = ["self"] in_comments = [] out_descs = [] result_comment = "Result: void - None" for p in self.params: in_desc, in_desc_comments, out_desc, this_result_comment = p.Describe_Python() if in_desc is not None: param_decls.append(in_desc) if in_desc_comments is not None: in_comments.append(in_desc_comments) if out_desc is not None: out_descs.append(out_desc) if this_result_comment is not None: result_comment = this_result_comment joiner = "\n # " in_comment = out_desc = "" if in_comments: in_comment = joiner + joiner.join(in_comments) if out_descs: out_desc = joiner + joiner.join(out_descs) return """ def %s( %s ): # %s%s%s pass""" % (name, ", ".join(param_decls), result_comment, in_comment, out_desc) def Describe(self): s = '' if self.IsGetter(): G = 'G' else: G = ' ' if self.IsSetter(): S = 'S' else: S = ' ' if self.IsHidden(): H = 'H' else: H = ' ' if self.IsNotXPCOM(): N = 'N' else: N = ' ' if self.IsConstructor(): C = 'C' else: C = ' ' def desc(a): return a.Describe() method_desc = string.join(map(desc, self.params), ', ') result_type = TypeDescriber(self.result_desc[0], None) return_desc = result_type.Describe() i = string.find(return_desc, 'retval ') if i != -1: return_desc = return_desc[:i] + return_desc[i+len('retval '):] return G+S+H+N+C+' '+return_desc+' '+self.name + '('+ method_desc + ');' class Parameter: def __init__(self, param_desc, param_index, method_index, interface_info = None): self.param_flags, self.type_desc = param_desc self.hidden_indicator = 0 # Is this a special "size" type param that will be hidden from Python? self.param_index = param_index self.method_index= method_index self.interface_info = interface_info def __repr__(self): return "<param %(param_index)d (method %(method_index)d) - flags = 0x%(param_flags)x, type = %(type_desc)s>" % self.__dict__ def IsIn(self): return XPT_PD_IS_IN(self.param_flags) def IsOut(self): return XPT_PD_IS_OUT(self.param_flags) def IsInOut(self): return self.IsIn() and self.IsOut() def IsRetval(self): return XPT_PD_IS_RETVAL(self.param_flags) def IsShared(self): return XPT_PD_IS_SHARED(self.param_flags) def IsDipper(self): return XPT_PD_IS_DIPPER(self.param_flags) def Describe_Python(self): name = "param%d" % (self.param_index,) if self.hidden_indicator: # Could remove the comment - Im trying to tell the user where that param has # gone from the signature! 
return None, "%s is a hidden parameter" % (name,), None, None t = TypeDescriber(self.type_desc[0], self) decl = in_comment = out_comment = result_comment = None type_desc = t.Describe() if self.IsIn() and not self.IsDipper(): decl = name extra="" if self.IsOut(): extra = "Out" in_comment = "In%s: %s: %s" % (extra, name, type_desc) elif self.IsOut() or self.IsDipper(): if self.IsRetval(): result_comment = "Result: %s" % (type_desc,) else: out_comment = "Out: %s" % (type_desc,) return decl, in_comment, out_comment, result_comment def Describe(self): parts = [] if self.IsInOut(): parts.append('inout') elif self.IsIn(): parts.append('in') elif self.IsOut(): parts.append('out') if self.IsDipper(): parts.append("dipper") if self.IsRetval(): parts.append('retval') if self.IsShared(): parts.append('shared') t = TypeDescriber(self.type_desc[0], self) type_str = t.Describe() parts.append(type_str) return string.join(parts) # A class that allows caching and iterating of constants. class Constants: def __init__(self, interface_info): self.interface_info = interface_info try: self.items = [None] * interface_info.GetConstantCount() except xpcom.Exception: if xpcom.verbose: print "** GetConstantCount failed?? - assuming no constants" self.items = [] def __len__(self): return len(self.items) def __getitem__(self, index): ret = self.items[index] if ret is None: ci = self.interface_info.GetConstant(index) ret = self.items[index] = Constant(ci) return ret class Constant: def __init__(self, ci): self.name, self.type, self.value = ci def Describe(self): return TypeDescriber(self.type, None).Describe() + ' ' +self.name+' = '+str(self.value)+';' __str__ = Describe def MakeReprForInvoke(param): tag = param.type_desc[0] & XPT_TDP_TAGMASK if tag == T_INTERFACE: i_info = param.interface_info try: iid = i_info.GetIIDForParam(param.method_index, param.param_index) except xpcom.Exception: # IID not available (probably not scriptable) - just use nsISupports. 
iid = xpcom._xpcom.IID_nsISupports return param.type_desc[0], 0, 0, str(iid) elif tag == T_ARRAY: i_info = param.interface_info array_desc = i_info.GetTypeForParam(param.method_index, param.param_index, 1) return param.type_desc[:-1] + array_desc[:1] return param.type_desc class TypeDescriber: def __init__(self, type_flags, param): self.type_flags = type_flags self.tag = XPT_TDP_TAG(self.type_flags) self.param = param def IsPointer(self): return XPT_TDP_IS_POINTER(self.type_flags) def IsUniquePointer(self): return XPT_TDP_IS_UNIQUE_POINTER(self.type_flags) def IsReference(self): return XPT_TDP_IS_REFERENCE(self.type_flags) def repr_for_invoke(self): return (self.type_flags,) def GetName(self): is_ptr = self.IsPointer() data = type_info_map.get(self.tag) if data is None: data = ("unknown",) if self.IsReference(): if len(data) > 2: return data[2] return data[0] + " &" if self.IsPointer(): if len(data)>1: return data[1] return data[0] + " *" return data[0] def Describe(self): if self.tag == T_ARRAY: # NOTE - Adding a type specifier to the array is different from xpt_dump.exe if self.param is None or self.param.interface_info is None: type_desc = "" # Dont have explicit info about the array type :-( else: i_info = self.param.interface_info type_code = i_info.GetTypeForParam(self.param.method_index, self.param.param_index, 1) type_desc = TypeDescriber( type_code[0], None).Describe() return self.GetName() + "[" + type_desc + "]" elif self.tag == T_INTERFACE: if self.param is None or self.param.interface_info is None: return "nsISomething" # Dont have explicit info about the IID :-( i_info = self.param.interface_info m_index = self.param.method_index p_index = self.param.param_index try: iid = i_info.GetIIDForParam(m_index, p_index) return iid.name except xpcom.Exception: return "nsISomething" return self.GetName() # These are just for output purposes, so should be # the same as xpt_dump uses type_info_map = { T_I8 : ("int8",), T_I16 : ("int16",), T_I32 : ("int32",), T_I64 : ("int64",), T_U8 : ("uint8",), T_U16 : ("uint16",), T_U32 : ("uint32",), T_U64 : ("uint64",), T_FLOAT : ("float",), T_DOUBLE : ("double",), T_BOOL : ("boolean",), T_CHAR : ("char",), T_WCHAR : ("wchar_t", "wstring"), T_VOID : ("void",), T_IID : ("reserved", "nsIID *", "nsIID &"), T_DOMSTRING : ("DOMString",), T_CHAR_STR : ("reserved", "string"), T_WCHAR_STR : ("reserved", "wstring"), T_INTERFACE : ("reserved", "Interface"), T_INTERFACE_IS : ("reserved", "InterfaceIs *"), T_ARRAY : ("reserved", "Array"), T_PSTRING_SIZE_IS : ("reserved", "string_s"), T_PWSTRING_SIZE_IS : ("reserved", "wstring_s"), } def dump_interface(iid, mode): interface = Interface(iid) describer_name = "Describe" if mode == "xptinfo": mode = None if mode is not None: describer_name = describer_name + "_" + mode.capitalize() describer = getattr(interface, describer_name) print describer() if __name__=='__main__': if len(sys.argv) == 1: print "Usage: xpt.py [-xptinfo] interface_name, ..." print " -info: Dump in a style similar to the xptdump tool" print "Dumping nsISupports and nsIInterfaceInfo" sys.argv.append('nsIInterfaceInfo') sys.argv.append('-xptinfo') sys.argv.append('nsISupports') sys.argv.append('nsIInterfaceInfo') mode = "Python" for i in sys.argv[1:]: if i[0] == "-": mode = i[1:] else: dump_interface(i, mode)
gpl-2.0
tuxfux-hlp-notes/python-batches
batch-67/12-modules/myenv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/requirements.py
454
4355
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import string
import re

from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
from pkg_resources.extern.pyparsing import Literal as L  # noqa
from pkg_resources.extern.six.moves.urllib import parse as urlparse

from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet


class InvalidRequirement(ValueError):
    """
    An invalid requirement was found, users should refer to PEP 508.
    """


ALPHANUM = Word(string.ascii_letters + string.digits)

LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()

PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))

NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER

URI = Regex(r'[^ ]+')("url")
URL = (AT + URI)

EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")

VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)

VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE),
                       joinString=",", adjacent=False)("_raw_spec")
_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '')

VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])

MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
    lambda s, l, t: Marker(s[t._original_start:t._original_end])
)
MARKER_SEPERATOR = SEMICOLON
MARKER = MARKER_SEPERATOR + MARKER_EXPR

VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)

NAMED_REQUIREMENT = \
    NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)

REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd


class Requirement(object):
    """Parse a requirement.

    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
    string.
    """

    # TODO: Can we test whether something is contained within a requirement?
    #       If so how do we do that? Do we need to test against the _name_ of
    #       the thing as well as the version? What about the markers?
    # TODO: Can we normalize the name and extra name?

    def __init__(self, requirement_string):
        try:
            req = REQUIREMENT.parseString(requirement_string)
        except ParseException as e:
            raise InvalidRequirement(
                "Invalid requirement, parse error at \"{0!r}\"".format(
                    requirement_string[e.loc:e.loc + 8]))

        self.name = req.name
        if req.url:
            parsed_url = urlparse.urlparse(req.url)
            if not (parsed_url.scheme and parsed_url.netloc) or (
                    not parsed_url.scheme and not parsed_url.netloc):
                raise InvalidRequirement("Invalid URL given")
            self.url = req.url
        else:
            self.url = None
        self.extras = set(req.extras.asList() if req.extras else [])
        self.specifier = SpecifierSet(req.specifier)
        self.marker = req.marker if req.marker else None

    def __str__(self):
        parts = [self.name]

        if self.extras:
            parts.append("[{0}]".format(",".join(sorted(self.extras))))

        if self.specifier:
            parts.append(str(self.specifier))

        if self.url:
            parts.append("@ {0}".format(self.url))

        if self.marker:
            parts.append("; {0}".format(self.marker))

        return "".join(parts)

    def __repr__(self):
        return "<Requirement({0!r})>".format(str(self))
gpl-3.0
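The record above carries pip's vendored copy of the PEP 508 requirement grammar. A minimal usage sketch, assuming the equivalent standalone packaging distribution is importable (the requirement string itself is invented for illustration, and the comments show approximate output):

from packaging.requirements import Requirement

req = Requirement("requests[security]>=2.8.1; python_version < '2.7'")
print(req.name)       # requests
print(req.extras)     # {'security'}
print(req.specifier)  # >=2.8.1  (a SpecifierSet)
print(req.marker)     # python_version < "2.7"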
yuruofeifei/mxnet
python/mxnet/gluon/model_zoo/vision/densenet.py
10
7848
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # coding: utf-8 # pylint: disable= arguments-differ """DenseNet, implemented in Gluon.""" __all__ = ['DenseNet', 'densenet121', 'densenet161', 'densenet169', 'densenet201'] from ....context import cpu from ...block import HybridBlock from ... import nn from ..custom_layers import HybridConcurrent, Identity # Helpers def _make_dense_block(num_layers, bn_size, growth_rate, dropout, stage_index): out = nn.HybridSequential(prefix='stage%d_'%stage_index) with out.name_scope(): for _ in range(num_layers): out.add(_make_dense_layer(growth_rate, bn_size, dropout)) return out def _make_dense_layer(growth_rate, bn_size, dropout): new_features = nn.HybridSequential(prefix='') new_features.add(nn.BatchNorm()) new_features.add(nn.Activation('relu')) new_features.add(nn.Conv2D(bn_size * growth_rate, kernel_size=1, use_bias=False)) new_features.add(nn.BatchNorm()) new_features.add(nn.Activation('relu')) new_features.add(nn.Conv2D(growth_rate, kernel_size=3, padding=1, use_bias=False)) if dropout: new_features.add(nn.Dropout(dropout)) out = HybridConcurrent(concat_dim=1, prefix='') out.add(Identity()) out.add(new_features) return out def _make_transition(num_output_features): out = nn.HybridSequential(prefix='') out.add(nn.BatchNorm()) out.add(nn.Activation('relu')) out.add(nn.Conv2D(num_output_features, kernel_size=1, use_bias=False)) out.add(nn.AvgPool2D(pool_size=2, strides=2)) return out # Net class DenseNet(HybridBlock): r"""Densenet-BC model from the `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper. Parameters ---------- num_init_features : int Number of filters to learn in the first convolution layer. growth_rate : int Number of filters to add each layer (`k` in the paper). block_config : list of int List of integers for numbers of layers in each pooling block. bn_size : int, default 4 Multiplicative factor for number of bottle neck layers. (i.e. bn_size * k features in the bottleneck layer) dropout : float, default 0 Rate of dropout after each dense layer. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, num_init_features, growth_rate, block_config, bn_size=4, dropout=0, classes=1000, **kwargs): super(DenseNet, self).__init__(**kwargs) with self.name_scope(): self.features = nn.HybridSequential(prefix='') self.features.add(nn.Conv2D(num_init_features, kernel_size=7, strides=2, padding=3, use_bias=False)) self.features.add(nn.BatchNorm()) self.features.add(nn.Activation('relu')) self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1)) # Add dense blocks num_features = num_init_features for i, num_layers in enumerate(block_config): self.features.add(_make_dense_block(num_layers, bn_size, growth_rate, dropout, i+1)) num_features = num_features + num_layers * growth_rate if i != len(block_config) - 1: self.features.add(_make_transition(num_features // 2)) num_features = num_features // 2 self.features.add(nn.BatchNorm()) self.features.add(nn.Activation('relu')) self.features.add(nn.AvgPool2D(pool_size=7)) self.features.add(nn.Flatten()) self.output = nn.Dense(classes) def hybrid_forward(self, F, x): x = self.features(x) x = self.output(x) return x # Specification densenet_spec = {121: (64, 32, [6, 12, 24, 16]), 161: (96, 48, [6, 12, 36, 24]), 169: (64, 32, [6, 12, 32, 32]), 201: (64, 32, [6, 12, 48, 32])} # Constructor def get_densenet(num_layers, pretrained=False, ctx=cpu(), root='~/.mxnet/models', **kwargs): r"""Densenet-BC model from the `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper. Parameters ---------- num_layers : int Number of layers for the variant of densenet. Options are 121, 161, 169, 201. pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ num_init_features, growth_rate, block_config = densenet_spec[num_layers] net = DenseNet(num_init_features, growth_rate, block_config, **kwargs) if pretrained: from ..model_store import get_model_file net.load_params(get_model_file('densenet%d'%(num_layers), root=root), ctx=ctx) return net def densenet121(**kwargs): r"""Densenet-BC 121-layer model from the `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_densenet(121, **kwargs) def densenet161(**kwargs): r"""Densenet-BC 161-layer model from the `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_densenet(161, **kwargs) def densenet169(**kwargs): r"""Densenet-BC 169-layer model from the `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. 
""" return get_densenet(169, **kwargs) def densenet201(**kwargs): r"""Densenet-BC 201-layer model from the `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_densenet(201, **kwargs)
apache-2.0
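The DenseNet record above ends with constructors such as densenet121(). A small, hedged sketch of exercising that constructor on a dummy batch, assuming a recent MXNet install (on older versions mx.nd.random.uniform may instead be mx.nd.random_uniform):

import mxnet as mx
from mxnet.gluon.model_zoo.vision import densenet121

net = densenet121(pretrained=False)               # 121-layer variant from the spec table above
net.initialize(mx.init.Xavier())                  # random init instead of downloading weights
x = mx.nd.random.uniform(shape=(1, 3, 224, 224))  # one fake 224x224 RGB image
print(net(x).shape)                               # expected: (1, 1000), one score per class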
maestrano/openerp
openerp/addons/account_payment/wizard/account_payment_populate_statement.py
40
6057
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from lxml import etree from openerp.osv import fields, osv class account_payment_populate_statement(osv.osv_memory): _name = "account.payment.populate.statement" _description = "Account Payment Populate Statement" _columns = { 'lines': fields.many2many('payment.line', 'payment_line_rel_', 'payment_id', 'line_id', 'Payment Lines') } def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False): line_obj = self.pool.get('payment.line') res = super(account_payment_populate_statement, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False) line_ids = line_obj.search(cr, uid, [ ('move_line_id.reconcile_id', '=', False), ('bank_statement_line_id', '=', False), ('move_line_id.state','=','valid')]) line_ids.extend(line_obj.search(cr, uid, [ ('move_line_id.reconcile_id', '=', False), ('order_id.mode', '=', False), ('move_line_id.state','=','valid')])) domain = '[("id", "in", '+ str(line_ids)+')]' doc = etree.XML(res['arch']) nodes = doc.xpath("//field[@name='lines']") for node in nodes: node.set('domain', domain) res['arch'] = etree.tostring(doc) return res def populate_statement(self, cr, uid, ids, context=None): line_obj = self.pool.get('payment.line') statement_obj = self.pool.get('account.bank.statement') statement_line_obj = self.pool.get('account.bank.statement.line') currency_obj = self.pool.get('res.currency') voucher_obj = self.pool.get('account.voucher') voucher_line_obj = self.pool.get('account.voucher.line') move_line_obj = self.pool.get('account.move.line') if context is None: context = {} data = self.read(cr, uid, ids, [], context=context)[0] line_ids = data['lines'] if not line_ids: return {'type': 'ir.actions.act_window_close'} statement = statement_obj.browse(cr, uid, context['active_id'], context=context) for line in line_obj.browse(cr, uid, line_ids, context=context): ctx = context.copy() ctx['date'] = line.ml_maturity_date # was value_date earlier,but this field exists no more now amount = currency_obj.compute(cr, uid, line.currency.id, statement.currency.id, line.amount_currency, context=ctx) if not line.move_line_id.id: continue context.update({'move_line_ids': [line.move_line_id.id]}) result = voucher_obj.onchange_partner_id(cr, uid, [], partner_id=line.partner_id.id, journal_id=statement.journal_id.id, amount=abs(amount), currency_id= statement.currency.id, ttype='payment', date=line.ml_maturity_date, context=context) if line.move_line_id: voucher_res = { 'type': 'payment', 'name': line.name, 'partner_id': line.partner_id.id, 
'journal_id': statement.journal_id.id, 'account_id': result['value'].get('account_id', statement.journal_id.default_credit_account_id.id), 'company_id': statement.company_id.id, 'currency_id': statement.currency.id, 'date': line.date or time.strftime('%Y-%m-%d'), 'amount': abs(amount), 'period_id': statement.period_id.id, } voucher_id = voucher_obj.create(cr, uid, voucher_res, context=context) voucher_line_dict = {} for line_dict in result['value']['line_cr_ids'] + result['value']['line_dr_ids']: move_line = move_line_obj.browse(cr, uid, line_dict['move_line_id'], context) if line.move_line_id.move_id.id == move_line.move_id.id: voucher_line_dict = line_dict if voucher_line_dict: voucher_line_dict.update({'voucher_id': voucher_id}) voucher_line_obj.create(cr, uid, voucher_line_dict, context=context) st_line_id = statement_line_obj.create(cr, uid, { 'name': line.order_id.reference or '?', 'amount': - amount, 'type': 'supplier', 'partner_id': line.partner_id.id, 'account_id': line.move_line_id.account_id.id, 'statement_id': statement.id, 'ref': line.communication, 'voucher_id': voucher_id, }, context=context) line_obj.write(cr, uid, [line.id], {'bank_statement_line_id': st_line_id}) return {'type': 'ir.actions.act_window_close'} account_payment_populate_statement() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
ammaradil/fibonacci
Lib/site-packages/django/contrib/gis/db/models/aggregates.py
414
2395
from django.contrib.gis.db.models.fields import ExtentField
from django.db.models.aggregates import Aggregate

__all__ = ['Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union']


class GeoAggregate(Aggregate):
    function = None
    is_extent = False

    def as_sql(self, compiler, connection):
        # this will be called again in parent, but it's needed now - before
        # we get the spatial_aggregate_name
        connection.ops.check_expression_support(self)
        self.function = connection.ops.spatial_aggregate_name(self.name)
        return super(GeoAggregate, self).as_sql(compiler, connection)

    def as_oracle(self, compiler, connection):
        if not hasattr(self, 'tolerance'):
            self.tolerance = 0.05
        self.extra['tolerance'] = self.tolerance
        if not self.is_extent:
            self.template = '%(function)s(SDOAGGRTYPE(%(expressions)s,%(tolerance)s))'
        return self.as_sql(compiler, connection)

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = super(GeoAggregate, self).resolve_expression(query, allow_joins, reuse, summarize, for_save)
        for expr in c.get_source_expressions():
            if not hasattr(expr.field, 'geom_type'):
                raise ValueError('Geospatial aggregates only allowed on geometry fields.')
        return c

    def convert_value(self, value, expression, connection, context):
        return connection.ops.convert_geom(value, self.output_field)


class Collect(GeoAggregate):
    name = 'Collect'


class Extent(GeoAggregate):
    name = 'Extent'
    is_extent = '2D'

    def __init__(self, expression, **extra):
        super(Extent, self).__init__(expression, output_field=ExtentField(), **extra)

    def convert_value(self, value, expression, connection, context):
        return connection.ops.convert_extent(value, context.get('transformed_srid'))


class Extent3D(GeoAggregate):
    name = 'Extent3D'
    is_extent = '3D'

    def __init__(self, expression, **extra):
        super(Extent3D, self).__init__(expression, output_field=ExtentField(), **extra)

    def convert_value(self, value, expression, connection, context):
        return connection.ops.convert_extent3d(value, context.get('transformed_srid'))


class MakeLine(GeoAggregate):
    name = 'MakeLine'


class Union(GeoAggregate):
    name = 'Union'
mit
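The aggregates in the record above are normally used through queryset aggregation. A brief illustrative sketch with a hypothetical GeoDjango model (City with a point geometry field), so it shows the shape of the API rather than being runnable as-is:

from django.contrib.gis.db.models import Extent, Union

# City is a hypothetical model with a `point` geometry field.
bbox = City.objects.aggregate(Extent('point'))   # {'point__extent': (xmin, ymin, xmax, ymax)}
merged = City.objects.aggregate(Union('point'))  # {'point__union': one combined geometry}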
lihui7115/ChromiumGStreamerBackend
tools/telemetry/telemetry/web_perf/metrics/blob_timeline_unittest.py
14
5994
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import unittest from collections import namedtuple from telemetry.internal.results import page_test_results from telemetry.page import page from telemetry.web_perf.metrics import blob_timeline from telemetry.web_perf import timeline_interaction_record FakeEvent = namedtuple('Event', 'name, start, end, thread_duration, args') Interaction = timeline_interaction_record.TimelineInteractionRecord TEST_INTERACTION_LABEL = 'Action_TestInteraction' WRITE_EVENT_NAME = 'Registry::RegisterBlob' READ_EVENT_NAME = 'BlobRequest' def GetBlobMetrics(events, interactions): results = page_test_results.PageTestResults() test_page = page.Page('file://blank.html') results.WillRunPage(test_page) blob_timeline.BlobTimelineMetric()._AddWriteResultsInternal( events, interactions, results) # pylint:disable=protected-access blob_timeline.BlobTimelineMetric()._AddReadResultsInternal( events, interactions, results) # pylint:disable=protected-access return_dict = dict((value.name, value.values) for value in results.current_page_run.values) results.DidRunPage(test_page) return return_dict def FakeWriteEvent(start, end, thread_duration=None): if not thread_duration: thread_duration = end - start return FakeEvent(blob_timeline.WRITE_EVENT_NAME, start, end, thread_duration, {'uuid':'fakeuuid'}) def FakeReadEvent(start, end, uuid, thread_duration=None): if not thread_duration: thread_duration = end - start return FakeEvent(blob_timeline.READ_EVENT_NAME, start, end, thread_duration, {'uuid': uuid}) def TestInteraction(start, end): return Interaction(TEST_INTERACTION_LABEL, start, end) class BlobTimelineMetricUnitTest(unittest.TestCase): def testWriteMetric(self): events = [FakeWriteEvent(0, 1), FakeWriteEvent(9, 11), FakeWriteEvent(10, 13), FakeWriteEvent(20, 24), FakeWriteEvent(21, 26), FakeWriteEvent(29, 35), FakeWriteEvent(30, 37), FakeWriteEvent(40, 48), FakeWriteEvent(41, 50), FakeEvent('something', 10, 13, 3, {}), FakeEvent('FrameView::something', 20, 24, 4, {}), FakeEvent('SomeThing::performLayout', 30, 37, 7, {}), FakeEvent('something else', 40, 48, 8, {})] interactions = [TestInteraction(10, 20), TestInteraction(30, 40)] self.assertEqual({'blob-reads': None, 'blob-writes': None}, GetBlobMetrics(events, [])) self.assertEqual({'blob-reads': None, 'blob-writes': None}, GetBlobMetrics([], interactions)) # The first event starts before the first interaction, so it is ignored. # The second event starts before the first interaction, so it is ignored. # The third event starts during the first interaction, and its duration is # 13 - 10 = 3. # The fourth event starts during the first interaction, and its duration is # 24 - 20 = 4. # The fifth event starts between the two interactions, so it is ignored. # The sixth event starts between the two interactions, so it is ignored. # The seventh event starts during the second interaction, and its duration # is 37 - 30 = 7. # The eighth event starts during the second interaction and its duration is # 48 - 40 = 8. # The ninth event starts after the last interaction, so it is ignored. # The rest of the events are not layout events, so they are ignored. 
self.assertEqual({'blob-reads': None, 'blob-writes': [3, 4, 7, 8]}, GetBlobMetrics(events, interactions)) def testReadMetric(self): events = [FakeReadEvent(0, 1, 'a'), FakeReadEvent(9, 11, 'a'), FakeReadEvent(10, 13, 'b', 1), # counts FakeReadEvent(15, 18, 'b'), # counts FakeReadEvent(21, 26, 'b'), FakeReadEvent(29, 35, 'c'), FakeReadEvent(31, 32, 'e'), # counts FakeReadEvent(34, 36, 'e', 1), # counts FakeReadEvent(32, 37, 'd'), # counts FakeEvent('something', 10, 13, 3, {}), FakeEvent('something else', 40, 48, 8, {})] interactions = [TestInteraction(10, 20), TestInteraction(30, 40)] self.assertEqual({'blob-reads': None, 'blob-writes': None}, GetBlobMetrics(events, [])) self.assertEqual({'blob-reads': None, 'blob-writes': None}, GetBlobMetrics([], interactions)) # We ignore events outside of the interaction intervals, and we use the # begining of the first event of the interval and the end of the last # event. # 18 - 10 = 8 # 37 - 32 = 5 self.assertEqual({'blob-reads': [4, 2, 5], 'blob-writes': None}, GetBlobMetrics(events, interactions)) def testReadAndWriteMetrics(self): events = [FakeReadEvent(0, 1, 'a'), FakeReadEvent(9, 11, 'a'), FakeReadEvent(10, 13, 'b'), # counts FakeWriteEvent(15, 18), # counts FakeReadEvent(21, 26, 'c'), FakeReadEvent(29, 35, 'd'), FakeWriteEvent(31, 34, 1), # counts FakeReadEvent(32, 33, 'e'), # counts FakeReadEvent(34, 35, 'e'), # counts FakeEvent('something', 31, 33, 2, {})] interactions = [TestInteraction(10, 20), TestInteraction(30, 35)] self.assertEqual({'blob-reads': None, 'blob-writes': None}, GetBlobMetrics(events, [])) self.assertEqual({'blob-reads': None, 'blob-writes': None}, GetBlobMetrics([], interactions)) # We use the read events in the interactions, so the same as the test above. self.assertEqual({'blob-reads': [3, 2], 'blob-writes': [3, 1]}, GetBlobMetrics(events, interactions))
bsd-3-clause
huongttlan/bokeh
bokeh/compat/mplexporter/renderers/base.py
44
14355
import warnings import itertools from contextlib import contextmanager import numpy as np from matplotlib import transforms from .. import utils from .. import _py3k_compat as py3k class Renderer(object): @staticmethod def ax_zoomable(ax): return bool(ax and ax.get_navigate()) @staticmethod def ax_has_xgrid(ax): return bool(ax and ax.xaxis._gridOnMajor and ax.yaxis.get_gridlines()) @staticmethod def ax_has_ygrid(ax): return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines()) @property def current_ax_zoomable(self): return self.ax_zoomable(self._current_ax) @property def current_ax_has_xgrid(self): return self.ax_has_xgrid(self._current_ax) @property def current_ax_has_ygrid(self): return self.ax_has_ygrid(self._current_ax) @contextmanager def draw_figure(self, fig, props): if hasattr(self, "_current_fig") and self._current_fig is not None: warnings.warn("figure embedded in figure: something is wrong") self._current_fig = fig self._fig_props = props self.open_figure(fig=fig, props=props) yield self.close_figure(fig=fig) self._current_fig = None self._fig_props = {} @contextmanager def draw_axes(self, ax, props): if hasattr(self, "_current_ax") and self._current_ax is not None: warnings.warn("axes embedded in axes: something is wrong") self._current_ax = ax self._ax_props = props self.open_axes(ax=ax, props=props) yield self.close_axes(ax=ax) self._current_ax = None self._ax_props = {} @contextmanager def draw_legend(self, legend, props): self._current_legend = legend self._legend_props = props self.open_legend(legend=legend, props=props) yield self.close_legend(legend=legend) self._current_legend = None self._legend_props = {} # Following are the functions which should be overloaded in subclasses def open_figure(self, fig, props): """ Begin commands for a particular figure. Parameters ---------- fig : matplotlib.Figure The Figure which will contain the ensuing axes and elements props : dictionary The dictionary of figure properties """ pass def close_figure(self, fig): """ Finish commands for a particular figure. Parameters ---------- fig : matplotlib.Figure The figure which is finished being drawn. """ pass def open_axes(self, ax, props): """ Begin commands for a particular axes. Parameters ---------- ax : matplotlib.Axes The Axes which will contain the ensuing axes and elements props : dictionary The dictionary of axes properties """ pass def close_axes(self, ax): """ Finish commands for a particular axes. Parameters ---------- ax : matplotlib.Axes The Axes which is finished being drawn. """ pass def open_legend(self, legend, props): """ Beging commands for a particular legend. Parameters ---------- legend : matplotlib.legend.Legend The Legend that will contain the ensuing elements props : dictionary The dictionary of legend properties """ pass def close_legend(self, legend): """ Finish commands for a particular legend. Parameters ---------- legend : matplotlib.legend.Legend The Legend which is finished being drawn """ pass def draw_marked_line(self, data, coordinates, linestyle, markerstyle, label, mplobj=None): """Draw a line that also has markers. If this isn't reimplemented by a renderer object, by default, it will make a call to BOTH draw_line and draw_markers when both markerstyle and linestyle are not None in the same Line2D object. 
""" if linestyle is not None: self.draw_line(data, coordinates, linestyle, label, mplobj) if markerstyle is not None: self.draw_markers(data, coordinates, markerstyle, label, mplobj) def draw_line(self, data, coordinates, style, label, mplobj=None): """ Draw a line. By default, draw the line via the draw_path() command. Some renderers might wish to override this and provide more fine-grained behavior. In matplotlib, lines are generally created via the plt.plot() command, though this command also can create marker collections. Parameters ---------- data : array_like A shape (N, 2) array of datapoints. coordinates : string A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. style : dictionary a dictionary specifying the appearance of the line. mplobj : matplotlib object the matplotlib plot element which generated this line """ pathcodes = ['M'] + (data.shape[0] - 1) * ['L'] pathstyle = dict(facecolor='none', **style) pathstyle['edgecolor'] = pathstyle.pop('color') pathstyle['edgewidth'] = pathstyle.pop('linewidth') self.draw_path(data=data, coordinates=coordinates, pathcodes=pathcodes, style=pathstyle, mplobj=mplobj) @staticmethod def _iter_path_collection(paths, path_transforms, offsets, styles): """Build an iterator over the elements of the path collection""" N = max(len(paths), len(offsets)) if not path_transforms: path_transforms = [np.eye(3)] edgecolor = styles['edgecolor'] if np.size(edgecolor) == 0: edgecolor = ['none'] facecolor = styles['facecolor'] if np.size(facecolor) == 0: facecolor = ['none'] elements = [paths, path_transforms, offsets, edgecolor, styles['linewidth'], facecolor] it = itertools return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N) def draw_path_collection(self, paths, path_coordinates, path_transforms, offsets, offset_coordinates, offset_order, styles, mplobj=None): """ Draw a collection of paths. The paths, offsets, and styles are all iterables, and the number of paths is max(len(paths), len(offsets)). By default, this is implemented via multiple calls to the draw_path() function. For efficiency, Renderers may choose to customize this implementation. Examples of path collections created by matplotlib are scatter plots, histograms, contour plots, and many others. Parameters ---------- paths : list list of tuples, where each tuple has two elements: (data, pathcodes). See draw_path() for a description of these. path_coordinates: string the coordinates code for the paths, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. path_transforms: array_like an array of shape (*, 3, 3), giving a series of 2D Affine transforms for the paths. These encode translations, rotations, and scalings in the standard way. offsets: array_like An array of offsets of shape (N, 2) offset_coordinates : string the coordinates code for the offsets, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. offset_order : string either "before" or "after". This specifies whether the offset is applied before the path transform, or after. The matplotlib backend equivalent is "before"->"data", "after"->"screen". styles: dictionary A dictionary in which each value is a list of length N, containing the style(s) for the paths. 
mplobj : matplotlib object the matplotlib plot element which generated this collection """ if offset_order == "before": raise NotImplementedError("offset before transform") for tup in self._iter_path_collection(paths, path_transforms, offsets, styles): (path, path_transform, offset, ec, lw, fc) = tup vertices, pathcodes = path path_transform = transforms.Affine2D(path_transform) vertices = path_transform.transform(vertices) # This is a hack: if path_coordinates == "figure": path_coordinates = "points" style = {"edgecolor": utils.color_to_hex(ec), "facecolor": utils.color_to_hex(fc), "edgewidth": lw, "dasharray": "10,0", "alpha": styles['alpha'], "zorder": styles['zorder']} self.draw_path(data=vertices, coordinates=path_coordinates, pathcodes=pathcodes, style=style, offset=offset, offset_coordinates=offset_coordinates, mplobj=mplobj) def draw_markers(self, data, coordinates, style, label, mplobj=None): """ Draw a set of markers. By default, this is done by repeatedly calling draw_path(), but renderers should generally overload this method to provide a more efficient implementation. In matplotlib, markers are created using the plt.plot() command. Parameters ---------- data : array_like A shape (N, 2) array of datapoints. coordinates : string A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. style : dictionary a dictionary specifying the appearance of the markers. mplobj : matplotlib object the matplotlib plot element which generated this marker collection """ vertices, pathcodes = style['markerpath'] pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor', 'facecolor', 'zorder', 'edgewidth']) pathstyle['dasharray'] = "10,0" for vertex in data: self.draw_path(data=vertices, coordinates="points", pathcodes=pathcodes, style=pathstyle, offset=vertex, offset_coordinates=coordinates, mplobj=mplobj) def draw_text(self, text, position, coordinates, style, text_type=None, mplobj=None): """ Draw text on the image. Parameters ---------- text : string The text to draw position : tuple The (x, y) position of the text coordinates : string A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. style : dictionary a dictionary specifying the appearance of the text. text_type : string or None if specified, a type of text such as "xlabel", "ylabel", "title" mplobj : matplotlib object the matplotlib plot element which generated this text """ raise NotImplementedError() def draw_path(self, data, coordinates, pathcodes, style, offset=None, offset_coordinates="data", mplobj=None): """ Draw a path. In matplotlib, paths are created by filled regions, histograms, contour plots, patches, etc. Parameters ---------- data : array_like A shape (N, 2) array of datapoints. coordinates : string A string code, which should be either 'data' for data coordinates, 'figure' for figure (pixel) coordinates, or "points" for raw point coordinates (useful in conjunction with offsets, below). pathcodes : list A list of single-character SVG pathcodes associated with the data. Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't', 'S', 's', 'C', 'c', 'Z', 'z'] See the SVG specification for details. Note that some path codes consume more than one datapoint (while 'Z' consumes none), so in general, the length of the pathcodes list will not be the same as that of the data array. style : dictionary a dictionary specifying the appearance of the line. offset : list (optional) the (x, y) offset of the path. 
If not given, no offset will be used. offset_coordinates : string (optional) A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. mplobj : matplotlib object the matplotlib plot element which generated this path """ raise NotImplementedError() def draw_image(self, imdata, extent, coordinates, style, mplobj=None): """ Draw an image. Parameters ---------- imdata : string base64 encoded png representation of the image extent : list the axes extent of the image: [xmin, xmax, ymin, ymax] coordinates: string A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. style : dictionary a dictionary specifying the appearance of the image mplobj : matplotlib object the matplotlib plot object which generated this image """ raise NotImplementedError()
bsd-3-clause
edison7500/dugong
apps/images/migrations/0001_initial.py
1
1994
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2018-12-25 06:14
from __future__ import unicode_literals

import apps.images.handlers
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone


class Migration(migrations.Migration):

    initial = True

    dependencies = [("contenttypes", "0002_remove_content_type_name")]

    operations = [
        migrations.CreateModel(
            name="Image",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "file",
                    models.ImageField(
                        upload_to=apps.images.handlers.UUIDFilename("images/")
                    ),
                ),
                ("description", models.CharField(blank=True, max_length=255)),
                ("is_cover", models.BooleanField(default=False)),
                (
                    "object_id",
                    models.PositiveIntegerField(db_index=True, null=True),
                ),
                (
                    "uploaded_at",
                    models.DateTimeField(default=django.utils.timezone.now),
                ),
                (
                    "content_type",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="contenttypes.ContentType",
                    ),
                ),
            ],
            options={
                "verbose_name": "photo",
                "verbose_name_plural": "photos",
                "db_table": "generic_image",
                "ordering": ["-uploaded_at"],
                "abstract": False,
            },
        )
    ]
gpl-3.0
bawaaaaah/Phoenix
tools/xml2sqlite.py
1
4763
'''
This file is used to combine multiple no-intro xml files into one sqlite file.
This is used for checksum lookups to set up the artwork scraper.
'''

import xml.etree.ElementTree as ET
import sqlite3
import os
import sys
import getopt

UNKNOWN_VALUE = ""
TABLE_VALUE = "NOINTRO"
VERSION_FILE = "console_database_version.txt"


def xml2sqlite(file, connection):
    '''
    Converts opens up an XML file and inserts found elements into an SQL database.
    '''
    with open(file, 'r') as open_file:
        tree = ET.parse(open_file)
        root = tree.getroot()
        c = connection.cursor()
        try:
            for child in root:  # (child.tag == game), (child.attrib == [name])
                if child.tag != "game":
                    continue

                gamename = UNKNOWN_VALUE
                description = UNKNOWN_VALUE
                romname = UNKNOWN_VALUE
                size = UNKNOWN_VALUE
                crc = UNKNOWN_VALUE
                sha1 = UNKNOWN_VALUE

                if "name" in child.attrib.keys():
                    gamename = child.attrib['name']

                for subchild in child:  # (subchild.tag == ["description", "rom"]), (subchild.attrib == [name, size, crc, sha1])
                    keys = subchild.attrib.keys()
                    if subchild.tag == "description":
                        print("[ADD] ", subchild.text)
                        description = subchild.text
                    if "name" in keys:
                        romname = subchild.attrib['name']
                    if "size" in keys:
                        size = subchild.attrib['size']
                    if "crc" in keys:
                        crc = subchild.attrib['crc']
                    if "sha1" in keys:
                        sha1 = subchild.attrib['sha1']

                params = (gamename, description, romname, size, crc, sha1)
                if "" in params:
                    continue

                c.execute("INSERT INTO " + TABLE_VALUE + " VALUES (?, ?, ?, ?, ?, ?)", params)

            connection.commit()
            c.close()
            return True

        except sqlite3.Error as err:
            for i in err.args:
                print(i)
            c.close()
            return False


def create_version_file(files_list):
    with open(VERSION_FILE, "w") as out_file:
        for i in files_list:
            out_file.write("{:}\n".format(i))


def main(argv):
    files_list = list()
    out_file = ""

    try:
        opts, args = getopt.getopt(argv, "hi:o:", ["input=", "output="])
    except getopt.GetoptError:
        print("xml2sqlite.py -i '[<input_file>, <input_file>]' -o <output_file>")
        sys.exit(2)

    for opt, arg in opts:
        if opt == "-h":
            print("\nxml2sqlite.py -i <input_directory> -o <output_file>")
            print("\n-i, --input = Takes in directory to where the xml files are located")
            print("-o, --output = Is a single file")
            sys.exit()
        elif opt in ("-i", "--input"):
            if not os.path.isdir(arg):
                print("Input directory does not exist.")
                sys.exit(2)
            files_list = [os.path.join(arg, i) for i in os.listdir(arg) if os.path.isfile(os.path.join(arg, i))]
        elif opt in ("-o", "--output"):
            out_file = arg

    if len(files_list) == 0 or out_file == "":
        print("args aren't correct")
        sys.exit(2)

    if os.path.isfile(out_file):
        os.remove(out_file)

    connection = sqlite3.connect(out_file)
    c = connection.cursor()
    c.execute("CREATE TABLE " + TABLE_VALUE + " (gamename TEXT, description TEXT, romname TEXT, size TEXT, crc TEXT, sha1 TEXT)")

    print("Generating database...")

    results = list()
    for i in files_list:
        if ".xml" not in i:
            print("\nSkipping ", i, "\n")
            continue
        status = xml2sqlite(i, connection)
        if status:
            results.append("[OK] {:}".format(i))
        else:
            results.append("[Error] {:}".format(i))

    c.close()
    create_version_file(results)

    print()
    for i in results:
        print(i)


def test_read():
    '''
    This used to test the created database with single lookups.
    '''
    out_file = os.path.dirname(os.path.realpath(__file__)) + "/gamedatabase.db"
    connection = sqlite3.connect(out_file)
    c = connection.cursor()
    c.execute("SELECT * FROM " + TABLE_VALUE)
    for row in c:
        print(row)
    c.close()


if __name__ == "__main__":
    main(sys.argv[1:])
gpl-2.0
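A small sketch of a checksum lookup against the NOINTRO table produced by the script above; the database file name matches the one used in test_read(), while the CRC value is invented for illustration:

import sqlite3

conn = sqlite3.connect("gamedatabase.db")
cur = conn.cursor()
cur.execute("SELECT gamename, romname, sha1 FROM NOINTRO WHERE crc = ?", ("1b4f595b",))
for gamename, romname, sha1 in cur:
    print(gamename, romname, sha1)
cur.close()
conn.close()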
xiandiancloud/edx-platform
lms/djangoapps/certificates/migrations/0002_auto__add_field_generatedcertificate_download_url.py
188
6807
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'GeneratedCertificate.download_url' db.add_column('certificates_generatedcertificate', 'download_url', self.gf('django.db.models.fields.CharField')(max_length=128, null=True), keep_default=False) def backwards(self, orm): # Deleting field 'GeneratedCertificate.download_url' db.delete_column('certificates_generatedcertificate', 'download_url') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}), 'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}), 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 
'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}), 'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}), 'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}) }, 'certificates.generatedcertificate': { 'Meta': {'object_name': 'GeneratedCertificate'}, 'certificate_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'download_url': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['certificates']
agpl-3.0
chvrga/outdoor-explorer
java/play-1.4.4/python/Lib/xml/dom/minicompat.py
7
3439
"""Python version compatibility support for minidom.""" # This module should only be imported using "import *". # # The following names are defined: # # NodeList -- lightest possible NodeList implementation # # EmptyNodeList -- lightest possible NodeList that is guarateed to # remain empty (immutable) # # StringTypes -- tuple of defined string types # # defproperty -- function used in conjunction with GetattrMagic; # using these together is needed to make them work # as efficiently as possible in both Python 2.2+ # and older versions. For example: # # class MyClass(GetattrMagic): # def _get_myattr(self): # return something # # defproperty(MyClass, "myattr", # "return some value") # # For Python 2.2 and newer, this will construct a # property object on the class, which avoids # needing to override __getattr__(). It will only # work for read-only attributes. # # For older versions of Python, inheriting from # GetattrMagic will use the traditional # __getattr__() hackery to achieve the same effect, # but less efficiently. # # defproperty() should be used for each version of # the relevant _get_<property>() function. __all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"] import xml.dom try: unicode except NameError: StringTypes = type(''), else: StringTypes = type(''), type(unicode('')) class NodeList(list): __slots__ = () def item(self, index): if 0 <= index < len(self): return self[index] def _get_length(self): return len(self) def _set_length(self, value): raise xml.dom.NoModificationAllowedErr( "attempt to modify read-only attribute 'length'") length = property(_get_length, _set_length, doc="The number of nodes in the NodeList.") def __getstate__(self): return list(self) def __setstate__(self, state): self[:] = state class EmptyNodeList(tuple): __slots__ = () def __add__(self, other): NL = NodeList() NL.extend(other) return NL def __radd__(self, other): NL = NodeList() NL.extend(other) return NL def item(self, index): return None def _get_length(self): return 0 def _set_length(self, value): raise xml.dom.NoModificationAllowedErr( "attempt to modify read-only attribute 'length'") length = property(_get_length, _set_length, doc="The number of nodes in the NodeList.") def defproperty(klass, name, doc): get = getattr(klass, ("_get_" + name)).im_func def set(self, value, name=name): raise xml.dom.NoModificationAllowedErr( "attempt to modify read-only attribute " + repr(name)) assert not hasattr(klass, "_set_" + name), \ "expected not to find _set_" + name prop = property(get, set, doc=doc) setattr(klass, name, prop)
mit
crazcalm/AngelHack_python34
myenv/Lib/site-packages/jinja2/_compat.py
638
4042
# -*- coding: utf-8 -*-
"""
    jinja2._compat
    ~~~~~~~~~~~~~~

    Some py2/py3 compatibility support based on a stripped down
    version of six so we don't have to depend on a specific version
    of it.

    :copyright: Copyright 2013 by the Jinja team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
import sys

PY2 = sys.version_info[0] == 2
PYPY = hasattr(sys, 'pypy_translation_info')
_identity = lambda x: x


if not PY2:
    unichr = chr
    range_type = range
    text_type = str
    string_types = (str,)

    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())

    import pickle
    from io import BytesIO, StringIO
    NativeStringIO = StringIO

    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    ifilter = filter
    imap = map
    izip = zip
    intern = sys.intern

    implements_iterator = _identity
    implements_to_string = _identity
    encode_filename = _identity
    get_next = lambda x: x.__next__

else:
    unichr = unichr
    text_type = unicode
    range_type = xrange
    string_types = (str, unicode)

    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    iteritems = lambda d: d.iteritems()

    import cPickle as pickle
    from cStringIO import StringIO as BytesIO, StringIO
    NativeStringIO = BytesIO

    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')

    from itertools import imap, izip, ifilter
    intern = intern

    def implements_iterator(cls):
        cls.next = cls.__next__
        del cls.__next__
        return cls

    def implements_to_string(cls):
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls

    get_next = lambda x: x.next

    def encode_filename(filename):
        if isinstance(filename, unicode):
            return filename.encode('utf-8')
        return filename

try:
    next = next
except NameError:
    def next(it):
        return it.next()


def with_metaclass(meta, *bases):
    # This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instanciation that replaces
    # itself with the actual metaclass.  Because of internal type checks
    # we also need to make sure that we downgrade the custom metaclass
    # for one level to something closer to type (that's why __call__ and
    # __init__ comes back from type etc.).
    #
    # This has the advantage over six.with_metaclass in that it does not
    # introduce dummy classes into the final MRO.
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            return meta(name, bases, d)
    return metaclass('temporary_class', None, {})


try:
    from collections import Mapping as mapping_types
except ImportError:
    import UserDict
    mapping_types = (UserDict.UserDict, UserDict.DictMixin, dict)


# common types.  These do exist in the special types module too which however
# does not exist in IronPython out of the box.  Also that way we don't have
# to deal with implementation specific stuff here
class _C(object):
    def method(self):
        pass


def _func():
    yield None


function_type = type(_func)
generator_type = type(_func())
method_type = type(_C().method)
code_type = type(_C.method.__code__)
try:
    raise TypeError()
except TypeError:
    _tb = sys.exc_info()[2]
    traceback_type = type(_tb)
    frame_type = type(_tb.tb_frame)


try:
    from urllib.parse import quote_from_bytes as url_quote
except ImportError:
    from urllib import quote as url_quote


try:
    from thread import allocate_lock
except ImportError:
    try:
        from threading import Lock as allocate_lock
    except ImportError:
        from dummy_thread import allocate_lock
mit
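A brief sketch of the with_metaclass() helper from the _compat module above; the metaclass and class below are invented for illustration:

from jinja2._compat import with_metaclass

class Meta(type):
    def __new__(mcs, name, bases, d):
        d.setdefault('tag', name.lower())  # stamp every class with a lowercase tag
        return type.__new__(mcs, name, bases, d)

class Node(with_metaclass(Meta, object)):
    pass

print(Node.tag)  # "node" on both Python 2 and 3, with no dummy base left in Node.__mro__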
codelucas/shorten.tv
bg.py
1
2146
#!/usr/bin/env python2.7
"""
Here is shorten.tv's main background task to re-load and cache
popular youtube videos so users have less wait time when using
the webapp.
"""
import requests
import string
import backend
import urllib

letters = list(string.lowercase)  # a, b, c ... z
popular = ["Rihanna", "Usher", "Katy Perry", "Eminem", "Shakira",
           "Taylor Swift", "Akon", "Lady Gaga", "Paramore", "Jay Z",
           "Led Zepplin", "Guns N Roses", "Aerosmith", "Borat",
           "Fallout Boy", "Blink 182", "Justin Bieber", "Drake"]

searches = letters + popular

numb_thumbs = "5"
numb_queries = 5


def encodeURIComponent(input_str):
    """
    Python equivalent of javascript's encodeURIComponent
    """
    return urllib.quote(unicode(input_str).encode('utf-8'), safe='~()*!.\'')


def top_query(term):
    """
    Retrieves top google autocompletion api query
    """
    url = "http://suggestqueries.google.com/complete/search?" + \
          "hl=en&ds=yt&client=youtube&json=t&q=" + \
          encodeURIComponent(term) + "&cp=1"
    results = requests.get(url).json()
    queries = results[1][:5]
    print "Autocomplete results for", results[0], "are", queries
    return queries[0]  # top query


def youtube_top_five(query):
    """
    Retrieves top five yotube video (ids) based on a google
    autocompelte query
    """
    url = "http://gdata.youtube.com/feeds/api/videos?q=" + \
          encodeURIComponent(query) + "&format=5&max-results=" + \
          numb_thumbs + "&v=2&alt=jsonc"
    resp = requests.get(url).json()
    data = resp["data"]
    items = data["items"]
    ids = [video["id"] for video in items]
    return ids


if __name__ == '__main__':
    for search in searches:
        query = top_query(search)
        ids = youtube_top_five(query)
        for yt_id in ids:
            clips, duration = backend.check_youtube(yt_id)
            yt_dat = {'hotclips': clips, 'duration': duration}
            backend.redis.setex(yt_id, yt_dat, backend.HOTCLIP_CACHE_TIME)
            print 'Summarization data cached for id', yt_id, \
                '~~~~ hotclips:', clips, 'duration:', duration
mit
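For the bg.py record above, the call chain is clearer when traced for a single term. The sketch below is hypothetical: it assumes bg.py and its backend module are importable, skips the backend/redis caching step, and depends on the external Google-suggest and YouTube gdata v2 endpoints (the latter has since been retired), so treat it as an outline of the flow rather than something expected to run today.

# Illustrative walk-through of one search term through bg.py's helpers.
from bg import top_query, youtube_top_five

term = "Rihanna"
query = top_query(term)              # most popular autocomplete of "Rihanna"
video_ids = youtube_top_five(query)  # up to numb_thumbs (= "5") video ids
for yt_id in video_ids:
    print "would pre-cache summarization data for", yt_id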
HiroIshikawa/21playground
voting/venv/lib/python3.5/site-packages/pip/_vendor/pkg_resources/__init__.py
252
106466
""" Package resource API -------------------- A resource is a logical file contained within a package, or a logical subdirectory thereof. The package resource API expects resource names to have their path parts separated with ``/``, *not* whatever the local path separator is. Do not use os.path operations to manipulate resource names being passed into the API. The package resource API is designed to work with normal filesystem packages, .egg files, and unpacked .egg files. It can also work in a limited way with .zip files and with custom PEP 302 loaders that support the ``get_data()`` method. """ from __future__ import absolute_import import sys import os import io import time import re import types import zipfile import zipimport import warnings import stat import functools import pkgutil import token import symbol import operator import platform import collections import plistlib import email.parser import tempfile import textwrap from pkgutil import get_importer try: import _imp except ImportError: # Python 3.2 compatibility import imp as _imp PY3 = sys.version_info > (3,) PY2 = not PY3 if PY3: from urllib.parse import urlparse, urlunparse if PY2: from urlparse import urlparse, urlunparse if PY3: string_types = str, else: string_types = str, eval('unicode') iteritems = (lambda i: i.items()) if PY3 else lambda i: i.iteritems() # capture these to bypass sandboxing from os import utime try: from os import mkdir, rename, unlink WRITE_SUPPORT = True except ImportError: # no write support, probably under GAE WRITE_SUPPORT = False from os import open as os_open from os.path import isdir, split # Avoid try/except due to potential problems with delayed import mechanisms. if sys.version_info >= (3, 3) and sys.implementation.name == "cpython": import importlib.machinery as importlib_machinery else: importlib_machinery = None try: import parser except ImportError: pass import pip._vendor.packaging.version import pip._vendor.packaging.specifiers packaging = pip._vendor.packaging # declare some globals that will be defined later to # satisfy the linters. require = None working_set = None class PEP440Warning(RuntimeWarning): """ Used when there is an issue with a version or specifier not complying with PEP 440. 
""" class _SetuptoolsVersionMixin(object): def __hash__(self): return super(_SetuptoolsVersionMixin, self).__hash__() def __lt__(self, other): if isinstance(other, tuple): return tuple(self) < other else: return super(_SetuptoolsVersionMixin, self).__lt__(other) def __le__(self, other): if isinstance(other, tuple): return tuple(self) <= other else: return super(_SetuptoolsVersionMixin, self).__le__(other) def __eq__(self, other): if isinstance(other, tuple): return tuple(self) == other else: return super(_SetuptoolsVersionMixin, self).__eq__(other) def __ge__(self, other): if isinstance(other, tuple): return tuple(self) >= other else: return super(_SetuptoolsVersionMixin, self).__ge__(other) def __gt__(self, other): if isinstance(other, tuple): return tuple(self) > other else: return super(_SetuptoolsVersionMixin, self).__gt__(other) def __ne__(self, other): if isinstance(other, tuple): return tuple(self) != other else: return super(_SetuptoolsVersionMixin, self).__ne__(other) def __getitem__(self, key): return tuple(self)[key] def __iter__(self): component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE) replace = { 'pre': 'c', 'preview': 'c', '-': 'final-', 'rc': 'c', 'dev': '@', }.get def _parse_version_parts(s): for part in component_re.split(s): part = replace(part, part) if not part or part == '.': continue if part[:1] in '0123456789': # pad for numeric comparison yield part.zfill(8) else: yield '*'+part # ensure that alpha/beta/candidate are before final yield '*final' def old_parse_version(s): parts = [] for part in _parse_version_parts(s.lower()): if part.startswith('*'): # remove '-' before a prerelease tag if part < '*final': while parts and parts[-1] == '*final-': parts.pop() # remove trailing zeros from each series of numeric parts while parts and parts[-1] == '00000000': parts.pop() parts.append(part) return tuple(parts) # Warn for use of this function warnings.warn( "You have iterated over the result of " "pkg_resources.parse_version. This is a legacy behavior which is " "inconsistent with the new version class introduced in setuptools " "8.0. In most cases, conversion to a tuple is unnecessary. For " "comparison of versions, sort the Version instances directly. If " "you have another use case requiring the tuple, please file a " "bug with the setuptools project describing that need.", RuntimeWarning, stacklevel=1, ) for part in old_parse_version(str(self)): yield part class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version): pass class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin, packaging.version.LegacyVersion): pass def parse_version(v): try: return SetuptoolsVersion(v) except packaging.version.InvalidVersion: return SetuptoolsLegacyVersion(v) _state_vars = {} def _declare_state(vartype, **kw): globals().update(kw) _state_vars.update(dict.fromkeys(kw, vartype)) def __getstate__(): state = {} g = globals() for k, v in _state_vars.items(): state[k] = g['_sget_'+v](g[k]) return state def __setstate__(state): g = globals() for k, v in state.items(): g['_sset_'+_state_vars[k]](k, g[k], v) return state def _sget_dict(val): return val.copy() def _sset_dict(key, ob, state): ob.clear() ob.update(state) def _sget_object(val): return val.__getstate__() def _sset_object(key, ob, state): ob.__setstate__(state) _sget_none = _sset_none = lambda *args: None def get_supported_platform(): """Return this platform's maximum compatible version. 
distutils.util.get_platform() normally reports the minimum version of Mac OS X that would be required to *use* extensions produced by distutils. But what we want when checking compatibility is to know the version of Mac OS X that we are *running*. To allow usage of packages that explicitly require a newer version of Mac OS X, we must also know the current version of the OS. If this condition occurs for any other platform with a version in its platform strings, this function should be extended accordingly. """ plat = get_build_platform() m = macosVersionString.match(plat) if m is not None and sys.platform == "darwin": try: plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) except ValueError: # not Mac OS X pass return plat __all__ = [ # Basic resource access and distribution/entry point discovery 'require', 'run_script', 'get_provider', 'get_distribution', 'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points', 'resource_string', 'resource_stream', 'resource_filename', 'resource_listdir', 'resource_exists', 'resource_isdir', # Environmental control 'declare_namespace', 'working_set', 'add_activation_listener', 'find_distributions', 'set_extraction_path', 'cleanup_resources', 'get_default_cache', # Primary implementation classes 'Environment', 'WorkingSet', 'ResourceManager', 'Distribution', 'Requirement', 'EntryPoint', # Exceptions 'ResolutionError', 'VersionConflict', 'DistributionNotFound', 'UnknownExtra', 'ExtractionError', # Warnings 'PEP440Warning', # Parsing functions and string utilities 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', # filesystem utilities 'ensure_directory', 'normalize_path', # Distribution "precedence" constants 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', # "Provider" interfaces, implementations, and registration/lookup APIs 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', 'register_finder', 'register_namespace_handler', 'register_loader_type', 'fixup_namespace_packages', 'get_importer', # Deprecated/backward compatibility only 'run_main', 'AvailableDistributions', ] class ResolutionError(Exception): """Abstract base for dependency resolution errors""" def __repr__(self): return self.__class__.__name__+repr(self.args) class VersionConflict(ResolutionError): """ An already-installed version conflicts with the requested version. Should be initialized with the installed Distribution and the requested Requirement. """ _template = "{self.dist} is installed but {self.req} is required" @property def dist(self): return self.args[0] @property def req(self): return self.args[1] def report(self): return self._template.format(**locals()) def with_context(self, required_by): """ If required_by is non-empty, return a version of self that is a ContextualVersionConflict. """ if not required_by: return self args = self.args + (required_by,) return ContextualVersionConflict(*args) class ContextualVersionConflict(VersionConflict): """ A VersionConflict that accepts a third parameter, the set of the requirements that required the installed Distribution. 
""" _template = VersionConflict._template + ' by {self.required_by}' @property def required_by(self): return self.args[2] class DistributionNotFound(ResolutionError): """A requested distribution was not found""" _template = ("The '{self.req}' distribution was not found " "and is required by {self.requirers_str}") @property def req(self): return self.args[0] @property def requirers(self): return self.args[1] @property def requirers_str(self): if not self.requirers: return 'the application' return ', '.join(self.requirers) def report(self): return self._template.format(**locals()) def __str__(self): return self.report() class UnknownExtra(ResolutionError): """Distribution doesn't have an "extra feature" of the given name""" _provider_factories = {} PY_MAJOR = sys.version[:3] EGG_DIST = 3 BINARY_DIST = 2 SOURCE_DIST = 1 CHECKOUT_DIST = 0 DEVELOP_DIST = -1 def register_loader_type(loader_type, provider_factory): """Register `provider_factory` to make providers for `loader_type` `loader_type` is the type or class of a PEP 302 ``module.__loader__``, and `provider_factory` is a function that, passed a *module* object, returns an ``IResourceProvider`` for that module. """ _provider_factories[loader_type] = provider_factory def get_provider(moduleOrReq): """Return an IResourceProvider for the named module or requirement""" if isinstance(moduleOrReq, Requirement): return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] try: module = sys.modules[moduleOrReq] except KeyError: __import__(moduleOrReq) module = sys.modules[moduleOrReq] loader = getattr(module, '__loader__', None) return _find_adapter(_provider_factories, loader)(module) def _macosx_vers(_cache=[]): if not _cache: version = platform.mac_ver()[0] # fallback for MacPorts if version == '': plist = '/System/Library/CoreServices/SystemVersion.plist' if os.path.exists(plist): if hasattr(plistlib, 'readPlist'): plist_content = plistlib.readPlist(plist) if 'ProductVersion' in plist_content: version = plist_content['ProductVersion'] _cache.append(version.split('.')) return _cache[0] def _macosx_arch(machine): return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) def get_build_platform(): """Return this platform's string for platform-specific distributions XXX Currently this is the same as ``distutils.util.get_platform()``, but it needs some hacks for Linux and Mac OS X. """ try: # Python 2.7 or >=3.2 from sysconfig import get_platform except ImportError: from distutils.util import get_platform plat = get_platform() if sys.platform == "darwin" and not plat.startswith('macosx-'): try: version = _macosx_vers() machine = os.uname()[4].replace(" ", "_") return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]), _macosx_arch(machine)) except ValueError: # if someone is running a non-Mac darwin system, this will fall # through to the default implementation pass return plat macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") # XXX backward compat get_platform = get_build_platform def compatible_platforms(provided, required): """Can code for the `provided` platform run on the `required` platform? Returns true if either platform is ``None``, or the platforms are equal. XXX Needs compatibility checks for Linux and other unixy OSes. 
""" if provided is None or required is None or provided==required: # easy case return True # Mac OS X special cases reqMac = macosVersionString.match(required) if reqMac: provMac = macosVersionString.match(provided) # is this a Mac package? if not provMac: # this is backwards compatibility for packages built before # setuptools 0.6. All packages built after this point will # use the new macosx designation. provDarwin = darwinVersionString.match(provided) if provDarwin: dversion = int(provDarwin.group(1)) macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) if dversion == 7 and macosversion >= "10.3" or \ dversion == 8 and macosversion >= "10.4": return True # egg isn't macosx or legacy darwin return False # are they the same major version and machine type? if provMac.group(1) != reqMac.group(1) or \ provMac.group(3) != reqMac.group(3): return False # is the required OS major update >= the provided one? if int(provMac.group(2)) > int(reqMac.group(2)): return False return True # XXX Linux and other platforms' special cases should go here return False def run_script(dist_spec, script_name): """Locate distribution `dist_spec` and run its `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name require(dist_spec)[0].run_script(script_name, ns) # backward compatibility run_main = run_script def get_distribution(dist): """Return a current distribution object for a Requirement or string""" if isinstance(dist, string_types): dist = Requirement.parse(dist) if isinstance(dist, Requirement): dist = get_provider(dist) if not isinstance(dist, Distribution): raise TypeError("Expected string, Requirement, or Distribution", dist) return dist def load_entry_point(dist, group, name): """Return `name` entry point of `group` for `dist` or raise ImportError""" return get_distribution(dist).load_entry_point(group, name) def get_entry_map(dist, group=None): """Return the entry point map for `group`, or the full entry map""" return get_distribution(dist).get_entry_map(group) def get_entry_info(dist, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return get_distribution(dist).get_entry_info(group, name) class IMetadataProvider: def has_metadata(name): """Does the package's distribution contain the named metadata?""" def get_metadata(name): """The named metadata resource as a string""" def get_metadata_lines(name): """Yield named metadata resource as list of non-blank non-comment lines Leading and trailing whitespace is stripped from each line, and lines with ``#`` as the first non-blank character are omitted.""" def metadata_isdir(name): """Is the named metadata a directory? 
(like ``os.path.isdir()``)""" def metadata_listdir(name): """List of metadata names in the directory (like ``os.listdir()``)""" def run_script(script_name, namespace): """Execute the named script in the supplied namespace dictionary""" class IResourceProvider(IMetadataProvider): """An object that provides access to package resources""" def get_resource_filename(manager, resource_name): """Return a true filesystem path for `resource_name` `manager` must be an ``IResourceManager``""" def get_resource_stream(manager, resource_name): """Return a readable file-like object for `resource_name` `manager` must be an ``IResourceManager``""" def get_resource_string(manager, resource_name): """Return a string containing the contents of `resource_name` `manager` must be an ``IResourceManager``""" def has_resource(resource_name): """Does the package contain the named resource?""" def resource_isdir(resource_name): """Is the named resource a directory? (like ``os.path.isdir()``)""" def resource_listdir(resource_name): """List of resource names in the directory (like ``os.listdir()``)""" class WorkingSet(object): """A collection of active distributions on sys.path (or a similar list)""" def __init__(self, entries=None): """Create working set from list of path entries (default=sys.path)""" self.entries = [] self.entry_keys = {} self.by_key = {} self.callbacks = [] if entries is None: entries = sys.path for entry in entries: self.add_entry(entry) @classmethod def _build_master(cls): """ Prepare the master working set. """ ws = cls() try: from __main__ import __requires__ except ImportError: # The main program does not list any requirements return ws # ensure the requirements are met try: ws.require(__requires__) except VersionConflict: return cls._build_from_requirements(__requires__) return ws @classmethod def _build_from_requirements(cls, req_spec): """ Build a working set from a requirement spec. Rewrites sys.path. """ # try it without defaults already on sys.path # by starting with an empty path ws = cls([]) reqs = parse_requirements(req_spec) dists = ws.resolve(reqs, Environment()) for dist in dists: ws.add(dist) # add any missing entries from sys.path for entry in sys.path: if entry not in ws.entries: ws.add_entry(entry) # then copy back to sys.path sys.path[:] = ws.entries return ws def add_entry(self, entry): """Add a path item to ``.entries``, finding any distributions on it ``find_distributions(entry, True)`` is used to find distributions corresponding to the path entry, and they are added. `entry` is always appended to ``.entries``, even if it is already present. (This is because ``sys.path`` can contain the same value more than once, and the ``.entries`` of the ``sys.path`` WorkingSet should always equal ``sys.path``.) """ self.entry_keys.setdefault(entry, []) self.entries.append(entry) for dist in find_distributions(entry, True): self.add(dist, entry, False) def __contains__(self, dist): """True if `dist` is the active distribution for its project""" return self.by_key.get(dist.key) == dist def find(self, req): """Find a distribution matching requirement `req` If there is an active distribution for the requested project, this returns it as long as it meets the version requirement specified by `req`. But, if there is an active distribution for the project and it does *not* meet the `req` requirement, ``VersionConflict`` is raised. If there is no active distribution for the requested project, ``None`` is returned. 
""" dist = self.by_key.get(req.key) if dist is not None and dist not in req: # XXX add more info raise VersionConflict(dist, req) return dist def iter_entry_points(self, group, name=None): """Yield entry point objects from `group` matching `name` If `name` is None, yields all entry points in `group` from all distributions in the working set, otherwise only ones matching both `group` and `name` are yielded (in distribution order). """ for dist in self: entries = dist.get_entry_map(group) if name is None: for ep in entries.values(): yield ep elif name in entries: yield entries[name] def run_script(self, requires, script_name): """Locate distribution for `requires` and run `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name self.require(requires)[0].run_script(script_name, ns) def __iter__(self): """Yield distributions for non-duplicate projects in the working set The yield order is the order in which the items' path entries were added to the working set. """ seen = {} for item in self.entries: if item not in self.entry_keys: # workaround a cache issue continue for key in self.entry_keys[item]: if key not in seen: seen[key]=1 yield self.by_key[key] def add(self, dist, entry=None, insert=True, replace=False): """Add `dist` to working set, associated with `entry` If `entry` is unspecified, it defaults to the ``.location`` of `dist`. On exit from this routine, `entry` is added to the end of the working set's ``.entries`` (if it wasn't already present). `dist` is only added to the working set if it's for a project that doesn't already have a distribution in the set, unless `replace=True`. If it's added, any callbacks registered with the ``subscribe()`` method will be called. """ if insert: dist.insert_on(self.entries, entry) if entry is None: entry = dist.location keys = self.entry_keys.setdefault(entry,[]) keys2 = self.entry_keys.setdefault(dist.location,[]) if not replace and dist.key in self.by_key: # ignore hidden distros return self.by_key[dist.key] = dist if dist.key not in keys: keys.append(dist.key) if dist.key not in keys2: keys2.append(dist.key) self._added_new(dist) def resolve(self, requirements, env=None, installer=None, replace_conflicting=False): """List all distributions needed to (recursively) meet `requirements` `requirements` must be a sequence of ``Requirement`` objects. `env`, if supplied, should be an ``Environment`` instance. If not supplied, it defaults to all distributions available within any entry or distribution in the working set. `installer`, if supplied, will be invoked with each requirement that cannot be met by an already-installed distribution; it should return a ``Distribution`` or ``None``. Unless `replace_conflicting=True`, raises a VersionConflict exception if any requirements are found on the path that have the correct name but the wrong version. Otherwise, if an `installer` is supplied it will be invoked to obtain the correct version of the requirement and activate it. """ # set up the stack requirements = list(requirements)[::-1] # set of processed requirements processed = {} # key -> dist best = {} to_activate = [] # Mapping of requirement to set of distributions that required it; # useful for reporting info about conflicts. 
required_by = collections.defaultdict(set) while requirements: # process dependencies breadth-first req = requirements.pop(0) if req in processed: # Ignore cyclic or redundant dependencies continue dist = best.get(req.key) if dist is None: # Find the best distribution and add it to the map dist = self.by_key.get(req.key) if dist is None or (dist not in req and replace_conflicting): ws = self if env is None: if dist is None: env = Environment(self.entries) else: # Use an empty environment and workingset to avoid # any further conflicts with the conflicting # distribution env = Environment([]) ws = WorkingSet([]) dist = best[req.key] = env.best_match(req, ws, installer) if dist is None: requirers = required_by.get(req, None) raise DistributionNotFound(req, requirers) to_activate.append(dist) if dist not in req: # Oops, the "best" so far conflicts with a dependency dependent_req = required_by[req] raise VersionConflict(dist, req).with_context(dependent_req) # push the new requirements onto the stack new_requirements = dist.requires(req.extras)[::-1] requirements.extend(new_requirements) # Register the new requirements needed by req for new_requirement in new_requirements: required_by[new_requirement].add(req.project_name) processed[req] = True # return list of distros to activate return to_activate def find_plugins(self, plugin_env, full_env=None, installer=None, fallback=True): """Find all activatable distributions in `plugin_env` Example usage:: distributions, errors = working_set.find_plugins( Environment(plugin_dirlist) ) # add plugins+libs to sys.path map(working_set.add, distributions) # display errors print('Could not load', errors) The `plugin_env` should be an ``Environment`` instance that contains only distributions that are in the project's "plugin directory" or directories. The `full_env`, if supplied, should be an ``Environment`` contains all currently-available distributions. If `full_env` is not supplied, one is created automatically from the ``WorkingSet`` this method is called on, which will typically mean that every directory on ``sys.path`` will be scanned for distributions. `installer` is a standard installer callback as used by the ``resolve()`` method. The `fallback` flag indicates whether we should attempt to resolve older versions of a plugin if the newest version cannot be resolved. This method returns a 2-tuple: (`distributions`, `error_info`), where `distributions` is a list of the distributions found in `plugin_env` that were loadable, along with any other distributions that are needed to resolve their dependencies. `error_info` is a dictionary mapping unloadable plugin distributions to an exception instance describing the error that occurred. Usually this will be a ``DistributionNotFound`` or ``VersionConflict`` instance. 
""" plugin_projects = list(plugin_env) # scan project names in alphabetic order plugin_projects.sort() error_info = {} distributions = {} if full_env is None: env = Environment(self.entries) env += plugin_env else: env = full_env + plugin_env shadow_set = self.__class__([]) # put all our entries in shadow_set list(map(shadow_set.add, self)) for project_name in plugin_projects: for dist in plugin_env[project_name]: req = [dist.as_requirement()] try: resolvees = shadow_set.resolve(req, env, installer) except ResolutionError as v: # save error info error_info[dist] = v if fallback: # try the next older version of project continue else: # give up on this project, keep going break else: list(map(shadow_set.add, resolvees)) distributions.update(dict.fromkeys(resolvees)) # success, no need to try any more versions of this project break distributions = list(distributions) distributions.sort() return distributions, error_info def require(self, *requirements): """Ensure that distributions matching `requirements` are activated `requirements` must be a string or a (possibly-nested) sequence thereof, specifying the distributions and versions required. The return value is a sequence of the distributions that needed to be activated to fulfill the requirements; all relevant distributions are included, even if they were already activated in this working set. """ needed = self.resolve(parse_requirements(requirements)) for dist in needed: self.add(dist) return needed def subscribe(self, callback): """Invoke `callback` for all distributions (including existing ones)""" if callback in self.callbacks: return self.callbacks.append(callback) for dist in self: callback(dist) def _added_new(self, dist): for callback in self.callbacks: callback(dist) def __getstate__(self): return ( self.entries[:], self.entry_keys.copy(), self.by_key.copy(), self.callbacks[:] ) def __setstate__(self, e_k_b_c): entries, keys, by_key, callbacks = e_k_b_c self.entries = entries[:] self.entry_keys = keys.copy() self.by_key = by_key.copy() self.callbacks = callbacks[:] class Environment(object): """Searchable snapshot of distributions on a search path""" def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR): """Snapshot distributions available on a search path Any distributions found on `search_path` are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. `platform` is an optional string specifying the name of the platform that platform-specific distributions must be compatible with. If unspecified, it defaults to the current platform. `python` is an optional string naming the desired version of Python (e.g. ``'3.3'``); it defaults to the current version. You may explicitly set `platform` (and/or `python`) to ``None`` if you wish to map *all* distributions, not just those compatible with the running platform or Python version. """ self._distmap = {} self.platform = platform self.python = python self.scan(search_path) def can_add(self, dist): """Is distribution `dist` acceptable for this environment? The distribution must match the platform and python version requirements specified when this environment was created, or False is returned. 
""" return (self.python is None or dist.py_version is None or dist.py_version==self.python) \ and compatible_platforms(dist.platform, self.platform) def remove(self, dist): """Remove `dist` from the environment""" self._distmap[dist.key].remove(dist) def scan(self, search_path=None): """Scan `search_path` for distributions usable in this environment Any distributions found are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. Only distributions conforming to the platform/python version defined at initialization are added. """ if search_path is None: search_path = sys.path for item in search_path: for dist in find_distributions(item): self.add(dist) def __getitem__(self, project_name): """Return a newest-to-oldest list of distributions for `project_name` Uses case-insensitive `project_name` comparison, assuming all the project's distributions use their project's name converted to all lowercase as their key. """ distribution_key = project_name.lower() return self._distmap.get(distribution_key, []) def add(self, dist): """Add `dist` if we ``can_add()`` it and it has not already been added """ if self.can_add(dist) and dist.has_version(): dists = self._distmap.setdefault(dist.key, []) if dist not in dists: dists.append(dist) dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) def best_match(self, req, working_set, installer=None): """Find distribution best matching `req` and usable on `working_set` This calls the ``find(req)`` method of the `working_set` to see if a suitable distribution is already active. (This may raise ``VersionConflict`` if an unsuitable version of the project is already active in the specified `working_set`.) If a suitable distribution isn't active, this method returns the newest distribution in the environment that meets the ``Requirement`` in `req`. If no suitable distribution is found, and `installer` is supplied, then the result of calling the environment's ``obtain(req, installer)`` method will be returned. """ dist = working_set.find(req) if dist is not None: return dist for dist in self[req.key]: if dist in req: return dist # try to download/install return self.obtain(req, installer) def obtain(self, requirement, installer=None): """Obtain a distribution matching `requirement` (e.g. via download) Obtain a distro that matches requirement (e.g. via download). In the base ``Environment`` class, this routine just returns ``installer(requirement)``, unless `installer` is None, in which case None is returned instead. 
This method is a hook that allows subclasses to attempt other ways of obtaining a distribution before falling back to the `installer` argument.""" if installer is not None: return installer(requirement) def __iter__(self): """Yield the unique project names of the available distributions""" for key in self._distmap.keys(): if self[key]: yield key def __iadd__(self, other): """In-place addition of a distribution or environment""" if isinstance(other, Distribution): self.add(other) elif isinstance(other, Environment): for project in other: for dist in other[project]: self.add(dist) else: raise TypeError("Can't add %r to environment" % (other,)) return self def __add__(self, other): """Add an environment or distribution to an environment""" new = self.__class__([], platform=None, python=None) for env in self, other: new += env return new # XXX backward compatibility AvailableDistributions = Environment class ExtractionError(RuntimeError): """An error occurred extracting a resource The following attributes are available from instances of this exception: manager The resource manager that raised this exception cache_path The base directory for resource extraction original_error The exception instance that caused extraction to fail """ class ResourceManager: """Manage resource extraction and packages""" extraction_path = None def __init__(self): self.cached_files = {} def resource_exists(self, package_or_requirement, resource_name): """Does the named resource exist?""" return get_provider(package_or_requirement).has_resource(resource_name) def resource_isdir(self, package_or_requirement, resource_name): """Is the named resource an existing directory?""" return get_provider(package_or_requirement).resource_isdir( resource_name ) def resource_filename(self, package_or_requirement, resource_name): """Return a true filesystem path for specified resource""" return get_provider(package_or_requirement).get_resource_filename( self, resource_name ) def resource_stream(self, package_or_requirement, resource_name): """Return a readable file-like object for specified resource""" return get_provider(package_or_requirement).get_resource_stream( self, resource_name ) def resource_string(self, package_or_requirement, resource_name): """Return specified resource as a string""" return get_provider(package_or_requirement).get_resource_string( self, resource_name ) def resource_listdir(self, package_or_requirement, resource_name): """List the contents of the named resource directory""" return get_provider(package_or_requirement).resource_listdir( resource_name ) def extraction_error(self): """Give an error message for problems extracting file(s)""" old_exc = sys.exc_info()[1] cache_path = self.extraction_path or get_default_cache() err = ExtractionError("""Can't extract file(s) to egg cache The following error occurred while trying to extract file(s) to the Python egg cache: %s The Python egg cache directory is currently set to: %s Perhaps your account does not have write access to this directory? You can change the cache directory by setting the PYTHON_EGG_CACHE environment variable to point to an accessible directory. """ % (old_exc, cache_path) ) err.manager = self err.cache_path = cache_path err.original_error = old_exc raise err def get_cache_path(self, archive_name, names=()): """Return absolute location in cache for `archive_name` and `names` The parent directory of the resulting path will be created if it does not already exist. 
`archive_name` should be the base filename of the enclosing egg (which may not be the name of the enclosing zipfile!), including its ".egg" extension. `names`, if provided, should be a sequence of path name parts "under" the egg's extraction location. This method should only be called by resource providers that need to obtain an extraction location, and only for names they intend to extract, as it tracks the generated names for possible cleanup later. """ extract_path = self.extraction_path or get_default_cache() target_path = os.path.join(extract_path, archive_name+'-tmp', *names) try: _bypass_ensure_directory(target_path) except: self.extraction_error() self._warn_unsafe_extraction_path(extract_path) self.cached_files[target_path] = 1 return target_path @staticmethod def _warn_unsafe_extraction_path(path): """ If the default extraction path is overridden and set to an insecure location, such as /tmp, it opens up an opportunity for an attacker to replace an extracted file with an unauthorized payload. Warn the user if a known insecure location is used. See Distribute #375 for more details. """ if os.name == 'nt' and not path.startswith(os.environ['windir']): # On Windows, permissions are generally restrictive by default # and temp directories are not writable by other users, so # bypass the warning. return mode = os.stat(path).st_mode if mode & stat.S_IWOTH or mode & stat.S_IWGRP: msg = ("%s is writable by group/others and vulnerable to attack " "when " "used with get_resource_filename. Consider a more secure " "location (set with .set_extraction_path or the " "PYTHON_EGG_CACHE environment variable)." % path) warnings.warn(msg, UserWarning) def postprocess(self, tempname, filename): """Perform any platform-specific postprocessing of `tempname` This is where Mac header rewrites should be done; other platforms don't have anything special they should do. Resource providers should call this method ONLY after successfully extracting a compressed resource. They must NOT call it on resources that are already in the filesystem. `tempname` is the current (temporary) name of the file, and `filename` is the name it will be renamed to by the caller after this routine returns. """ if os.name == 'posix': # Make the resource executable mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 os.chmod(tempname, mode) def set_extraction_path(self, path): """Set the base path where resources will be extracted to, if needed. If you do not call this routine before any extractions take place, the path defaults to the return value of ``get_default_cache()``. (Which is based on the ``PYTHON_EGG_CACHE`` environment variable, with various platform-specific fallbacks. See that routine's documentation for more details.) Resources are extracted to subdirectories of this path based upon information given by the ``IResourceProvider``. You may set this to a temporary directory, but then you must call ``cleanup_resources()`` to delete the extracted files when done. There is no guarantee that ``cleanup_resources()`` will be able to remove all extracted files. (Note: you may not change the extraction path for a given resource manager once resources have been extracted, unless you first call ``cleanup_resources()``.) 
""" if self.cached_files: raise ValueError( "Can't change extraction path, files already extracted" ) self.extraction_path = path def cleanup_resources(self, force=False): """ Delete all extracted resource files and directories, returning a list of the file and directory names that could not be successfully removed. This function does not have any concurrency protection, so it should generally only be called when the extraction path is a temporary directory exclusive to a single process. This method is not automatically called; you must call it explicitly or register it as an ``atexit`` function if you wish to ensure cleanup of a temporary directory used for extractions. """ # XXX def get_default_cache(): """Determine the default cache location This returns the ``PYTHON_EGG_CACHE`` environment variable, if set. Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the "Application Data" directory. On all other systems, it's "~/.python-eggs". """ try: return os.environ['PYTHON_EGG_CACHE'] except KeyError: pass if os.name!='nt': return os.path.expanduser('~/.python-eggs') # XXX this may be locale-specific! app_data = 'Application Data' app_homes = [ # best option, should be locale-safe (('APPDATA',), None), (('USERPROFILE',), app_data), (('HOMEDRIVE','HOMEPATH'), app_data), (('HOMEPATH',), app_data), (('HOME',), None), # 95/98/ME (('WINDIR',), app_data), ] for keys, subdir in app_homes: dirname = '' for key in keys: if key in os.environ: dirname = os.path.join(dirname, os.environ[key]) else: break else: if subdir: dirname = os.path.join(dirname, subdir) return os.path.join(dirname, 'Python-Eggs') else: raise RuntimeError( "Please set the PYTHON_EGG_CACHE enviroment variable" ) def safe_name(name): """Convert an arbitrary string to a standard distribution name Any runs of non-alphanumeric/. characters are replaced with a single '-'. """ return re.sub('[^A-Za-z0-9.]+', '-', name) def safe_version(version): """ Convert an arbitrary string to a standard version string """ try: # normalize the version return str(packaging.version.Version(version)) except packaging.version.InvalidVersion: version = version.replace(' ','.') return re.sub('[^A-Za-z0-9.]+', '-', version) def safe_extra(extra): """Convert an arbitrary string to a standard 'extra' name Any runs of non-alphanumeric characters are replaced with a single '_', and the result is always lowercased. """ return re.sub('[^A-Za-z0-9.]+', '_', extra).lower() def to_filename(name): """Convert a project or version name to its filename-escaped form Any '-' characters are currently replaced with '_'. """ return name.replace('-','_') class MarkerEvaluation(object): values = { 'os_name': lambda: os.name, 'sys_platform': lambda: sys.platform, 'python_full_version': platform.python_version, 'python_version': lambda: platform.python_version()[:3], 'platform_version': platform.version, 'platform_machine': platform.machine, 'python_implementation': platform.python_implementation, } @classmethod def is_invalid_marker(cls, text): """ Validate text as a PEP 426 environment marker; return an exception if invalid or False otherwise. """ try: cls.evaluate_marker(text) except SyntaxError as e: return cls.normalize_exception(e) return False @staticmethod def normalize_exception(exc): """ Given a SyntaxError from a marker evaluation, normalize the error message: - Remove indications of filename and line number. - Replace platform-specific error messages with standard error messages. 
""" subs = { 'unexpected EOF while parsing': 'invalid syntax', 'parenthesis is never closed': 'invalid syntax', } exc.filename = None exc.lineno = None exc.msg = subs.get(exc.msg, exc.msg) return exc @classmethod def and_test(cls, nodelist): # MUST NOT short-circuit evaluation, or invalid syntax can be skipped! items = [ cls.interpret(nodelist[i]) for i in range(1, len(nodelist), 2) ] return functools.reduce(operator.and_, items) @classmethod def test(cls, nodelist): # MUST NOT short-circuit evaluation, or invalid syntax can be skipped! items = [ cls.interpret(nodelist[i]) for i in range(1, len(nodelist), 2) ] return functools.reduce(operator.or_, items) @classmethod def atom(cls, nodelist): t = nodelist[1][0] if t == token.LPAR: if nodelist[2][0] == token.RPAR: raise SyntaxError("Empty parentheses") return cls.interpret(nodelist[2]) msg = "Language feature not supported in environment markers" raise SyntaxError(msg) @classmethod def comparison(cls, nodelist): if len(nodelist) > 4: msg = "Chained comparison not allowed in environment markers" raise SyntaxError(msg) comp = nodelist[2][1] cop = comp[1] if comp[0] == token.NAME: if len(nodelist[2]) == 3: if cop == 'not': cop = 'not in' else: cop = 'is not' try: cop = cls.get_op(cop) except KeyError: msg = repr(cop) + " operator not allowed in environment markers" raise SyntaxError(msg) return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3])) @classmethod def get_op(cls, op): ops = { symbol.test: cls.test, symbol.and_test: cls.and_test, symbol.atom: cls.atom, symbol.comparison: cls.comparison, 'not in': lambda x, y: x not in y, 'in': lambda x, y: x in y, '==': operator.eq, '!=': operator.ne, '<': operator.lt, '>': operator.gt, '<=': operator.le, '>=': operator.ge, } if hasattr(symbol, 'or_test'): ops[symbol.or_test] = cls.test return ops[op] @classmethod def evaluate_marker(cls, text, extra=None): """ Evaluate a PEP 426 environment marker on CPython 2.4+. Return a boolean indicating the marker result in this environment. Raise SyntaxError if marker is invalid. This implementation uses the 'parser' module, which is not implemented on Jython and has been superseded by the 'ast' module in Python 2.6 and later. """ return cls.interpret(parser.expr(text).totuple(1)[1]) @classmethod def _markerlib_evaluate(cls, text): """ Evaluate a PEP 426 environment marker using markerlib. Return a boolean indicating the marker result in this environment. Raise SyntaxError if marker is invalid. """ from pip._vendor import _markerlib # markerlib implements Metadata 1.2 (PEP 345) environment markers. # Translate the variables to Metadata 2.0 (PEP 426). env = _markerlib.default_environment() for key in env.keys(): new_key = key.replace('.', '_') env[new_key] = env.pop(key) try: result = _markerlib.interpret(text, env) except NameError as e: raise SyntaxError(e.args[0]) return result if 'parser' not in globals(): # Fall back to less-complete _markerlib implementation if 'parser' module # is not available. 
evaluate_marker = _markerlib_evaluate @classmethod def interpret(cls, nodelist): while len(nodelist)==2: nodelist = nodelist[1] try: op = cls.get_op(nodelist[0]) except KeyError: raise SyntaxError("Comparison or logical expression expected") return op(nodelist) @classmethod def evaluate(cls, nodelist): while len(nodelist)==2: nodelist = nodelist[1] kind = nodelist[0] name = nodelist[1] if kind==token.NAME: try: op = cls.values[name] except KeyError: raise SyntaxError("Unknown name %r" % name) return op() if kind==token.STRING: s = nodelist[1] if not cls._safe_string(s): raise SyntaxError( "Only plain strings allowed in environment markers") return s[1:-1] msg = "Language feature not supported in environment markers" raise SyntaxError(msg) @staticmethod def _safe_string(cand): return ( cand[:1] in "'\"" and not cand.startswith('"""') and not cand.startswith("'''") and '\\' not in cand ) invalid_marker = MarkerEvaluation.is_invalid_marker evaluate_marker = MarkerEvaluation.evaluate_marker class NullProvider: """Try to implement resources and metadata for arbitrary PEP 302 loaders""" egg_name = None egg_info = None loader = None def __init__(self, module): self.loader = getattr(module, '__loader__', None) self.module_path = os.path.dirname(getattr(module, '__file__', '')) def get_resource_filename(self, manager, resource_name): return self._fn(self.module_path, resource_name) def get_resource_stream(self, manager, resource_name): return io.BytesIO(self.get_resource_string(manager, resource_name)) def get_resource_string(self, manager, resource_name): return self._get(self._fn(self.module_path, resource_name)) def has_resource(self, resource_name): return self._has(self._fn(self.module_path, resource_name)) def has_metadata(self, name): return self.egg_info and self._has(self._fn(self.egg_info, name)) if sys.version_info <= (3,): def get_metadata(self, name): if not self.egg_info: return "" return self._get(self._fn(self.egg_info, name)) else: def get_metadata(self, name): if not self.egg_info: return "" return self._get(self._fn(self.egg_info, name)).decode("utf-8") def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) def resource_isdir(self, resource_name): return self._isdir(self._fn(self.module_path, resource_name)) def metadata_isdir(self, name): return self.egg_info and self._isdir(self._fn(self.egg_info, name)) def resource_listdir(self, resource_name): return self._listdir(self._fn(self.module_path, resource_name)) def metadata_listdir(self, name): if self.egg_info: return self._listdir(self._fn(self.egg_info, name)) return [] def run_script(self, script_name, namespace): script = 'scripts/'+script_name if not self.has_metadata(script): raise ResolutionError("No script named %r" % script_name) script_text = self.get_metadata(script).replace('\r\n', '\n') script_text = script_text.replace('\r', '\n') script_filename = self._fn(self.egg_info, script) namespace['__file__'] = script_filename if os.path.exists(script_filename): source = open(script_filename).read() code = compile(source, script_filename, 'exec') exec(code, namespace, namespace) else: from linecache import cache cache[script_filename] = ( len(script_text), 0, script_text.split('\n'), script_filename ) script_code = compile(script_text, script_filename,'exec') exec(script_code, namespace, namespace) def _has(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _isdir(self, path): raise NotImplementedError( "Can't perform this operation for 
unregistered loader type" ) def _listdir(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _fn(self, base, resource_name): if resource_name: return os.path.join(base, *resource_name.split('/')) return base def _get(self, path): if hasattr(self.loader, 'get_data'): return self.loader.get_data(path) raise NotImplementedError( "Can't perform this operation for loaders without 'get_data()'" ) register_loader_type(object, NullProvider) class EggProvider(NullProvider): """Provider based on a virtual filesystem""" def __init__(self, module): NullProvider.__init__(self, module) self._setup_prefix() def _setup_prefix(self): # we assume here that our metadata may be nested inside a "basket" # of multiple eggs; that's why we use module_path instead of .archive path = self.module_path old = None while path!=old: if path.lower().endswith('.egg'): self.egg_name = os.path.basename(path) self.egg_info = os.path.join(path, 'EGG-INFO') self.egg_root = path break old = path path, base = os.path.split(path) class DefaultProvider(EggProvider): """Provides access to package resources in the filesystem""" def _has(self, path): return os.path.exists(path) def _isdir(self, path): return os.path.isdir(path) def _listdir(self, path): return os.listdir(path) def get_resource_stream(self, manager, resource_name): return open(self._fn(self.module_path, resource_name), 'rb') def _get(self, path): with open(path, 'rb') as stream: return stream.read() register_loader_type(type(None), DefaultProvider) if importlib_machinery is not None: register_loader_type(importlib_machinery.SourceFileLoader, DefaultProvider) class EmptyProvider(NullProvider): """Provider that returns nothing for all requests""" _isdir = _has = lambda self, path: False _get = lambda self, path: '' _listdir = lambda self, path: [] module_path = None def __init__(self): pass empty_provider = EmptyProvider() class ZipManifests(dict): """ zip manifest builder """ @classmethod def build(cls, path): """ Build a dictionary similar to the zipimport directory caches, except instead of tuples, store ZipInfo objects. Use a platform-specific path separator (os.sep) for the path keys for compatibility with pypy on Windows. """ with ContextualZipFile(path) as zfile: items = ( ( name.replace('/', os.sep), zfile.getinfo(name), ) for name in zfile.namelist() ) return dict(items) load = build class MemoizedZipManifests(ZipManifests): """ Memoized zipfile manifests. """ manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') def load(self, path): """ Load a manifest at path or return a suitable manifest already loaded. 
""" path = os.path.normpath(path) mtime = os.stat(path).st_mtime if path not in self or self[path].mtime != mtime: manifest = self.build(path) self[path] = self.manifest_mod(manifest, mtime) return self[path].manifest class ContextualZipFile(zipfile.ZipFile): """ Supplement ZipFile class to support context manager for Python 2.6 """ def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def __new__(cls, *args, **kwargs): """ Construct a ZipFile or ContextualZipFile as appropriate """ if hasattr(zipfile.ZipFile, '__exit__'): return zipfile.ZipFile(*args, **kwargs) return super(ContextualZipFile, cls).__new__(cls) class ZipProvider(EggProvider): """Resource support for zips and eggs""" eagers = None _zip_manifests = MemoizedZipManifests() def __init__(self, module): EggProvider.__init__(self, module) self.zip_pre = self.loader.archive+os.sep def _zipinfo_name(self, fspath): # Convert a virtual filename (full path to file) into a zipfile subpath # usable with the zipimport directory cache for our target archive if fspath.startswith(self.zip_pre): return fspath[len(self.zip_pre):] raise AssertionError( "%s is not a subpath of %s" % (fspath, self.zip_pre) ) def _parts(self, zip_path): # Convert a zipfile subpath into an egg-relative path part list. # pseudo-fs path fspath = self.zip_pre+zip_path if fspath.startswith(self.egg_root+os.sep): return fspath[len(self.egg_root)+1:].split(os.sep) raise AssertionError( "%s is not a subpath of %s" % (fspath, self.egg_root) ) @property def zipinfo(self): return self._zip_manifests.load(self.loader.archive) def get_resource_filename(self, manager, resource_name): if not self.egg_name: raise NotImplementedError( "resource_filename() only supported for .egg, not .zip" ) # no need to lock for extraction, since we use temp names zip_path = self._resource_to_zip(resource_name) eagers = self._get_eager_resources() if '/'.join(self._parts(zip_path)) in eagers: for name in eagers: self._extract_resource(manager, self._eager_to_zip(name)) return self._extract_resource(manager, zip_path) @staticmethod def _get_date_and_size(zip_stat): size = zip_stat.file_size # ymdhms+wday, yday, dst date_time = zip_stat.date_time + (0, 0, -1) # 1980 offset already done timestamp = time.mktime(date_time) return timestamp, size def _extract_resource(self, manager, zip_path): if zip_path in self._index(): for name in self._index()[zip_path]: last = self._extract_resource( manager, os.path.join(zip_path, name) ) # return the extracted directory name return os.path.dirname(last) timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not WRITE_SUPPORT: raise IOError('"os.rename" and "os.unlink" are not supported ' 'on this platform') try: real_path = manager.get_cache_path( self.egg_name, self._parts(zip_path) ) if self._is_current(real_path, zip_path): return real_path outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path)) os.write(outf, self.loader.get_data(zip_path)) os.close(outf) utime(tmpnam, (timestamp, timestamp)) manager.postprocess(tmpnam, real_path) try: rename(tmpnam, real_path) except os.error: if os.path.isfile(real_path): if self._is_current(real_path, zip_path): # the file became current since it was checked above, # so proceed. 
return real_path # Windows, del old file and retry elif os.name=='nt': unlink(real_path) rename(tmpnam, real_path) return real_path raise except os.error: # report a user-friendly error manager.extraction_error() return real_path def _is_current(self, file_path, zip_path): """ Return True if the file_path is current for this zip_path """ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not os.path.isfile(file_path): return False stat = os.stat(file_path) if stat.st_size!=size or stat.st_mtime!=timestamp: return False # check that the contents match zip_contents = self.loader.get_data(zip_path) with open(file_path, 'rb') as f: file_contents = f.read() return zip_contents == file_contents def _get_eager_resources(self): if self.eagers is None: eagers = [] for name in ('native_libs.txt', 'eager_resources.txt'): if self.has_metadata(name): eagers.extend(self.get_metadata_lines(name)) self.eagers = eagers return self.eagers def _index(self): try: return self._dirindex except AttributeError: ind = {} for path in self.zipinfo: parts = path.split(os.sep) while parts: parent = os.sep.join(parts[:-1]) if parent in ind: ind[parent].append(parts[-1]) break else: ind[parent] = [parts.pop()] self._dirindex = ind return ind def _has(self, fspath): zip_path = self._zipinfo_name(fspath) return zip_path in self.zipinfo or zip_path in self._index() def _isdir(self, fspath): return self._zipinfo_name(fspath) in self._index() def _listdir(self, fspath): return list(self._index().get(self._zipinfo_name(fspath), ())) def _eager_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.egg_root, resource_name)) def _resource_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.module_path, resource_name)) register_loader_type(zipimport.zipimporter, ZipProvider) class FileMetadata(EmptyProvider): """Metadata handler for standalone PKG-INFO files Usage:: metadata = FileMetadata("/path/to/PKG-INFO") This provider rejects all data and metadata requests except for PKG-INFO, which is treated as existing, and will be the contents of the file at the provided location. 
""" def __init__(self, path): self.path = path def has_metadata(self, name): return name=='PKG-INFO' def get_metadata(self, name): if name=='PKG-INFO': with open(self.path,'rU') as f: metadata = f.read() return metadata raise KeyError("No metadata except PKG-INFO is available") def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) class PathMetadata(DefaultProvider): """Metadata provider for egg directories Usage:: # Development eggs: egg_info = "/path/to/PackageName.egg-info" base_dir = os.path.dirname(egg_info) metadata = PathMetadata(base_dir, egg_info) dist_name = os.path.splitext(os.path.basename(egg_info))[0] dist = Distribution(basedir, project_name=dist_name, metadata=metadata) # Unpacked egg directories: egg_path = "/path/to/PackageName-ver-pyver-etc.egg" metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) dist = Distribution.from_filename(egg_path, metadata=metadata) """ def __init__(self, path, egg_info): self.module_path = path self.egg_info = egg_info class EggMetadata(ZipProvider): """Metadata provider for .egg files""" def __init__(self, importer): """Create a metadata provider from a zipimporter""" self.zip_pre = importer.archive+os.sep self.loader = importer if importer.prefix: self.module_path = os.path.join(importer.archive, importer.prefix) else: self.module_path = importer.archive self._setup_prefix() _declare_state('dict', _distribution_finders = {}) def register_finder(importer_type, distribution_finder): """Register `distribution_finder` to find distributions in sys.path items `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `distribution_finder` is a callable that, passed a path item and the importer instance, yields ``Distribution`` instances found on that path item. See ``pkg_resources.find_on_path`` for an example.""" _distribution_finders[importer_type] = distribution_finder def find_distributions(path_item, only=False): """Yield distributions accessible via `path_item`""" importer = get_importer(path_item) finder = _find_adapter(_distribution_finders, importer) return finder(importer, path_item, only) def find_eggs_in_zip(importer, path_item, only=False): """ Find eggs in zip files; possibly multiple nested eggs. 
""" if importer.archive.endswith('.whl'): # wheels are not supported with this finder # they don't have PKG-INFO metadata, and won't ever contain eggs return metadata = EggMetadata(importer) if metadata.has_metadata('PKG-INFO'): yield Distribution.from_filename(path_item, metadata=metadata) if only: # don't yield nested distros return for subitem in metadata.resource_listdir('/'): if subitem.endswith('.egg'): subpath = os.path.join(path_item, subitem) for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath): yield dist register_finder(zipimport.zipimporter, find_eggs_in_zip) def find_nothing(importer, path_item, only=False): return () register_finder(object, find_nothing) def find_on_path(importer, path_item, only=False): """Yield distributions accessible on a sys.path directory""" path_item = _normalize_cached(path_item) if os.path.isdir(path_item) and os.access(path_item, os.R_OK): if path_item.lower().endswith('.egg'): # unpacked egg yield Distribution.from_filename( path_item, metadata=PathMetadata( path_item, os.path.join(path_item,'EGG-INFO') ) ) else: # scan for .egg and .egg-info in directory for entry in os.listdir(path_item): lower = entry.lower() if lower.endswith('.egg-info') or lower.endswith('.dist-info'): fullpath = os.path.join(path_item, entry) if os.path.isdir(fullpath): # egg-info directory, allow getting metadata metadata = PathMetadata(path_item, fullpath) else: metadata = FileMetadata(fullpath) yield Distribution.from_location( path_item, entry, metadata, precedence=DEVELOP_DIST ) elif not only and lower.endswith('.egg'): dists = find_distributions(os.path.join(path_item, entry)) for dist in dists: yield dist elif not only and lower.endswith('.egg-link'): with open(os.path.join(path_item, entry)) as entry_file: entry_lines = entry_file.readlines() for line in entry_lines: if not line.strip(): continue path = os.path.join(path_item, line.rstrip()) dists = find_distributions(path) for item in dists: yield item break register_finder(pkgutil.ImpImporter, find_on_path) if importlib_machinery is not None: register_finder(importlib_machinery.FileFinder, find_on_path) _declare_state('dict', _namespace_handlers={}) _declare_state('dict', _namespace_packages={}) def register_namespace_handler(importer_type, namespace_handler): """Register `namespace_handler` to declare namespace packages `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `namespace_handler` is a callable like this:: def namespace_handler(importer, path_entry, moduleName, module): # return a path_entry to use for child packages Namespace handlers are only called if the importer object has already agreed that it can handle the relevant path item, and they should only return a subpath if the module __path__ does not already contain an equivalent subpath. For an example namespace handler, see ``pkg_resources.file_ns_handler``. 
""" _namespace_handlers[importer_type] = namespace_handler def _handle_ns(packageName, path_item): """Ensure that named package includes a subpath of path_item (if needed)""" importer = get_importer(path_item) if importer is None: return None loader = importer.find_module(packageName) if loader is None: return None module = sys.modules.get(packageName) if module is None: module = sys.modules[packageName] = types.ModuleType(packageName) module.__path__ = [] _set_parent_ns(packageName) elif not hasattr(module,'__path__'): raise TypeError("Not a package:", packageName) handler = _find_adapter(_namespace_handlers, importer) subpath = handler(importer, path_item, packageName, module) if subpath is not None: path = module.__path__ path.append(subpath) loader.load_module(packageName) for path_item in path: if path_item not in module.__path__: module.__path__.append(path_item) return subpath def declare_namespace(packageName): """Declare that package 'packageName' is a namespace package""" _imp.acquire_lock() try: if packageName in _namespace_packages: return path, parent = sys.path, None if '.' in packageName: parent = '.'.join(packageName.split('.')[:-1]) declare_namespace(parent) if parent not in _namespace_packages: __import__(parent) try: path = sys.modules[parent].__path__ except AttributeError: raise TypeError("Not a package:", parent) # Track what packages are namespaces, so when new path items are added, # they can be updated _namespace_packages.setdefault(parent,[]).append(packageName) _namespace_packages.setdefault(packageName,[]) for path_item in path: # Ensure all the parent's path items are reflected in the child, # if they apply _handle_ns(packageName, path_item) finally: _imp.release_lock() def fixup_namespace_packages(path_item, parent=None): """Ensure that previously-declared namespace packages include path_item""" _imp.acquire_lock() try: for package in _namespace_packages.get(parent,()): subpath = _handle_ns(package, path_item) if subpath: fixup_namespace_packages(subpath, package) finally: _imp.release_lock() def file_ns_handler(importer, path_item, packageName, module): """Compute an ns-package subpath for a filesystem or zipfile importer""" subpath = os.path.join(path_item, packageName.split('.')[-1]) normalized = _normalize_cached(subpath) for item in module.__path__: if _normalize_cached(item)==normalized: break else: # Only return the path if it's not already there return subpath register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) register_namespace_handler(zipimport.zipimporter, file_ns_handler) if importlib_machinery is not None: register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) def null_ns_handler(importer, path_item, packageName, module): return None register_namespace_handler(object, null_ns_handler) def normalize_path(filename): """Normalize a file/dir name for comparison purposes""" return os.path.normcase(os.path.realpath(filename)) def _normalize_cached(filename, _cache={}): try: return _cache[filename] except KeyError: _cache[filename] = result = normalize_path(filename) return result def _set_parent_ns(packageName): parts = packageName.split('.') name = parts.pop() if parts: parent = '.'.join(parts) setattr(sys.modules[parent], name, sys.modules[packageName]) def yield_lines(strs): """Yield non-empty/non-comment lines of a string or sequence""" if isinstance(strs, string_types): for s in strs.splitlines(): s = s.strip() # skip blank lines/comments if s and not s.startswith('#'): yield s else: for ss in strs: for s in 
yield_lines(ss): yield s # whitespace and comment LINE_END = re.compile(r"\s*(#.*)?$").match # line continuation CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # Distribution or extra DISTRO = re.compile(r"\s*((\w|[-.])+)").match # ver. info VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match # comma between items COMMA = re.compile(r"\s*,").match OBRACKET = re.compile(r"\s*\[").match CBRACKET = re.compile(r"\s*\]").match MODULE = re.compile(r"\w+(\.\w+)*$").match EGG_NAME = re.compile( r""" (?P<name>[^-]+) ( -(?P<ver>[^-]+) ( -py(?P<pyver>[^-]+) ( -(?P<plat>.+) )? )? )? """, re.VERBOSE | re.IGNORECASE, ).match class EntryPoint(object): """Object representing an advertised importable object""" def __init__(self, name, module_name, attrs=(), extras=(), dist=None): if not MODULE(module_name): raise ValueError("Invalid module name", module_name) self.name = name self.module_name = module_name self.attrs = tuple(attrs) self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras self.dist = dist def __str__(self): s = "%s = %s" % (self.name, self.module_name) if self.attrs: s += ':' + '.'.join(self.attrs) if self.extras: s += ' [%s]' % ','.join(self.extras) return s def __repr__(self): return "EntryPoint.parse(%r)" % str(self) def load(self, require=True, *args, **kwargs): """ Require packages for this EntryPoint, then resolve it. """ if not require or args or kwargs: warnings.warn( "Parameters to load are deprecated. Call .resolve and " ".require separately.", DeprecationWarning, stacklevel=2, ) if require: self.require(*args, **kwargs) return self.resolve() def resolve(self): """ Resolve the entry point from its module and attrs. """ module = __import__(self.module_name, fromlist=['__name__'], level=0) try: return functools.reduce(getattr, self.attrs, module) except AttributeError as exc: raise ImportError(str(exc)) def require(self, env=None, installer=None): if self.extras and not self.dist: raise UnknownExtra("Can't require() without a distribution", self) reqs = self.dist.requires(self.extras) items = working_set.resolve(reqs, env, installer) list(map(working_set.add, items)) pattern = re.compile( r'\s*' r'(?P<name>.+?)\s*' r'=\s*' r'(?P<module>[\w.]+)\s*' r'(:\s*(?P<attr>[\w.]+))?\s*' r'(?P<extras>\[.*\])?\s*$' ) @classmethod def parse(cls, src, dist=None): """Parse a single entry point from string `src` Entry point syntax follows the form:: name = some.module:some.attr [extra1, extra2] The entry name and module name are required, but the ``:attrs`` and ``[extras]`` parts are optional """ m = cls.pattern.match(src) if not m: msg = "EntryPoint must be in 'name=module:attrs [extras]' format" raise ValueError(msg, src) res = m.groupdict() extras = cls._parse_extras(res['extras']) attrs = res['attr'].split('.') if res['attr'] else () return cls(res['name'], res['module'], attrs, extras, dist) @classmethod def _parse_extras(cls, extras_spec): if not extras_spec: return () req = Requirement.parse('x' + extras_spec) if req.specs: raise ValueError() return req.extras @classmethod def parse_group(cls, group, lines, dist=None): """Parse an entry point group""" if not MODULE(group): raise ValueError("Invalid group name", group) this = {} for line in yield_lines(lines): ep = cls.parse(line, dist) if ep.name in this: raise ValueError("Duplicate entry point", group, ep.name) this[ep.name]=ep return this @classmethod def parse_map(cls, data, dist=None): """Parse a map of entry point groups""" if isinstance(data, dict): data = data.items() else: data = 
split_sections(data) maps = {} for group, lines in data: if group is None: if not lines: continue raise ValueError("Entry points must be listed in groups") group = group.strip() if group in maps: raise ValueError("Duplicate group name", group) maps[group] = cls.parse_group(group, lines, dist) return maps def _remove_md5_fragment(location): if not location: return '' parsed = urlparse(location) if parsed[-1].startswith('md5='): return urlunparse(parsed[:-1] + ('',)) return location class Distribution(object): """Wrap an actual or potential sys.path entry w/metadata""" PKG_INFO = 'PKG-INFO' def __init__(self, location=None, metadata=None, project_name=None, version=None, py_version=PY_MAJOR, platform=None, precedence=EGG_DIST): self.project_name = safe_name(project_name or 'Unknown') if version is not None: self._version = safe_version(version) self.py_version = py_version self.platform = platform self.location = location self.precedence = precedence self._provider = metadata or empty_provider @classmethod def from_location(cls, location, basename, metadata=None,**kw): project_name, version, py_version, platform = [None]*4 basename, ext = os.path.splitext(basename) if ext.lower() in _distributionImpl: # .dist-info gets much metadata differently match = EGG_NAME(basename) if match: project_name, version, py_version, platform = match.group( 'name','ver','pyver','plat' ) cls = _distributionImpl[ext.lower()] return cls( location, metadata, project_name=project_name, version=version, py_version=py_version, platform=platform, **kw ) @property def hashcmp(self): return ( self.parsed_version, self.precedence, self.key, _remove_md5_fragment(self.location), self.py_version or '', self.platform or '', ) def __hash__(self): return hash(self.hashcmp) def __lt__(self, other): return self.hashcmp < other.hashcmp def __le__(self, other): return self.hashcmp <= other.hashcmp def __gt__(self, other): return self.hashcmp > other.hashcmp def __ge__(self, other): return self.hashcmp >= other.hashcmp def __eq__(self, other): if not isinstance(other, self.__class__): # It's not a Distribution, so they are not equal return False return self.hashcmp == other.hashcmp def __ne__(self, other): return not self == other # These properties have to be lazy so that we don't have to load any # metadata until/unless it's actually needed. (i.e., some distributions # may not know their name or version without loading PKG-INFO) @property def key(self): try: return self._key except AttributeError: self._key = key = self.project_name.lower() return key @property def parsed_version(self): if not hasattr(self, "_parsed_version"): self._parsed_version = parse_version(self.version) return self._parsed_version def _warn_legacy_version(self): LV = packaging.version.LegacyVersion is_legacy = isinstance(self._parsed_version, LV) if not is_legacy: return # While an empty version is technically a legacy version and # is not a valid PEP 440 version, it's also unlikely to # actually come from someone and instead it is more likely that # it comes from setuptools attempting to parse a filename and # including it in the list. So for that we'll gate this warning # on if the version is anything at all or not. if not self.version: return tmpl = textwrap.dedent(""" '{project_name} ({version})' is being parsed as a legacy, non PEP 440, version. You may find odd behavior and sort order. In particular it will be sorted as less than 0.0. It is recommended to migrate to PEP 440 compatible versions. 
""").strip().replace('\n', ' ') warnings.warn(tmpl.format(**vars(self)), PEP440Warning) @property def version(self): try: return self._version except AttributeError: for line in self._get_metadata(self.PKG_INFO): if line.lower().startswith('version:'): self._version = safe_version(line.split(':',1)[1].strip()) return self._version else: tmpl = "Missing 'Version:' header and/or %s file" raise ValueError(tmpl % self.PKG_INFO, self) @property def _dep_map(self): try: return self.__dep_map except AttributeError: dm = self.__dep_map = {None: []} for name in 'requires.txt', 'depends.txt': for extra, reqs in split_sections(self._get_metadata(name)): if extra: if ':' in extra: extra, marker = extra.split(':', 1) if invalid_marker(marker): # XXX warn reqs=[] elif not evaluate_marker(marker): reqs=[] extra = safe_extra(extra) or None dm.setdefault(extra,[]).extend(parse_requirements(reqs)) return dm def requires(self, extras=()): """List of Requirements needed for this distro if `extras` are used""" dm = self._dep_map deps = [] deps.extend(dm.get(None, ())) for ext in extras: try: deps.extend(dm[safe_extra(ext)]) except KeyError: raise UnknownExtra( "%s has no such extra feature %r" % (self, ext) ) return deps def _get_metadata(self, name): if self.has_metadata(name): for line in self.get_metadata_lines(name): yield line def activate(self, path=None): """Ensure distribution is importable on `path` (default=sys.path)""" if path is None: path = sys.path self.insert_on(path) if path is sys.path: fixup_namespace_packages(self.location) for pkg in self._get_metadata('namespace_packages.txt'): if pkg in sys.modules: declare_namespace(pkg) def egg_name(self): """Return what this distribution's standard .egg filename should be""" filename = "%s-%s-py%s" % ( to_filename(self.project_name), to_filename(self.version), self.py_version or PY_MAJOR ) if self.platform: filename += '-' + self.platform return filename def __repr__(self): if self.location: return "%s (%s)" % (self, self.location) else: return str(self) def __str__(self): try: version = getattr(self, 'version', None) except ValueError: version = None version = version or "[unknown version]" return "%s %s" % (self.project_name, version) def __getattr__(self, attr): """Delegate all unrecognized public attributes to .metadata provider""" if attr.startswith('_'): raise AttributeError(attr) return getattr(self._provider, attr) @classmethod def from_filename(cls, filename, metadata=None, **kw): return cls.from_location( _normalize_cached(filename), os.path.basename(filename), metadata, **kw ) def as_requirement(self): """Return a ``Requirement`` that matches this distribution exactly""" if isinstance(self.parsed_version, packaging.version.Version): spec = "%s==%s" % (self.project_name, self.parsed_version) else: spec = "%s===%s" % (self.project_name, self.parsed_version) return Requirement.parse(spec) def load_entry_point(self, group, name): """Return the `name` entry point of `group` or raise ImportError""" ep = self.get_entry_info(group, name) if ep is None: raise ImportError("Entry point %r not found" % ((group, name),)) return ep.load() def get_entry_map(self, group=None): """Return the entry point map for `group`, or the full entry map""" try: ep_map = self._ep_map except AttributeError: ep_map = self._ep_map = EntryPoint.parse_map( self._get_metadata('entry_points.txt'), self ) if group is not None: return ep_map.get(group,{}) return ep_map def get_entry_info(self, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return 
self.get_entry_map(group).get(name) def insert_on(self, path, loc = None): """Insert self.location in path before its nearest parent directory""" loc = loc or self.location if not loc: return nloc = _normalize_cached(loc) bdir = os.path.dirname(nloc) npath= [(p and _normalize_cached(p) or p) for p in path] for p, item in enumerate(npath): if item == nloc: break elif item == bdir and self.precedence == EGG_DIST: # if it's an .egg, give it precedence over its directory if path is sys.path: self.check_version_conflict() path.insert(p, loc) npath.insert(p, nloc) break else: if path is sys.path: self.check_version_conflict() path.append(loc) return # p is the spot where we found or inserted loc; now remove duplicates while True: try: np = npath.index(nloc, p+1) except ValueError: break else: del npath[np], path[np] # ha! p = np return def check_version_conflict(self): if self.key == 'setuptools': # ignore the inevitable setuptools self-conflicts :( return nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) loc = normalize_path(self.location) for modname in self._get_metadata('top_level.txt'): if (modname not in sys.modules or modname in nsp or modname in _namespace_packages): continue if modname in ('pkg_resources', 'setuptools', 'site'): continue fn = getattr(sys.modules[modname], '__file__', None) if fn and (normalize_path(fn).startswith(loc) or fn.startswith(self.location)): continue issue_warning( "Module %s was already imported from %s, but %s is being added" " to sys.path" % (modname, fn, self.location), ) def has_version(self): try: self.version except ValueError: issue_warning("Unbuilt egg for " + repr(self)) return False return True def clone(self,**kw): """Copy this distribution, substituting in any changed keyword args""" names = 'project_name version py_version platform location precedence' for attr in names.split(): kw.setdefault(attr, getattr(self, attr, None)) kw.setdefault('metadata', self._provider) return self.__class__(**kw) @property def extras(self): return [dep for dep in self._dep_map if dep] class DistInfoDistribution(Distribution): """Wrap an actual or potential sys.path entry w/metadata, .dist-info style""" PKG_INFO = 'METADATA' EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") @property def _parsed_pkg_info(self): """Parse and cache metadata""" try: return self._pkg_info except AttributeError: metadata = self.get_metadata(self.PKG_INFO) self._pkg_info = email.parser.Parser().parsestr(metadata) return self._pkg_info @property def _dep_map(self): try: return self.__dep_map except AttributeError: self.__dep_map = self._compute_dependencies() return self.__dep_map def _preparse_requirement(self, requires_dist): """Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz') Split environment marker, add == prefix to version specifiers as necessary, and remove parenthesis. 
""" parts = requires_dist.split(';', 1) + [''] distvers = parts[0].strip() mark = parts[1].strip() distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers) distvers = distvers.replace('(', '').replace(')', '') return (distvers, mark) def _compute_dependencies(self): """Recompute this distribution's dependencies.""" from pip._vendor._markerlib import compile as compile_marker dm = self.__dep_map = {None: []} reqs = [] # Including any condition expressions for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: distvers, mark = self._preparse_requirement(req) parsed = next(parse_requirements(distvers)) parsed.marker_fn = compile_marker(mark) reqs.append(parsed) def reqs_for_extra(extra): for req in reqs: if req.marker_fn(override={'extra':extra}): yield req common = frozenset(reqs_for_extra(None)) dm[None].extend(common) for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: extra = safe_extra(extra.strip()) dm[extra] = list(frozenset(reqs_for_extra(extra)) - common) return dm _distributionImpl = { '.egg': Distribution, '.egg-info': Distribution, '.dist-info': DistInfoDistribution, } def issue_warning(*args,**kw): level = 1 g = globals() try: # find the first stack frame that is *not* code in # the pkg_resources module, to use for the warning while sys._getframe(level).f_globals is g: level += 1 except ValueError: pass warnings.warn(stacklevel=level + 1, *args, **kw) class RequirementParseError(ValueError): def __str__(self): return ' '.join(self.args) def parse_requirements(strs): """Yield ``Requirement`` objects for each specification in `strs` `strs` must be a string, or a (possibly-nested) iterable thereof. """ # create a steppable iterator, so we can handle \-continuations lines = iter(yield_lines(strs)) def scan_list(ITEM, TERMINATOR, line, p, groups, item_name): items = [] while not TERMINATOR(line, p): if CONTINUE(line, p): try: line = next(lines) p = 0 except StopIteration: msg = "\\ must not appear on the last nonblank line" raise RequirementParseError(msg) match = ITEM(line, p) if not match: msg = "Expected " + item_name + " in" raise RequirementParseError(msg, line, "at", line[p:]) items.append(match.group(*groups)) p = match.end() match = COMMA(line, p) if match: # skip the comma p = match.end() elif not TERMINATOR(line, p): msg = "Expected ',' or end-of-list in" raise RequirementParseError(msg, line, "at", line[p:]) match = TERMINATOR(line, p) # skip the terminator, if any if match: p = match.end() return line, p, items for line in lines: match = DISTRO(line) if not match: raise RequirementParseError("Missing distribution spec", line) project_name = match.group(1) p = match.end() extras = [] match = OBRACKET(line, p) if match: p = match.end() line, p, extras = scan_list( DISTRO, CBRACKET, line, p, (1,), "'extra' name" ) line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2), "version spec") specs = [(op, val) for op, val in specs] yield Requirement(project_name, specs, extras) class Requirement: def __init__(self, project_name, specs, extras): """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" self.unsafe_name, project_name = project_name, safe_name(project_name) self.project_name, self.key = project_name, project_name.lower() self.specifier = packaging.specifiers.SpecifierSet( ",".join(["".join([x, y]) for x, y in specs]) ) self.specs = specs self.extras = tuple(map(safe_extra, extras)) self.hashCmp = ( self.key, self.specifier, frozenset(self.extras), ) self.__hash = hash(self.hashCmp) def __str__(self): extras = ','.join(self.extras) 
if extras: extras = '[%s]' % extras return '%s%s%s' % (self.project_name, extras, self.specifier) def __eq__(self, other): return ( isinstance(other, Requirement) and self.hashCmp == other.hashCmp ) def __ne__(self, other): return not self == other def __contains__(self, item): if isinstance(item, Distribution): if item.key != self.key: return False item = item.version # Allow prereleases always in order to match the previous behavior of # this method. In the future this should be smarter and follow PEP 440 # more accurately. return self.specifier.contains(item, prereleases=True) def __hash__(self): return self.__hash def __repr__(self): return "Requirement.parse(%r)" % str(self) @staticmethod def parse(s): reqs = list(parse_requirements(s)) if reqs: if len(reqs) == 1: return reqs[0] raise ValueError("Expected only one requirement", s) raise ValueError("No requirements found", s) def _get_mro(cls): """Get an mro for a type or classic class""" if not isinstance(cls, type): class cls(cls, object): pass return cls.__mro__[1:] return cls.__mro__ def _find_adapter(registry, ob): """Return an adapter factory for `ob` from `registry`""" for t in _get_mro(getattr(ob, '__class__', type(ob))): if t in registry: return registry[t] def ensure_directory(path): """Ensure that the parent directory of `path` exists""" dirname = os.path.dirname(path) if not os.path.isdir(dirname): os.makedirs(dirname) def _bypass_ensure_directory(path): """Sandbox-bypassing version of ensure_directory()""" if not WRITE_SUPPORT: raise IOError('"os.mkdir" not supported on this platform.') dirname, filename = split(path) if dirname and filename and not isdir(dirname): _bypass_ensure_directory(dirname) mkdir(dirname, 0o755) def split_sections(s): """Split a string or iterable thereof into (section, content) pairs Each ``section`` is a stripped version of the section header ("[section]") and each ``content`` is a list of stripped lines excluding blank lines and comment-only lines. If there are any such lines before the first section header, they're returned in a first ``section`` of ``None``. """ section = None content = [] for line in yield_lines(s): if line.startswith("["): if line.endswith("]"): if section or content: yield section, content section = line[1:-1].strip() content = [] else: raise ValueError("Invalid section heading", line) else: content.append(line) # wrap up last segment yield section, content def _mkstemp(*args,**kw): old_open = os.open try: # temporarily bypass sandboxing os.open = os_open return tempfile.mkstemp(*args,**kw) finally: # and then put it back os.open = old_open # Silence the PEP440Warning by default, so that end users don't get hit by it # randomly just because they use pkg_resources. We want to append the rule # because we want earlier uses of filterwarnings to take precedence over this # one. warnings.filterwarnings("ignore", category=PEP440Warning, append=True) # from jaraco.functools 1.3 def _call_aside(f, *args, **kwargs): f(*args, **kwargs) return f @_call_aside def _initialize(g=globals()): "Set up global resource manager (deliberately not state-saved)" manager = ResourceManager() g['_manager'] = manager for name in dir(manager): if not name.startswith('_'): g[name] = getattr(manager, name) @_call_aside def _initialize_master_working_set(): """ Prepare the master working set and make the ``require()`` API available. This function has explicit effects on the global state of pkg_resources. It is intended to be invoked once at the initialization of this module. 
Invocation by other packages is unsupported and done at their own risk. """ working_set = WorkingSet._build_master() _declare_state('object', working_set=working_set) require = working_set.require iter_entry_points = working_set.iter_entry_points add_activation_listener = working_set.subscribe run_script = working_set.run_script # backward compatibility run_main = run_script # Activate all distributions already on sys.path, and ensure that # all distributions added to the working set in the future (e.g. by # calling ``require()``) will get activated as well. add_activation_listener(lambda dist: dist.activate()) working_set.entries=[] # match order list(map(working_set.add_entry, sys.path)) globals().update(locals())
mit
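As a quick illustration of the EntryPoint/Requirement API defined in the pkg_resources content above — a minimal sketch, assuming a standard setuptools-provided pkg_resources; the names mypkg.cli and example-dist are made-up placeholders:

from pkg_resources import EntryPoint, Requirement

# Parse an entry point of the form "name = module:attrs [extras]".
ep = EntryPoint.parse("main = mypkg.cli:run [extra1]")
print(ep.name, ep.module_name, ep.attrs, ep.extras)  # main mypkg.cli ('run',) ('extra1',)

# Parse a requirement and test whether a version satisfies it.
req = Requirement.parse("example-dist>=1.0,<2.0")
print(req.project_name, req.extras)  # example-dist ()
print("1.5" in req)  # True -- delegates to the SpecifierSet built in __init__ above
print("2.3" in req)  # False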
knxd/PyKNyX
pyknyx/stack/multicastSocket.py
2
5201
# -*- coding: utf-8 -*- """ Python KNX framework License ======= - B{PyKNyX} (U{https://github.com/knxd/pyknyx}) is Copyright: - © 2016-2017 Matthias Urlichs - PyKNyX is a fork of pKNyX - © 2013-2015 Frédéric Mantegazza This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA or see: - U{http://www.gnu.org/licenses/gpl.html} Module purpose ============== UDP Multicast support. Implements ========== - B{McastSockValueError} - B{MulticastSocketBase} - B{MulticastSocketReceive} - B{MulticastSocketTransmit} Documentation ============= See U{http://www.tldp.org/HOWTO/Multicast-HOWTO.html} Usage ===== @author: Frédéric Mantegazza @author: Jakub Wroniecki @copyright: (C) 2013-2015 Frédéric Mantegazza @copyright: (C) 2009 Jakub Wroniecki, STANSAT @license: GPL """ import socket import struct import six from pyknyx.common.exception import PyKNyXValueError from pyknyx.services.logger import logging; logger = logging.getLogger(__name__) class McastSockValueError(PyKNyXValueError): """ """ class MulticastSocketBase(socket.socket): """ Multicast socket """ def __init__(self, localAddr, localPort, ttl=32, loop=1): """ Init the multicast socket base class @param localAddr: IP address used as local address @type: localAddr: str @param localPort: port used as local port @type: localPort: int @param ttl: 0 Restricted to the same host (won't be output by any interface) 1 Restricted to the same subnet (won't be forwarded by a router) <32 Restricted to the same site, organization or department <64 Restricted to the same region <128 Restricted to the same continent <255 Unrestricted in scope. Global @type ttl: int @param loop: @type loop: int """ super(MulticastSocketBase, self).__init__(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) self._localAddr = localAddr self._localPort = localPort self._ttl= ttl self._loop = loop self.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, ttl) self.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, loop) self._bind() def _bind(self): """ """ raise NotImplementedError @property def localAddress(self): return self._localAddr @property def localPort(self): return self._localPort class MulticastSocketReceive(MulticastSocketBase): """ """ def __init__(self, localAddr, localPort, mcastAddr, mcastPort, timeout=1, ttl=32, loop=1): """ """ multicast = six.byte2int(socket.inet_aton(mcastAddr)) in range(224, 240) if not multicast: raise McastSockValueError("address is not a multicast destination (%s)" % repr(mcastAddr)) self._mcastAddr = mcastAddr super(MulticastSocketReceive, self).__init__(localAddr, mcastPort, ttl, loop) self.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(self._localAddr)) value = struct.pack("=4sl", socket.inet_aton(mcastAddr), socket.INADDR_ANY) self.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, value) self.settimeout(timeout) def _bind(self): """ @todo: use mcastAddr, instead of ""? 
""" self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) try: self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) except: logger.exception("MulticastSocketBase.__init__(): system doesn't support SO_REUSEPORT") self.bind(("", self._localPort)) def receive(self): """ """ return self.recvfrom(1024) class MulticastSocketTransmit(MulticastSocketBase): """ """ def __init__(self, localAddr, localPort, mcastAddr, mcastPort, ttl=32, loop=1): """ """ super(MulticastSocketTransmit, self).__init__(localAddr, localPort, ttl, loop) self._mcastAddr = mcastAddr self._mcastPort = mcastPort def _bind(self): """ """ self.bind((self._localAddr, self._localPort)) if self._localPort == 0: self._localPort = self.getsockname()[1] def transmit(self, data): """ """ l = self.sendto(data, (self._mcastAddr, self._mcastPort)) if l > 0 and l < len(data): raise IOError("partial transmit: %d of %d to %s", l, len(data), self)
gpl-3.0
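A minimal sketch of how the two socket classes in multicastSocket.py above might be driven, assuming PyKNyX is importable; the local interface address is a placeholder, and 224.0.23.12:3671 is only used here as an example multicast group/port:

from pyknyx.stack.multicastSocket import MulticastSocketReceive, MulticastSocketTransmit

LOCAL_ADDR = "192.168.1.10"                 # placeholder local interface address
MCAST_ADDR, MCAST_PORT = "224.0.23.12", 3671

# Transmit a raw datagram to the multicast group (payload bytes are a placeholder).
tx = MulticastSocketTransmit(LOCAL_ADDR, 0, MCAST_ADDR, MCAST_PORT)
tx.transmit(b"\x06\x10\x05\x30")

# Listen on the same group; receive() wraps recvfrom() and honours the timeout set in __init__.
rx = MulticastSocketReceive(LOCAL_ADDR, 0, MCAST_ADDR, MCAST_PORT, timeout=1)
try:
    data, (addr, port) = rx.receive()
    print("received %d bytes from %s:%d" % (len(data), addr, port))
except OSError:
    print("no datagram within the timeout")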
shingonoide/odoo
openerp/tools/import_email.py
337
6376
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import os, sys import re import smtplib import email, mimetypes from email.header import decode_header from email.mime.text import MIMEText import xmlrpclib warn_msg = """ Bonjour, Le message avec le sujet "%s" n'a pu être archivé dans l'ERP. """.decode('utf-8') class EmailParser(object): def __init__(self, headers, dispatcher): self.headers = headers self.dispatcher = dispatcher def parse(self, msg): dispatcher((self.headers, msg)) class CommandDispatcher(object): def __init__(self, receiver): self.receiver = receiver def __call__(self, request): return self.receiver(request) class RPCProxy(object): def __init__(self, uid, passwd, host='localhost', port=8069, path='object'): self.rpc = xmlrpclib.ServerProxy('http://%s:%s/%s' % (host, port, path)) self.user_id = uid self.passwd = passwd def __call__(self, request): return self.rpc.execute(self.user_id, self.passwd, *request) class ReceiverEmail2Event(object): email_re = re.compile(r""" ([a-zA-Z][\w\.-]*[a-zA-Z0-9] # username part @ # mandatory @ sign [a-zA-Z0-9][\w\.-]* # domain must start with a letter \. 
[a-z]{2,3} # TLD ) """, re.VERBOSE) project_re = re.compile(r"^ *\[?(\d{4}\.?\d{0,3})\]?", re.UNICODE) def __init__(self, rpc): self.rpc = rpc def get_addresses(self, headers, msg): hcontent = '' for header in [h for h in headers if msg.has_key(h)]: hcontent += msg[header] return self.email_re.findall(hcontent) def get_partners(self, headers, msg): alladdresses = self.get_addresses(headers, msg) address_ids = self.rpc(('res.partner', 'search', [('email', 'in', alladdresses)])) addresses = self.rpc(('res.partner', 'read', address_ids)) return [x['partner_id'][0] for x in addresses] def __call__(self, request): headers, msg = request partners = self.get_partners(headers, msg) subject = u'' for string, charset in decode_header(msg['Subject']): if charset: subject += string.decode(charset) else: subject += unicode(string) if partners: self.save_mail(msg, subject, partners) else: warning = MIMEText((warn_msg % (subject,)).encode('utf-8'), 'plain', 'utf-8') warning['Subject'] = 'Message de OpenERP' warning['From'] = '[email protected]' warning['To'] = msg['From'] s = smtplib.SMTP() s.connect() s.sendmail('[email protected]', self.email_re.findall(msg['From']), warning.as_string()) s.close() if msg.is_multipart(): for message in [m for m in msg.get_payload() if m.get_content_type() == 'message/rfc822']: self((headers, message.get_payload()[0])) def save_mail(self, msg, subject, partners): counter, description = 1, u'' if msg.is_multipart(): for part in msg.get_payload(): stockdir = os.path.join('emails', msg['Message-Id'][1:-1]) newdir = os.path.join('/tmp', stockdir) filename = part.get_filename() if not filename: ext = mimetypes.guess_extension(part.get_type()) if not ext: ext = '.bin' filename = 'part-%03d%s' % (counter, ext) if part.get_content_maintype() == 'multipart': continue elif part.get_content_maintype() == 'text': if part.get_content_subtype() == 'plain': description += part.get_payload(decode=1).decode(part.get_charsets()[0]) description += u'\n\nVous trouverez les éventuels fichiers dans le répertoire: %s' % stockdir continue else: description += u'\n\nCe message est en "%s", vous trouverez ce texte dans le répertoire: %s' % (part.get_content_type(), stockdir) elif part.get_content_type() == 'message/rfc822': continue if not os.path.isdir(newdir): os.mkdir(newdir) counter += 1 fd = file(os.path.join(newdir, filename), 'w') fd.write(part.get_payload(decode=1)) fd.close() else: description = msg.get_payload(decode=1).decode(msg.get_charsets()[0]) project = self.project_re.search(subject) if project: project = project.groups()[0] else: project = '' for partner in partners: self.rpc(('res.partner.event', 'create', {'name' : subject, 'partner_id' : partner, 'description' : description, 'project' : project})) if __name__ == '__main__': rpc_dispatcher = CommandDispatcher(RPCProxy(4, 'admin')) dispatcher = CommandDispatcher(ReceiverEmail2Event(rpc_dispatcher)) parser = EmailParser(['To', 'Cc', 'From'], dispatcher) parser.parse(email.message_from_file(sys.stdin)) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
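import_email.py above decodes the Subject header with email.header.decode_header before archiving the message. A standalone Python 3 re-illustration of that decoding step, using only the standard library (the sample message is made up):

import email
from email.header import decode_header

raw = (
    "From: Jane Doe <jane@example.com>\n"
    "Subject: =?utf-8?q?R=C3=A9union_projet_1234?=\n"
    "\n"
    "Corps du message.\n"
)
msg = email.message_from_string(raw)

# Same accumulation loop as in ReceiverEmail2Event.__call__, ported to Python 3.
subject = ""
for part, charset in decode_header(msg["Subject"]):
    if isinstance(part, bytes):
        subject += part.decode(charset or "ascii")
    else:
        subject += part
print(subject)  # Réunion projet 1234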
google-research/language
language/xsp/data_preprocessing/compute_asql_coverage_spider.py
1
3106
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
r"""Compute coverage for Abstract SQL for Spider.

Example usage:

${PATH_TO_BINARY} \
  --spider_examples_json=${SPIDER_DIR}/train_spider.json \
  --spider_tables_json=${SPIDER_DIR}/tables.json \
  --alsologtostderr
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import json

from absl import app
from absl import flags

from language.xsp.data_preprocessing import abstract_sql
from language.xsp.data_preprocessing import abstract_sql_converters

FLAGS = flags.FLAGS

flags.DEFINE_string('spider_examples_json', '', 'Path to Spider json examples')
flags.DEFINE_string('spider_tables_json', '', 'Path to Spider json tables')


def _load_json(filename):
  with open(filename) as json_file:
    return json.load(json_file)


def compute_spider_coverage(spider_examples_json, spider_tables_json):
  """Prints out statistics for asql conversions."""
  table_json = _load_json(spider_tables_json)
  # Map of database id to a list of ForiegnKeyRelation tuples.
  foreign_key_map = abstract_sql_converters.spider_foreign_keys_map(table_json)
  table_schema_map = abstract_sql_converters.spider_table_schemas_map(
      table_json)
  examples = _load_json(spider_examples_json)
  num_examples = 0
  num_conversion_failures = 0
  num_reconstruction_failtures = 0
  for example in examples:
    num_examples += 1
    print('Parsing example number %s: %s' % (num_examples, example['query']))
    gold_sql_query = example['query']
    foreign_keys = foreign_key_map[example['db_id']]
    table_schema = table_schema_map[example['db_id']]
    try:
      sql_spans = abstract_sql.sql_to_sql_spans(gold_sql_query, table_schema)
      sql_spans = abstract_sql.replace_from_clause(sql_spans)
    except abstract_sql.UnsupportedSqlError as e:
      print('Error converting:\n%s\n%s' % (gold_sql_query, e))
      num_conversion_failures += 1
    else:
      try:
        sql_spans = abstract_sql.restore_from_clause(sql_spans, foreign_keys)
      except abstract_sql.UnsupportedSqlError as e:
        print('Error recontructing:\n%s\n%s' % (gold_sql_query, e))
        num_reconstruction_failtures += 1
  print('Examples: %s' % num_examples)
  print('Failed conversions: %s' % num_conversion_failures)
  print('Failed reconstructions: %s' % num_reconstruction_failtures)


def main(unused_argv):
  compute_spider_coverage(FLAGS.spider_examples_json, FLAGS.spider_tables_json)


if __name__ == '__main__':
  app.run(main)
apache-2.0
io7m/jcalcium
io7m-jcalcium-blender/src/main/resources/__init__.py
1
3191
#
# Copyright © 2016 <[email protected]> http://io7m.com
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#

bl_info = {
  "name": "Calcium JSON format",
  "author": "io7m",
  "version": (0, 1, 0),
  "blender": (2, 66, 0),
  "location": "File > Export > Calcium JSON (.csj)",
  "description": "Export armatures to Calcium format",
  "warning": "",
  "wiki_url": "",
  "tracker_url": "https://github.com/io7m/jcalcium/issues",
  "category": "Import-Export"
}

import bpy
import bpy_extras.io_utils
import mathutils

CalciumOrientationHelper = bpy_extras.io_utils.orientation_helper_factory("CalciumOrientationHelper", axis_forward='-Z', axis_up='Y')

class ExportCalcium(bpy.types.Operator, bpy_extras.io_utils.ExportHelper, CalciumOrientationHelper):
  bl_idname = "export_scene.csj"
  bl_label = "Export Calcium"

  # The filename_ext field is accessed by ExportHelper.
  filename_ext = ".csj"
  filepath = bpy.props.StringProperty(subtype='FILE_PATH')
  verbose = bpy.props.BoolProperty(name="Verbose logging", description="Enable verbose debug logging", default=True)

  def execute(self, context):
    self.filepath = bpy.path.ensure_ext(self.filepath, ".csj")

    args = {}
    args['verbose'] = self.verbose
    assert type(args['verbose']) == bool
    args['conversion_matrix'] = bpy_extras.io_utils.axis_conversion(to_forward=self.axis_forward, to_up=self.axis_up).to_4x4()
    assert type(args['conversion_matrix']) == mathutils.Matrix

    from . import export
    e = export.CalciumExporter(args)

    try:
      e.write(self.filepath)
    except export.CalciumNoArmatureSelected as ex:
      self.report({'ERROR'}, ex.value)
    except export.CalciumTooManyArmaturesSelected as ex:
      self.report({'ERROR'}, ex.value)
    except export.CalciumExportFailed as ex:
      self.report({'ERROR'}, ex.value)
    #endtry

    return {'FINISHED'}
  #end

  def invoke(self, context, event):
    if not self.filepath:
      self.filepath = bpy.path.ensure_ext(bpy.data.filepath, ".csj")
    context.window_manager.fileselect_add(self)
    return {'RUNNING_MODAL'}
  #end

#endclass

def menuFunction(self, context):
  self.layout.operator(ExportCalcium.bl_idname, text="Calcium JSON (.csj)")
#end

def register():
  bpy.utils.register_class(ExportCalcium)
  bpy.types.INFO_MT_file_export.append(menuFunction)
#end

def unregister():
  bpy.utils.unregister_class(ExportCalcium)
  bpy.types.INFO_MT_file_export.remove(menuFunction)
#end

if __name__ == "__main__":
  register()
#endif
isc
rbarlow/pulp
client_lib/pulp/client/commands/repo/history.py
17
7546
""" Commands for showing a repository's sync and publish history """ from gettext import gettext as _ from pulp.client.commands.options import OPTION_REPO_ID from pulp.client.extensions.extensions import PulpCliOption, PulpCliFlag, PulpCliCommand from pulp.client import validators # The default limit on the number of history entries to display REPO_HISTORY_LIMIT = 5 # Descriptions DESC_DETAILS = _('if specified, all history information is displayed') DESC_DISTRIBUTOR_ID = _('the distributor id to display history entries for') DESC_END_DATE = _('only return entries that occur on or before the given date in iso8601 format' ' (yyyy-mm-ddThh:mm:ssZ)') DESC_LIMIT = _( 'limits displayed history entries to the given amount (must be greater than zero); the default' ' is %(limit)s' % {'limit': REPO_HISTORY_LIMIT}) DESC_PUBLISH_HISTORY = _('displays the history of publish operations on a repository') DESC_SORT = _('indicates the sort direction ("ascending" or "descending") based on the timestamp') DESC_SYNC_HISTORY = _('displays the history of sync operations on a repository') DESC_START_DATE = _('only return entries that occur on or after the given date in iso8601 format' ' (yyyy-mm-ddThh:mm:ssZ)') # Options OPTION_END_DATE = PulpCliOption('--end-date', DESC_END_DATE, required=False, validate_func=validators.iso8601_datetime_validator) OPTION_LIMIT = PulpCliOption('--limit', DESC_LIMIT, required=False, validate_func=validators.positive_int_validator) OPTION_SORT = PulpCliOption('--sort', DESC_SORT, required=False) OPTION_DISTRIBUTOR_ID = PulpCliOption('--distributor-id', DESC_DISTRIBUTOR_ID, required=True, validate_func=validators.id_validator) OPTION_START_DATE = PulpCliOption('--start-date', DESC_START_DATE, required=False, validate_func=validators.iso8601_datetime_validator) # Flags FLAG_DETAILS = PulpCliFlag('--details', DESC_DETAILS, aliases='-d') class SyncHistoryCommand(PulpCliCommand): """ Displays the sync history of a given repository """ def __init__(self, context, name='sync', description=DESC_SYNC_HISTORY): """ :param context: The client context used to interact with the client framework and server :type context: pulp.client.extensions.core.ClientContext :param name: The name of the command in the history section :type name: str :param description: The description to use in the cli :type description: str """ # The context is used to access the server and prompt. 
self.context = context super(SyncHistoryCommand, self).__init__(name, description, self.run) self.add_option(OPTION_REPO_ID) self.add_option(OPTION_LIMIT) self.add_option(OPTION_SORT) self.add_option(OPTION_START_DATE) self.add_option(OPTION_END_DATE) self.add_flag(FLAG_DETAILS) self.fields_to_display = ['repo_id', 'result', 'started', 'completed', 'added_count', 'removed_count', 'updated_count'] def run(self, **user_input): """ The action to take when the sync history command is executed :param user_input: the options and flags provided by the user :type user_input: dict """ # Collect input repo_id = user_input[OPTION_REPO_ID.keyword] if user_input[OPTION_LIMIT.keyword] is not None: limit = int(user_input[OPTION_LIMIT.keyword]) else: limit = REPO_HISTORY_LIMIT start_date = user_input[OPTION_START_DATE.keyword] end_date = user_input[OPTION_END_DATE.keyword] sort = user_input[OPTION_SORT.keyword] details = user_input[FLAG_DETAILS.keyword] # Request the sync history from the server sync_list = self.context.server.repo_history.sync_history(repo_id, limit, sort, start_date, end_date).response_body # Filter the fields to show and define the order in which they are displayed if details is True: self.fields_to_display.append('summary') self.fields_to_display.append('details') filters = order = self.fields_to_display # Render results title = _('Sync History [ %(repo)s ]') % {'repo': repo_id} self.context.prompt.render_title(title) self.context.prompt.render_document_list(sync_list, filters=filters, order=order) class PublishHistoryCommand(PulpCliCommand): """ Displays the publish history of a given repository and publisher """ def __init__(self, context, name='publish', description=DESC_PUBLISH_HISTORY): """ :param context: The client context used to interact with the client framework and server :type context: pulp.client.extensions.core.ClientContext :param name: The name of the command in the history section :type name: str :param description: The description to use in the cli :type description: str """ # The context is used to access the server and prompt. 
self.context = context super(PublishHistoryCommand, self).__init__(name, description, self.run) # History is given for a repo id and distributor id pair, so these are mandatory self.add_option(OPTION_REPO_ID) self.add_option(OPTION_DISTRIBUTOR_ID) self.add_option(OPTION_LIMIT) self.add_option(OPTION_SORT) self.add_option(OPTION_START_DATE) self.add_option(OPTION_END_DATE) self.add_flag(FLAG_DETAILS) # Set the default fields to display self.fields_to_display = ['repo_id', 'distributor_id', 'result', 'started', 'completed'] def run(self, **user_input): """ The action to take when the sync history command is executed :param user_input: the options and flags provided by the user :type user_input: dict """ # Collect input repo_id = user_input[OPTION_REPO_ID.keyword] distributor_id = user_input[OPTION_DISTRIBUTOR_ID.keyword] if user_input[OPTION_LIMIT.keyword] is not None: limit = int(user_input[OPTION_LIMIT.keyword]) else: limit = REPO_HISTORY_LIMIT start_date = user_input[OPTION_START_DATE.keyword] end_date = user_input[OPTION_END_DATE.keyword] sort = user_input[OPTION_SORT.keyword] details = user_input[FLAG_DETAILS.keyword] # Request the publish history from the server publish_list = self.context.server.repo_history.publish_history(repo_id, distributor_id, limit, sort, start_date, end_date) publish_list = publish_list.response_body # Filter the fields to show and define the order in which they are displayed if details is True: self.fields_to_display.append('summary') self.fields_to_display.append('details') filters = order = self.fields_to_display # Render results title = _('Publish History [ %(repo)s ]') % {'repo': repo_id} self.context.prompt.render_title(title) self.context.prompt.render_document_list(publish_list, filters=filters, order=order)
gpl-2.0
nathanaevitas/odoo
openerp/addons/hr_recruitment/res_config.py
352
3627
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Business Applications
#    Copyright (C) 2004-Today OpenERP S.A. (<http://openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import SUPERUSER_ID
from openerp.osv import fields, osv


class hr_applicant_settings(osv.TransientModel):
    _name = 'hr.config.settings'
    _inherit = ['hr.config.settings', 'fetchmail.config.settings']

    _columns = {
        'module_document': fields.boolean('Allow the automatic indexation of resumes',
            help='Manage your CV\'s and motivation letter related to all applicants.\n'
                 '-This installs the module document_ftp. This will install the knowledge management module in order to allow you to search using specific keywords through the content of all documents (PDF, .DOCx...)'),
        'alias_prefix': fields.char('Default Alias Name for Jobs'),
        'alias_domain': fields.char('Alias Domain'),
    }

    _defaults = {
        'alias_domain': lambda self, cr, uid, context: self.pool['mail.alias']._get_alias_domain(cr, SUPERUSER_ID, [1], None, None)[1],
    }

    def _find_default_job_alias_id(self, cr, uid, context=None):
        alias_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'hr_recruitment.mail_alias_jobs')
        if not alias_id:
            alias_ids = self.pool['mail.alias'].search(
                cr, uid, [
                    ('alias_model_id.model', '=', 'hr.applicant'),
                    ('alias_force_thread_id', '=', False),
                    ('alias_parent_model_id.model', '=', 'hr.job'),
                    ('alias_parent_thread_id', '=', False),
                    ('alias_defaults', '=', '{}')
                ], context=context)
            alias_id = alias_ids and alias_ids[0] or False
        return alias_id

    def get_default_alias_prefix(self, cr, uid, ids, context=None):
        alias_name = False
        alias_id = self._find_default_job_alias_id(cr, uid, context=context)
        if alias_id:
            alias_name = self.pool['mail.alias'].browse(cr, uid, alias_id, context=context).alias_name
        return {'alias_prefix': alias_name}

    def set_default_alias_prefix(self, cr, uid, ids, context=None):
        mail_alias = self.pool.get('mail.alias')
        for record in self.browse(cr, uid, ids, context=context):
            alias_id = self._find_default_job_alias_id(cr, uid, context=context)
            if not alias_id:
                create_ctx = dict(context, alias_model_name='hr.applicant', alias_parent_model_name='hr.job')
                alias_id = self.pool['mail.alias'].create(cr, uid, {'alias_name': record.alias_prefix}, context=create_ctx)
            else:
                mail_alias.write(cr, uid, alias_id, {'alias_name': record.alias_prefix}, context=context)
        return True
agpl-3.0
joelfrederico/SciSalt
scisalt/qt/mplwidget.py
1
13557
from PyQt4 import QtGui from PyQt4 import QtCore from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as _FigureCanvas from matplotlib.backends.backend_qt4 import NavigationToolbar2QT as _NavigationToolbar import matplotlib as _mpl import numpy as _np from .Rectangle import Rectangle import pdb import traceback import logging loggerlevel = logging.DEBUG logger = logging.getLogger(__name__) try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Slider_and_Text(QtGui.QWidget): valueChanged = QtCore.pyqtSignal(int) sliderReleased = QtCore.pyqtSignal(int) def __init__(self, parent=None): QtGui.QWidget.__init__(self) self.setMaximumHeight(40) # Enable tracking by default self._tracking = True self.hLayout = QtGui.QHBoxLayout() self.slider = QtGui.QSlider() self.leftbutton = QtGui.QPushButton() self.leftbutton.setText("<") sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.leftbutton.sizePolicy().hasHeightForWidth()) # self.leftbutton.setSizePolicy(sizePolicy) self.leftbutton.clicked.connect(self._subone) self.rightbutton = QtGui.QPushButton() self.rightbutton.setText(">") sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.rightbutton.sizePolicy().hasHeightForWidth()) # self.rightbutton.setSizePolicy(sizePolicy) self.rightbutton.clicked.connect(self._addone) self.v = QtGui.QIntValidator() self.box = QtGui.QLineEdit() self.box.setValidator(self.v) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.box.sizePolicy().hasHeightForWidth()) # self.box.setSizePolicy(sizePolicy) self.hLayout.addWidget(self.leftbutton) self.hLayout.addWidget(self.slider) self.hLayout.addWidget(self.box) self.hLayout.addWidget(self.rightbutton) self.setLayout(self.hLayout) self.slider.valueChanged.connect(self._sliderChanged) self.box.editingFinished.connect(self._textChanged) self.setOrientation(QtCore.Qt.Horizontal) # Connect release so tracking works as expected self.slider.sliderReleased.connect(self._sliderReleased) def _addone(self): self.value = self.value + 1 self.valueChanged.emit(self.value) def _subone(self): self.value = self.value - 1 self.valueChanged.emit(self.value) def _sliderReleased(self): print('Released') self.sliderReleased.emit(self.slider.value) def setTracking(self, val): print('Tracking set to {}'.format(val)) self._tracking = val def setMaximum(self, val): self.slider.setMaximum(val) self.v.setRange(self.slider.minimum(), self.slider.maximum()) self.box.setValidator(self.v) def setMinimum(self, val): self.slider.setMinimum(val) self.v.setRange(self.slider.minimum(), self.slider.maximum()) self.box.setValidator(self.v) def _sliderChanged(self, val): self.box.setText(str(val)) if self._tracking: try: self.slider.sliderReleased.disconnect() except: pass self.valueChanged.emit(val) else: try: self.slider.sliderReleased.disconnect() except: pass 
self.slider.sliderReleased.connect(self._sliderChanged_notracking) def _sliderChanged_notracking(self): val = self.slider.value() # print('Value to be emitted is {}'.format(val)) self.valueChanged.emit(val) def _textChanged(self): val = self.box.text() self.slider.setValue(int(val)) self._sliderChanged_notracking() def setOrientation(self, *args, **kwargs): self.slider.setOrientation(*args, **kwargs) def _getValue(self): return self.slider.value() def _setValue(self, val): self.slider.setValue(val) self.box.setText(str(val)) value = property(_getValue, _setValue) def setValue(self, val): self.slider.setValue(val) self.box.setText(str(val)) # self.valueChanged.emit(val) class Mpl_Plot(_FigureCanvas): def __init__(self, parent=None): # Initialize things self.fig = _mpl.figure.Figure() _FigureCanvas.__init__(self, self.fig) _FigureCanvas.setSizePolicy(self, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding) _FigureCanvas.updateGeometry(self) # Create axes self.ax = self.fig.add_subplot(111) def plot(self, *args, **kwargs): self.ax.clear() self.ax.plot(*args, **kwargs) self.ax.ticklabel_format(style='sci', scilimits=(0, 0), axis='y') self.ax.figure.canvas.draw() class Mpl_Image(QtGui.QWidget): # Signal for when the rectangle is changed rectChanged = QtCore.pyqtSignal(Rectangle) def __init__(self, parent=None, rectbool = True, toolbarbool=False, image=None): # Initialize things QtGui.QWidget.__init__(self) self.rectbool = rectbool self._clim_min = 0 self._clim_max = 3600 self._pressed = False # Add a vertical layout self.vLayout = QtGui.QVBoxLayout() # Add a figure self.fig = _mpl.figure.Figure() # Add a canvas containing the fig self.canvas = _FigureCanvas(self.fig) _FigureCanvas.setSizePolicy(self.canvas, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding) _FigureCanvas.updateGeometry(self.canvas) # Setup the layout if toolbarbool: self.toolbar = _NavigationToolbar(self.canvas, self) self.toolbar.setMaximumHeight(20) self.vLayout.addWidget(self.toolbar) self.vLayout.addWidget(self.canvas) self.setLayout(self.vLayout) # Create axes self.ax = self.fig.add_subplot(111) # Include rectangle functionality if rectbool: self.fig.canvas.mpl_connect('button_press_event', self.on_press) self.fig.canvas.mpl_connect('button_release_event', self.on_release) self.Rectangle = Rectangle( x = -10 , y = 0 , width = 0 , height = 3 , axes = self.ax ) # Add image self.image = image def _get_img(self): return self._image def _set_img(self, image): self.ax.clear() self._image = image if image is not None: self._imgplot = self.ax.imshow(image, interpolation='none') if self.rectbool: self.ax.add_patch(self.Rectangle.get_rect()) # imagemax = _np.max(_np.max(image)) self.set_clim(self._clim_min, self._clim_max) image = property(_get_img, _set_img) def set_clim(self, clim_min, clim_max): if self.image is not None: self._clim_min = clim_min self._clim_max = clim_max self._imgplot.set_clim(clim_min, clim_max) self.ax.figure.canvas.draw() def on_press(self, event): if self.toolbar._active is None: self._pressed = True self.x0 = event.xdata self.y0 = event.ydata logger.log(level=loggerlevel, msg='Pressed: x0: {}, y0: {}'.format(self.x0, self.y0)) def on_release(self, event): if self._pressed: self._pressed = False print('release') self.x1 = event.xdata self.y1 = event.ydata width = self.x1 - self.x0 height = self.y1 - self.y0 logger.log(level=loggerlevel, msg='Released: x0: {}, y0: {}, x1: {}, y1: {}, width: {}, height: {}'.format( self.x0 , self.y0 , self.x1 , self.y1 , width , height ) ) 
self.Rectangle.set_xy((self.x0, self.y0)) self.Rectangle.set_width(width) self.Rectangle.set_height(height) self.ax.figure.canvas.draw() self.rectChanged.emit(self.Rectangle) # print(self.rect) def zoom_rect(self, border=None, border_px=None): # ====================================== # Get x coordinates # ====================================== x0 = self.Rectangle.get_x() width = self.Rectangle.get_width() x1 = x0+width # ====================================== # Get y coordinates # ====================================== y0 = self.Rectangle.get_y() height = self.Rectangle.get_height() y1 = y0+height # ====================================== # Validate borders # ====================================== if (border_px is None) and (border is not None): xborder = border[0]*width yborder = border[1]*height elif (border_px is not None) and (border is None): xborder = border_px[0] yborder = border_px[1] elif (border_px is None) and (border is None): raise IOError('No border info specified!') elif (border_px is not None) and (border is not None): raise IOError('Too much border info specified, both border_px and border!') else: raise IOError('End of the line!') # ====================================== # Add borders # ====================================== x0 = x0 - xborder x1 = x1 + xborder y0 = y0 - yborder y1 = y1 + yborder # ====================================== # Validate coordinates to prevent # unPythonic crash # ====================================== if not ((0 <= x0 and x0 <= self.image.shape[1]) and (0 <= x1 and x1 <= self.image.shape[1])): print('X issue') print('Requested: x=({}, {})'.format(x0, x1)) x0 = 0 x1 = self.image.shape[1] if not ((0 <= y0 and y0 <= self.image.shape[0]) and (0 <= y1 and y1 <= self.image.shape[0])): print('y issue') print('Requested: y=({}, {})'.format(y0, y1)) y0 = 0 y1 = self.image.shape[0] # ====================================== # Set viewable area # ====================================== self.ax.set_xlim(x0, x1) self.ax.set_ylim(y0, y1) # ====================================== # Redraw canvas to show updates # ====================================== self.ax.figure.canvas.draw() class Mpl_Image_Plus_Slider(QtGui.QWidget): # def __init__(self, parent=None, **kwargs): def __init__(self, parent=None, **kwargs): # Initialize self as a widget QtGui.QWidget.__init__(self, parent) # Add a vertical layout with parent self self.vLayout = QtGui.QVBoxLayout(self) self.vLayout.setObjectName(_fromUtf8("vLayout")) # Add an Mpl_Image widget to vLayout, # save it to self._img # Pass arguments through to Mpl_Image. 
self._img = Mpl_Image(parent=parent, toolbarbool=True, **kwargs) self._img.setObjectName(_fromUtf8("_img")) self.vLayout.addWidget(self._img) # Add a slider to vLayout, # save it to self.max_slider # self.max_slider = QtGui.QSlider(self) self.max_slider = Slider_and_Text(self) self.max_slider.setObjectName(_fromUtf8("max_slider")) self.max_slider.setOrientation(QtCore.Qt.Horizontal) self.vLayout.addWidget(self.max_slider) # Setup slider to work with _img's clims self.max_slider.valueChanged.connect(lambda val: self.set_clim(0, val)) def _get_image(self): return self._img.image def _set_image(self, image): self._img.image = image maximage = _np.max(_np.max(image)) self.max_slider.setMaximum(maximage) image = property(_get_image, _set_image) def _get_ax(self): return self._img.ax ax = property(_get_ax) def _get_Rectangle(self): return self._img.Rectangle # def _set_rect(self, rect): # self._img.rect(rect) Rectangle = property(_get_Rectangle) def zoom_rect(self, border=None, border_px=None): self._img.zoom_rect(border, border_px) def set_clim(self, *args, **kwargs): self._img.set_clim(*args, **kwargs) def setSliderValue(self, val): self.max_slider.setValue(val)
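
# Usage sketch (illustrative only): one possible way to embed
# Mpl_Image_Plus_Slider in a PyQt4 application.  The random test image and the
# colour limits below are demonstration assumptions, not values used by the
# classes above.
if __name__ == '__main__':
    import sys

    app = QtGui.QApplication(sys.argv)
    viewer = Mpl_Image_Plus_Slider()
    # Any 2D numpy array works; a noisy test frame stands in for real data.
    viewer.image = _np.random.randint(0, 3600, size=(480, 640))
    viewer.set_clim(0, 1800)
    # Dragging on the axes draws the selection Rectangle (the inner Mpl_Image
    # emits rectChanged on mouse release); zoom_rect(border=(0.1, 0.1)) would
    # then pad the selection by 10% per side and zoom the view to it.
    viewer.show()
    sys.exit(app.exec_())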
mit
wangyixiaohuihui/spark2-annotation
python/pyspark/streaming/flume.py
1
6047
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys if sys.version >= "3": from io import BytesIO else: from StringIO import StringIO from py4j.protocol import Py4JJavaError from pyspark.storagelevel import StorageLevel from pyspark.serializers import PairDeserializer, NoOpSerializer, UTF8Deserializer, read_int from pyspark.streaming import DStream __all__ = ['FlumeUtils', 'utf8_decoder'] def utf8_decoder(s): """ Decode the unicode as UTF-8 """ if s is None: return None return s.decode('utf-8') class FlumeUtils(object): @staticmethod def createStream(ssc, hostname, port, storageLevel=StorageLevel.MEMORY_AND_DISK_2, enableDecompression=False, bodyDecoder=utf8_decoder): """ Create an input stream that pulls events from Flume. :param ssc: StreamingContext object :param hostname: Hostname of the slave machine to which the flume data will be sent :param port: Port of the slave machine to which the flume data will be sent :param storageLevel: Storage level to use for storing the received objects :param enableDecompression: Should netty server decompress input stream :param bodyDecoder: A function used to decode body (default is utf8_decoder) :return: A DStream object """ jlevel = ssc._sc._getJavaStorageLevel(storageLevel) helper = FlumeUtils._get_helper(ssc._sc) jstream = helper.createStream(ssc._jssc, hostname, port, jlevel, enableDecompression) return FlumeUtils._toPythonDStream(ssc, jstream, bodyDecoder) @staticmethod def createPollingStream(ssc, addresses, storageLevel=StorageLevel.MEMORY_AND_DISK_2, maxBatchSize=1000, parallelism=5, bodyDecoder=utf8_decoder): """ Creates an input stream that is to be used with the Spark Sink deployed on a Flume agent. This stream will poll the sink for data and will pull events as they are available. :param ssc: StreamingContext object :param addresses: List of (host, port)s on which the Spark Sink is running. :param storageLevel: Storage level to use for storing the received objects :param maxBatchSize: The maximum number of events to be pulled from the Spark sink in a single RPC call :param parallelism: Number of concurrent requests this stream should send to the sink. 
Note that having a higher number of requests concurrently being pulled will result in this stream using more threads :param bodyDecoder: A function used to decode body (default is utf8_decoder) :return: A DStream object """ jlevel = ssc._sc._getJavaStorageLevel(storageLevel) hosts = [] ports = [] for (host, port) in addresses: hosts.append(host) ports.append(port) helper = FlumeUtils._get_helper(ssc._sc) jstream = helper.createPollingStream( ssc._jssc, hosts, ports, jlevel, maxBatchSize, parallelism) return FlumeUtils._toPythonDStream(ssc, jstream, bodyDecoder) @staticmethod def _toPythonDStream(ssc, jstream, bodyDecoder): ser = PairDeserializer(NoOpSerializer(), NoOpSerializer()) stream = DStream(jstream, ssc, ser) def func(event): headersBytes = BytesIO(event[0]) if sys.version >= "3" else StringIO(event[0]) headers = {} strSer = UTF8Deserializer() for i in range(0, read_int(headersBytes)): key = strSer.loads(headersBytes) value = strSer.loads(headersBytes) headers[key] = value body = bodyDecoder(event[1]) return (headers, body) return stream.map(func) @staticmethod def _get_helper(sc): try: return sc._jvm.org.apache.spark.streaming.flume.FlumeUtilsPythonHelper() except TypeError as e: if str(e) == "'JavaPackage' object is not callable": FlumeUtils._printErrorMsg(sc) raise @staticmethod def _printErrorMsg(sc): print(""" ________________________________________________________________________________________________ Spark Streaming's Flume libraries not found in class path. Try one of the following. 1. Include the Flume library and its dependencies with in the spark-submit command as $ bin/spark-submit --packages org.apache.spark:spark-streaming-flume:%s ... 2. Download the JAR of the artifact from Maven Central http://search.maven.org/, Group Id = org.apache.spark, Artifact Id = spark-streaming-flume-assembly, Version = %s. Then, include the jar in the spark-submit command as $ bin/spark-submit --jars <spark-streaming-flume-assembly.jar> ... ________________________________________________________________________________________________ """ % (sc.version, sc.version))
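
# Usage sketch (illustrative only): one way FlumeUtils.createStream is
# typically wired into a streaming job.  The app name, batch interval, host
# and port below are placeholder assumptions; run via spark-submit with the
# spark-streaming-flume package on the classpath (see _printErrorMsg above).
if __name__ == "__main__":
    from pyspark import SparkContext
    from pyspark.streaming import StreamingContext

    sc = SparkContext(appName="FlumeEventCounter")
    ssc = StreamingContext(sc, 2)  # 2-second micro-batches

    # Push-based receiver: a Flume avro sink pointed at this host/port.
    events = FlumeUtils.createStream(ssc, "localhost", 41414)
    # Each element is a (headers, body) pair built by _toPythonDStream().
    events.map(lambda event: event[1]).count().pprint()

    ssc.start()
    ssc.awaitTermination()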
apache-2.0
Alwnikrotikz/hooke
mfp_igor_scripts/FMjoin.py
1
1503
#!/usr/bin/env python

'''
FMjoin.py
Copies all .ibw files contained in a folder and its subfolders into a single folder. Useful for force maps.
Usage:
python FMjoin.py origindir destdir

Alberto Gomez-Casado (c) 2010, University of Twente (The Netherlands)
This program is released under the GNU General Public License version 2.
'''

import os
import shutil
import sys


def main(*args):
    # Both the origin and the destination folder are required
    # (sys.argv[0] is the script name itself).
    if len(sys.argv) < 3:
        print('You must at least specify origin and destination folders.')
        return 0
    origin = sys.argv[1]
    dest = sys.argv[2]

    if os.path.exists(origin):
        if os.path.exists(dest):
            if os.listdir(dest) != []:
                print('Destination folder is not empty! Use another folder.')
                return 0
        else:
            print('Destination folder does not exist, will create it')
            os.mkdir(dest)
    else:
        print('You provided a wrong origin folder name, try again.')
        return 0

    origin = os.path.abspath(origin)
    dest = os.path.abspath(dest)

    for root, dirs, files in os.walk(origin):
        for filename in files:
            # os.path.splitext is safer than split('.'): it does not crash on
            # names without a dot and handles names containing several dots.
            if os.path.splitext(filename)[1] != ".ibw":
                continue
            filepath = os.path.join(root, filename)
            # to avoid overwriting, we collapse unique paths into filenames
            rawdest = filepath.split(os.path.commonprefix([origin, filepath]))[1]
            rawdest = rawdest.replace('/', '')   # for linux
            rawdest = rawdest.replace('\\', '')  # for windows
            destfile = os.path.join(dest, rawdest)
            print('Copying ' + rawdest)
            shutil.copy(filepath, destfile)

    return 0


if __name__ == '__main__':
    sys.exit(main(*sys.argv))
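
# Worked example (illustrative, with an assumed folder layout): given
#
#   maps/run1/line0.ibw
#   maps/run1/line1.ibw
#   maps/run2/line0.ibw
#
# `python FMjoin.py maps joined` copies the files into ./joined as
#
#   run1line0.ibw, run1line1.ibw, run2line0.ibw
#
# i.e. the path below the origin folder is collapsed into the file name,
# which is what keeps files that share a basename (line0.ibw here) from
# overwriting each other.  Files without the .ibw extension are skipped.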
lgpl-3.0
Yong-Lee/django
django/core/management/commands/squashmigrations.py
132
7265
from django.conf import settings from django.core.management.base import BaseCommand, CommandError from django.db import DEFAULT_DB_ALIAS, connections, migrations from django.db.migrations.loader import AmbiguityError, MigrationLoader from django.db.migrations.migration import SwappableTuple from django.db.migrations.optimizer import MigrationOptimizer from django.db.migrations.writer import MigrationWriter from django.utils import six from django.utils.version import get_docs_version class Command(BaseCommand): help = "Squashes an existing set of migrations (from first until specified) into a single new one." def add_arguments(self, parser): parser.add_argument('app_label', help='App label of the application to squash migrations for.') parser.add_argument('migration_name', help='Migrations will be squashed until and including this migration.') parser.add_argument('--no-optimize', action='store_true', dest='no_optimize', default=False, help='Do not try to optimize the squashed operations.') parser.add_argument('--noinput', action='store_false', dest='interactive', default=True, help='Tells Django to NOT prompt the user for input of any kind.') def handle(self, **options): self.verbosity = options.get('verbosity') self.interactive = options.get('interactive') app_label = options['app_label'] migration_name = options['migration_name'] no_optimize = options['no_optimize'] # Load the current graph state, check the app and migration they asked for exists loader = MigrationLoader(connections[DEFAULT_DB_ALIAS]) if app_label not in loader.migrated_apps: raise CommandError( "App '%s' does not have migrations (so squashmigrations on " "it makes no sense)" % app_label ) try: migration = loader.get_migration_by_prefix(app_label, migration_name) except AmbiguityError: raise CommandError( "More than one migration matches '%s' in app '%s'. Please be " "more specific." % (migration_name, app_label) ) except KeyError: raise CommandError( "Cannot find a migration matching '%s' from app '%s'." % (migration_name, app_label) ) # Work out the list of predecessor migrations migrations_to_squash = [ loader.get_migration(al, mn) for al, mn in loader.graph.forwards_plan((migration.app_label, migration.name)) if al == migration.app_label ] # Tell them what we're doing and optionally ask if we should proceed if self.verbosity > 0 or self.interactive: self.stdout.write(self.style.MIGRATE_HEADING("Will squash the following migrations:")) for migration in migrations_to_squash: self.stdout.write(" - %s" % migration.name) if self.interactive: answer = None while not answer or answer not in "yn": answer = six.moves.input("Do you wish to proceed? [yN] ") if not answer: answer = "n" break else: answer = answer[0].lower() if answer != "y": return # Load the operations from all those migrations and concat together, # along with collecting external dependencies and detecting # double-squashing operations = [] dependencies = set() for smigration in migrations_to_squash: if smigration.replaces: raise CommandError( "You cannot squash squashed migrations! 
Please transition " "it to a normal migration first: " "https://docs.djangoproject.com/en/%s/topics/migrations/#squashing-migrations" % get_docs_version() ) operations.extend(smigration.operations) for dependency in smigration.dependencies: if isinstance(dependency, SwappableTuple): if settings.AUTH_USER_MODEL == dependency.setting: dependencies.add(("__setting__", "AUTH_USER_MODEL")) else: dependencies.add(dependency) elif dependency[0] != smigration.app_label: dependencies.add(dependency) if no_optimize: if self.verbosity > 0: self.stdout.write(self.style.MIGRATE_HEADING("(Skipping optimization.)")) new_operations = operations else: if self.verbosity > 0: self.stdout.write(self.style.MIGRATE_HEADING("Optimizing...")) optimizer = MigrationOptimizer() new_operations = optimizer.optimize(operations, migration.app_label) if self.verbosity > 0: if len(new_operations) == len(operations): self.stdout.write(" No optimizations possible.") else: self.stdout.write( " Optimized from %s operations to %s operations." % (len(operations), len(new_operations)) ) # Work out the value of replaces (any squashed ones we're re-squashing) # need to feed their replaces into ours replaces = [] for migration in migrations_to_squash: if migration.replaces: replaces.extend(migration.replaces) else: replaces.append((migration.app_label, migration.name)) # Make a new migration with those operations subclass = type("Migration", (migrations.Migration, ), { "dependencies": dependencies, "operations": new_operations, "replaces": replaces, "initial": True, }) new_migration = subclass("0001_squashed_%s" % migration.name, app_label) # Write out the new migration file writer = MigrationWriter(new_migration) with open(writer.path, "wb") as fh: fh.write(writer.as_string()) if self.verbosity > 0: self.stdout.write(self.style.MIGRATE_HEADING("Created new squashed migration %s" % writer.path)) self.stdout.write(" You should commit this migration but leave the old ones in place;") self.stdout.write(" the new migration will be used for new installs. Once you are sure") self.stdout.write(" all instances of the codebase have applied the migrations you squashed,") self.stdout.write(" you can delete them.") if writer.needs_manual_porting: self.stdout.write(self.style.MIGRATE_HEADING("Manual porting required")) self.stdout.write(" Your migrations contained functions that must be manually copied over,") self.stdout.write(" as we could not safely copy their implementation.") self.stdout.write(" See the comment at the top of the squashed migration for details.")
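
# Usage sketch (illustrative): the app label and migration names below are
# assumed purely for demonstration.  Squashing everything up to and including
# migration 0004:
#
#   $ python manage.py squashmigrations myapp 0004
#
# writes a new migration named 0001_squashed_<migration_name> whose class
# carries the squashed range in `replaces`, roughly:
#
#   class Migration(migrations.Migration):
#       replaces = [('myapp', '0001_initial'), ('myapp', '0002_...'),
#                   ('myapp', '0003_...'), ('myapp', '0004_...')]
#       ...
#
# --no-optimize skips the MigrationOptimizer pass, and --noinput suppresses
# the interactive "Do you wish to proceed?" prompt handled above.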
bsd-3-clause